From fee84cc90542884edda60d3eec2cd47f72d67118 Mon Sep 17 00:00:00 2001 From: erifan01 Date: Mon, 11 Feb 2019 09:40:02 +0000 Subject: [PATCH] cmd/compile: add an optimization rule for math/bits.ReverseBytes16 on arm MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit This CL adds two rules to turn patterns like ((x<<8) | (x>>8)) (the type of x is uint16, "|" can also be "+" or "^") to a REV16 instruction on arm v6+. This optimization rule can be used for math/bits.ReverseBytes16. Benchmarks on arm v6: name old time/op new time/op delta ReverseBytes-32 2.86ns ± 0% 2.86ns ± 0% ~ (all equal) ReverseBytes16-32 2.86ns ± 0% 2.86ns ± 0% ~ (all equal) ReverseBytes32-32 1.29ns ± 0% 1.29ns ± 0% ~ (all equal) ReverseBytes64-32 1.43ns ± 0% 1.43ns ± 0% ~ (all equal) Change-Id: I819e633c9a9d308f8e476fb0c82d73fb73dd019f Reviewed-on: https://go-review.googlesource.com/c/go/+/159019 Reviewed-by: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/arm/ssa.go | 1 + src/cmd/compile/internal/ssa/gen/ARM.rules | 6 + src/cmd/compile/internal/ssa/gen/ARM64.rules | 138 +++++----- src/cmd/compile/internal/ssa/gen/ARMOps.go | 7 +- src/cmd/compile/internal/ssa/opGen.go | 14 + src/cmd/compile/internal/ssa/rewrite.go | 8 +- src/cmd/compile/internal/ssa/rewriteARM.go | 210 +++++++++++++++ src/cmd/compile/internal/ssa/rewriteARM64.go | 268 +++++++++---------- test/codegen/mathbits.go | 3 + 9 files changed, 445 insertions(+), 210 deletions(-) diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 9a8fabf622..320fe98707 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -659,6 +659,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpARMMVN, ssa.OpARMCLZ, ssa.OpARMREV, + ssa.OpARMREV16, ssa.OpARMRBIT, ssa.OpARMSQRTD, ssa.OpARMNEGF, diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 8b0e82f154..db418b76a6 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -1216,6 +1216,12 @@ ( ORshiftRL [c] (SLLconst x [32-c]) x) -> (SRRconst [ c] x) (XORshiftRL [c] (SLLconst x [32-c]) x) -> (SRRconst [ c] x) +// ((x>>8) | (x<<8)) -> (REV16 x), the type of x is uint16, "|" can also be "^" or "+". +// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by +// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL. 
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16 x) +((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x) + // use indexed loads and stores (MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem) (MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 133a893610..ca123d7375 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -1751,11 +1751,11 @@ ( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x) (XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x) -(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) +(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) -> (RORWconst [32-c] x) -( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) +( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) -> (RORWconst [32-c] x) -(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) +(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) -> (RORWconst [32-c] x) (ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x) ( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x) @@ -1794,18 +1794,18 @@ -> (RORW x y) // ((x>>8) | (x<<8)) -> (REV16W x), the type of x is uint16, "|" can also be "^" or "+". -((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [arm64BFAuxInt(8, 8)] x) x) -> (REV16W x) +((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16W x) // Extract from reg pair (ADDshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x) ( ORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x) (XORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x) -(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) +(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) -> (EXTRWconst [32-c] x2 x) -( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) +( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) -> (EXTRWconst [32-c] x2 x) -(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) +(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) -> (EXTRWconst [32-c] x2 x) // Generic rules rewrite certain AND to a pair of shifts. 
@@ -1821,88 +1821,88 @@ // sbfiz // (x << lc) >> rc -(SRAconst [rc] (SLLconst [lc] x)) && lc > rc -> (SBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) -(MOVWreg (SLLconst [lc] x)) && lc < 32 -> (SBFIZ [arm64BFAuxInt(lc, 32-lc)] x) -(MOVHreg (SLLconst [lc] x)) && lc < 16 -> (SBFIZ [arm64BFAuxInt(lc, 16-lc)] x) -(MOVBreg (SLLconst [lc] x)) && lc < 8 -> (SBFIZ [arm64BFAuxInt(lc, 8-lc)] x) +(SRAconst [rc] (SLLconst [lc] x)) && lc > rc -> (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) +(MOVWreg (SLLconst [lc] x)) && lc < 32 -> (SBFIZ [armBFAuxInt(lc, 32-lc)] x) +(MOVHreg (SLLconst [lc] x)) && lc < 16 -> (SBFIZ [armBFAuxInt(lc, 16-lc)] x) +(MOVBreg (SLLconst [lc] x)) && lc < 8 -> (SBFIZ [armBFAuxInt(lc, 8-lc)] x) // sbfx // (x << lc) >> rc -(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc -> (SBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) -(SRAconst [rc] (MOVWreg x)) && rc < 32 -> (SBFX [arm64BFAuxInt(rc, 32-rc)] x) -(SRAconst [rc] (MOVHreg x)) && rc < 16 -> (SBFX [arm64BFAuxInt(rc, 16-rc)] x) -(SRAconst [rc] (MOVBreg x)) && rc < 8 -> (SBFX [arm64BFAuxInt(rc, 8-rc)] x) +(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc -> (SBFX [armBFAuxInt(rc-lc, 64-rc)] x) +(SRAconst [rc] (MOVWreg x)) && rc < 32 -> (SBFX [armBFAuxInt(rc, 32-rc)] x) +(SRAconst [rc] (MOVHreg x)) && rc < 16 -> (SBFX [armBFAuxInt(rc, 16-rc)] x) +(SRAconst [rc] (MOVBreg x)) && rc < 8 -> (SBFX [armBFAuxInt(rc, 8-rc)] x) // sbfiz/sbfx combinations: merge shifts into bitfield ops (SRAconst [sc] (SBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc) - -> (SBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) + -> (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) (SRAconst [sc] (SBFIZ [bfc] x)) && sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - -> (SBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) + -> (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) // ubfiz // (x & ac) << sc (SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0) - -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x) -(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFIZ [arm64BFAuxInt(sc, 32)] x) -(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFIZ [arm64BFAuxInt(sc, 16)] x) -(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFIZ [arm64BFAuxInt(sc, 8)] x) + -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) +(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFIZ [armBFAuxInt(sc, 32)] x) +(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFIZ [armBFAuxInt(sc, 16)] x) +(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFIZ [armBFAuxInt(sc, 8)] x) // (x << sc) & ac (ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc) - -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) + -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) (MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc) - -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) + -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) (MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc) - -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) + -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) (MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc) - -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) + -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) // (x << lc) >> rc -(SRLconst [rc] (SLLconst [lc] x)) && lc > rc -> (UBFIZ 
[arm64BFAuxInt(lc-rc, 64-lc)] x) +(SRLconst [rc] (SLLconst [lc] x)) && lc > rc -> (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) // ubfx // (x >> sc) & ac (ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0) - -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x) -(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFX [arm64BFAuxInt(sc, 32)] x) -(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFX [arm64BFAuxInt(sc, 16)] x) -(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFX [arm64BFAuxInt(sc, 8)] x) + -> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) +(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFX [armBFAuxInt(sc, 32)] x) +(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFX [armBFAuxInt(sc, 16)] x) +(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFX [armBFAuxInt(sc, 8)] x) // (x & ac) >> sc (SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc) - -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) + -> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) (SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc) - -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) + -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) (SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc) - -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) + -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) (SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc) - -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) + -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) // (x << lc) >> rc -(SRLconst [rc] (SLLconst [lc] x)) && lc < rc -> (UBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) +(SRLconst [rc] (SLLconst [lc] x)) && lc < rc -> (UBFX [armBFAuxInt(rc-lc, 64-rc)] x) // ubfiz/ubfx combinations: merge shifts into bitfield ops (SRLconst [sc] (UBFX [bfc] x)) && sc < getARM64BFwidth(bfc) - -> (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) + -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) (UBFX [bfc] (SRLconst [sc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64 - -> (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) + -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) (SLLconst [sc] (UBFIZ [bfc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64 - -> (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) + -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) (UBFIZ [bfc] (SLLconst [sc] x)) && sc < getARM64BFwidth(bfc) - -> (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) + -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) // ((x << c1) >> c2) >> c3 (SRLconst [sc] (UBFIZ [bfc] x)) && sc == getARM64BFlsb(bfc) -> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x) (SRLconst [sc] (UBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc) - -> (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) + -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) (SRLconst [sc] (UBFIZ [bfc] x)) && sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - -> (UBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) + -> (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) // ((x << c1) << c2) >> c3 (UBFX [bfc] (SLLconst [sc] x)) && sc == getARM64BFlsb(bfc) -> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x) (UBFX [bfc] (SLLconst [sc] x)) && sc < getARM64BFlsb(bfc) - -> (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) + -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, 
getARM64BFwidth(bfc))] x) (UBFX [bfc] (SLLconst [sc] x)) && sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - -> (UBFIZ [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) + -> (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) // bfi (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) @@ -1910,7 +1910,7 @@ -> (BFI [bfc] y x) (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) && lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc)) - -> (BFI [arm64BFAuxInt(lc-rc, 64-lc)] x y) + -> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y) // bfxil (OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(getARM64BFwidth(bfc))-1) -> (BFXIL [bfc] y x) @@ -2560,23 +2560,23 @@ && x.Uses == 1 && clobber(x) -> (MOVHstoreidx ptr idx w mem) -(MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) +(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) && x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) -> (MOVHstore [i-1] {s} ptr0 w mem) -(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) +(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) && x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) -> (MOVHstoreidx ptr1 idx1 w mem) -(MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) +(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) && x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) -> (MOVHstore [i-1] {s} ptr0 w mem) -(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) +(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) && x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) @@ -2653,18 +2653,18 @@ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) -> (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) -(MOVHstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) +(MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) && x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) -> (MOVWstore [i-2] {s} ptr0 w mem) -(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) +(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) && x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) -> (MOVWstoreidx ptr1 idx1 w mem) -(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) +(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) && x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) @@ -2792,9 +2792,9 @@ && clobber(x6) -> (MOVDstoreidx ptr0 idx0 (REV w) mem) (MOVBstore [i] {s} ptr w - x0:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) - x1:(MOVBstore [i-2] {s} ptr (UBFX [arm64BFAuxInt(16, 16)] w) - x2:(MOVBstore [i-3] {s} ptr (UBFX 
[armBFAuxInt(24, 8)] w) mem)))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 @@ -2803,9 +2803,9 @@ && clobber(x2) -> (MOVWstore [i-3] {s} ptr (REVW w) mem) (MOVBstore [3] {s} p w - x0:(MOVBstore [2] {s} p (UBFX [arm64BFAuxInt(8, 24)] w) - x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [arm64BFAuxInt(16, 16)] w) - x2:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) + x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) + x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem)))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 @@ -2817,9 +2817,9 @@ && clobber(x2) -> (MOVWstoreidx ptr0 idx0 (REVW w) mem) (MOVBstoreidx ptr (ADDconst [3] idx) w - x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(8, 24)] w) - x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(16, 16)] w) - x2:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) + x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) + x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem)))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 @@ -2828,9 +2828,9 @@ && clobber(x2) -> (MOVWstoreidx ptr idx (REVW w) mem) (MOVBstoreidx ptr idx w - x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 24)] w) - x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(16, 16)] w) - x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) + x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) + x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem)))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 @@ -2898,21 +2898,21 @@ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) -> (MOVHstoreidx ptr0 idx0 (REV16W w) mem) -(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 8)] w) mem)) +(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem)) && x.Uses == 1 && clobber(x) -> (MOVHstore [i-1] {s} ptr (REV16W w) mem) -(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 8)] w) mem)) +(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem)) && x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) -> (MOVHstoreidx ptr0 idx0 (REV16W w) mem) -(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(8, 8)] w) mem)) +(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem)) && x.Uses == 1 && clobber(x) -> (MOVHstoreidx ptr idx (REV16W w) mem) -(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 8)] w) mem)) +(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem)) && x.Uses == 1 && clobber(x) -> (MOVHstoreidx ptr idx w mem) @@ -2926,11 +2926,11 @@ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) -> (MOVHstoreidx ptr0 idx0 (REV16W w) mem) -(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) mem)) +(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem)) && x.Uses == 1 
&& clobber(x) -> (MOVHstore [i-1] {s} ptr (REV16W w) mem) -(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 24)] w) mem)) +(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem)) && x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 4e2b0c5a5d..86d7e5f8ec 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -207,9 +207,10 @@ func init() { {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 - {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero - {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // reverse byte order - {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // reverse bit order + {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero + {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // reverse byte order + {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // reverse byte order in 16-bit halfwords + {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // reverse bit order // shifts {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 948bbdc32a..5fcc64f460 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -907,6 +907,7 @@ const ( OpARMSQRTD OpARMCLZ OpARMREV + OpARMREV16 OpARMRBIT OpARMSLL OpARMSLLconst @@ -12036,6 +12037,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "REV16", + argLen: 1, + asm: arm.AREV16, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "RBIT", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 9c9de750b2..dbbb33c171 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -1037,13 +1037,13 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { return false } -// encodes the lsb and width for arm64 bitfield ops into the expected auxInt format. -func arm64BFAuxInt(lsb, width int64) int64 { +// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format. 
+func armBFAuxInt(lsb, width int64) int64 { if lsb < 0 || lsb > 63 { - panic("ARM64 bit field lsb constant out of range") + panic("ARM(64) bit field lsb constant out of range") } if width < 1 || width > 64 { - panic("ARM64 bit field width constant out of range") + panic("ARM(64) bit field width constant out of range") } return width | lsb<<8 } diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 4fc7fdfbe1..c190ef779c 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -2933,6 +2933,8 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { b := v.Block _ = b + typ := &b.Func.Config.Types + _ = typ // match: (ADDshiftLL (MOVWconst [c]) x [d]) // cond: // result: (ADDconst [c] (SLLconst x [d])) @@ -2992,6 +2994,74 @@ func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { v.AddArg(x) return true } + // match: (ADDshiftLL [8] (BFXU [armBFAuxInt(8, 8)] x) x) + // cond: + // result: (REV16 x) + for { + if v.Type != typ.UInt16 { + break + } + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMBFXU { + break + } + if v_0.Type != typ.UInt16 { + break + } + if v_0.AuxInt != armBFAuxInt(8, 8) { + break + } + x := v_0.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) + // cond: objabi.GOARM>=6 + // result: (REV16 x) + for { + if v.Type != typ.UInt16 { + break + } + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + if v_0.Type != typ.UInt16 { + break + } + if v_0.AuxInt != 24 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMSLLconst { + break + } + if v_0_0.AuxInt != 16 { + break + } + x := v_0_0.Args[0] + if x != v.Args[1] { + break + } + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } return false } func rewriteValueARM_OpARMADDshiftLLreg_0(v *Value) bool { @@ -11952,6 +12022,8 @@ func rewriteValueARM_OpARMORconst_0(v *Value) bool { func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { b := v.Block _ = b + typ := &b.Func.Config.Types + _ = typ // match: (ORshiftLL (MOVWconst [c]) x [d]) // cond: // result: (ORconst [c] (SLLconst x [d])) @@ -12011,6 +12083,74 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { v.AddArg(x) return true } + // match: (ORshiftLL [8] (BFXU [armBFAuxInt(8, 8)] x) x) + // cond: + // result: (REV16 x) + for { + if v.Type != typ.UInt16 { + break + } + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMBFXU { + break + } + if v_0.Type != typ.UInt16 { + break + } + if v_0.AuxInt != armBFAuxInt(8, 8) { + break + } + x := v_0.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) + // cond: objabi.GOARM>=6 + // result: (REV16 x) + for { + if v.Type != typ.UInt16 { + break + } + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + if v_0.Type != typ.UInt16 { + break + } + if v_0.AuxInt != 24 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMSLLconst { + break + } + if v_0_0.AuxInt != 16 { + break + } + x := v_0_0.Args[0] + if x != v.Args[1] { + break + } + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } // 
match: (ORshiftLL x y:(SLLconst x [c]) [d]) // cond: c==d // result: y @@ -17230,6 +17370,8 @@ func rewriteValueARM_OpARMXORconst_0(v *Value) bool { func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { b := v.Block _ = b + typ := &b.Func.Config.Types + _ = typ // match: (XORshiftLL (MOVWconst [c]) x [d]) // cond: // result: (XORconst [c] (SLLconst x [d])) @@ -17289,6 +17431,74 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { v.AddArg(x) return true } + // match: (XORshiftLL [8] (BFXU [armBFAuxInt(8, 8)] x) x) + // cond: + // result: (REV16 x) + for { + if v.Type != typ.UInt16 { + break + } + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMBFXU { + break + } + if v_0.Type != typ.UInt16 { + break + } + if v_0.AuxInt != armBFAuxInt(8, 8) { + break + } + x := v_0.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) + // cond: objabi.GOARM>=6 + // result: (REV16 x) + for { + if v.Type != typ.UInt16 { + break + } + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + if v_0.Type != typ.UInt16 { + break + } + if v_0.AuxInt != 24 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMSLLconst { + break + } + if v_0_0.AuxInt != 16 { + break + } + x := v_0_0.Args[0] + if x != v.Args[1] { + break + } + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } // match: (XORshiftLL x (SLLconst x [c]) [d]) // cond: c==d // result: (MOVWconst [0]) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 45801a4003..25246ce5e5 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -2366,7 +2366,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { return true } // match: (ADDshiftLL [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (RORWconst [32-c] x) for { t := v.Type @@ -2381,7 +2381,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { if x != v.Args[1] { break } - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } v.reset(OpARM64RORWconst) @@ -2389,7 +2389,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ADDshiftLL [8] (UBFX [arm64BFAuxInt(8, 8)] x) x) + // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 8)] x) x) // cond: // result: (REV16W x) for { @@ -2407,7 +2407,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { if v_0.Type != typ.UInt16 { break } - if v_0.AuxInt != arm64BFAuxInt(8, 8) { + if v_0.AuxInt != armBFAuxInt(8, 8) { break } x := v_0.Args[0] @@ -2440,7 +2440,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { return true } // match: (ADDshiftLL [c] (UBFX [bfc] x) x2) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (EXTRWconst [32-c] x2 x) for { t := v.Type @@ -2453,7 +2453,7 @@ func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { bfc := v_0.AuxInt x := v_0.Args[0] x2 := v.Args[1] - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } 
v.reset(OpARM64EXTRWconst) @@ -2912,7 +2912,7 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { } // match: (ANDconst [ac] (SLLconst [sc] x)) // cond: isARM64BFMask(sc, ac, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) + // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) for { ac := v.AuxInt v_0 := v.Args[0] @@ -2925,13 +2925,13 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { break } v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc)) v.AddArg(x) return true } // match: (ANDconst [ac] (SRLconst [sc] x)) // cond: isARM64BFMask(sc, ac, 0) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) for { ac := v.AuxInt v_0 := v.Args[0] @@ -2944,7 +2944,7 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, 0)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0)) v.AddArg(x) return true } @@ -9130,7 +9130,7 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { } // match: (MOVBUreg (SLLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<8-1, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) + // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { @@ -9142,13 +9142,13 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { break } v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)) v.AddArg(x) return true } // match: (MOVBUreg (SRLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<8-1, 0) - // result: (UBFX [arm64BFAuxInt(sc, 8)] x) + // result: (UBFX [armBFAuxInt(sc, 8)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { @@ -9160,7 +9160,7 @@ func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, 8) + v.AuxInt = armBFAuxInt(sc, 8) v.AddArg(x) return true } @@ -9383,7 +9383,7 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { } // match: (MOVBreg (SLLconst [lc] x)) // cond: lc < 8 - // result: (SBFIZ [arm64BFAuxInt(lc, 8-lc)] x) + // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { @@ -9395,7 +9395,7 @@ func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { break } v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc, 8-lc) + v.AuxInt = armBFAuxInt(lc, 8-lc) v.AddArg(x) return true } @@ -9731,7 +9731,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) + // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) // result: (MOVHstore [i-1] {s} ptr0 w mem) for { @@ -9743,7 +9743,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - if v_1.AuxInt != arm64BFAuxInt(8, 8) { + if v_1.AuxInt != armBFAuxInt(8, 8) { break } w := v_1.Args[0] @@ -9774,7 +9774,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 
w mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVHstoreidx ptr1 idx1 w mem) for { @@ -9794,7 +9794,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - if v_1.AuxInt != arm64BFAuxInt(8, 8) { + if v_1.AuxInt != armBFAuxInt(8, 8) { break } w := v_1.Args[0] @@ -9819,7 +9819,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) + // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) // result: (MOVHstore [i-1] {s} ptr0 w mem) for { @@ -9831,7 +9831,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - if v_1.AuxInt != arm64BFAuxInt(8, 24) { + if v_1.AuxInt != armBFAuxInt(8, 24) { break } w := v_1.Args[0] @@ -9862,7 +9862,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVHstoreidx ptr1 idx1 w mem) for { @@ -9882,7 +9882,7 @@ func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - if v_1.AuxInt != arm64BFAuxInt(8, 24) { + if v_1.AuxInt != armBFAuxInt(8, 24) { break } w := v_1.Args[0] @@ -10694,7 +10694,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) for { @@ -10721,7 +10721,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x0_1.Op != OpARM64UBFX { break } - if x0_1.AuxInt != arm64BFAuxInt(8, 24) { + if x0_1.AuxInt != armBFAuxInt(8, 24) { break } if w != x0_1.Args[0] { @@ -10745,7 +10745,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x1_1.Op != OpARM64UBFX { break } - if x1_1.AuxInt != arm64BFAuxInt(16, 16) { + if x1_1.AuxInt != armBFAuxInt(16, 16) { break } if w != x1_1.Args[0] { @@ -10769,7 +10769,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x2_1.Op != OpARM64UBFX { break } - if x2_1.AuxInt != arm64BFAuxInt(24, 8) { + if x2_1.AuxInt != armBFAuxInt(24, 8) { break } if w != x2_1.Args[0] { @@ -10789,7 +10789,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 
idx0 (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) for { @@ -10818,7 +10818,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x0_1.Op != OpARM64UBFX { break } - if x0_1.AuxInt != arm64BFAuxInt(8, 24) { + if x0_1.AuxInt != armBFAuxInt(8, 24) { break } if w != x0_1.Args[0] { @@ -10846,7 +10846,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x1_1.Op != OpARM64UBFX { break } - if x1_1.AuxInt != arm64BFAuxInt(16, 16) { + if x1_1.AuxInt != armBFAuxInt(16, 16) { break } if w != x1_1.Args[0] { @@ -10863,7 +10863,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x2_2.Op != OpARM64UBFX { break } - if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + if x2_2.AuxInt != armBFAuxInt(24, 8) { break } if w != x2_2.Args[0] { @@ -11381,7 +11381,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) for { @@ -11408,7 +11408,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if x_1.Op != OpARM64UBFX { break } - if x_1.AuxInt != arm64BFAuxInt(8, 8) { + if x_1.AuxInt != armBFAuxInt(8, 8) { break } if w != x_1.Args[0] { @@ -11428,7 +11428,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) for { @@ -11456,7 +11456,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if x_2.Op != OpARM64UBFX { break } - if x_2.AuxInt != arm64BFAuxInt(8, 8) { + if x_2.AuxInt != armBFAuxInt(8, 8) { break } if w != x_2.Args[0] { @@ -11577,7 +11577,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) mem)) + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) for { @@ -11604,7 +11604,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if x_1.Op != OpARM64UBFX { break } - if x_1.AuxInt != arm64BFAuxInt(8, 24) { + if x_1.AuxInt != armBFAuxInt(8, 24) { break } if w != x_1.Args[0] { @@ -11624,7 +11624,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 24)] w) mem)) + // match: (MOVBstore 
[1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) for { @@ -11652,7 +11652,7 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if x_2.Op != OpARM64UBFX { break } - if x_2.AuxInt != arm64BFAuxInt(8, 24) { + if x_2.AuxInt != armBFAuxInt(8, 24) { break } if w != x_2.Args[0] { @@ -12014,7 +12014,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { b := v.Block _ = b - // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVWstoreidx ptr idx (REVW w) mem) for { @@ -12051,7 +12051,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x0_2.Op != OpARM64UBFX { break } - if x0_2.AuxInt != arm64BFAuxInt(8, 24) { + if x0_2.AuxInt != armBFAuxInt(8, 24) { break } if w != x0_2.Args[0] { @@ -12079,7 +12079,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x1_2.Op != OpARM64UBFX { break } - if x1_2.AuxInt != arm64BFAuxInt(16, 16) { + if x1_2.AuxInt != armBFAuxInt(16, 16) { break } if w != x1_2.Args[0] { @@ -12100,7 +12100,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x2_2.Op != OpARM64UBFX { break } - if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + if x2_2.AuxInt != armBFAuxInt(24, 8) { break } if w != x2_2.Args[0] { @@ -12119,7 +12119,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVWstoreidx ptr idx w mem) for { @@ -12149,7 +12149,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x0_2.Op != OpARM64UBFX { break } - if x0_2.AuxInt != arm64BFAuxInt(8, 24) { + if x0_2.AuxInt != armBFAuxInt(8, 24) { break } if w != x0_2.Args[0] { @@ -12177,7 +12177,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x1_2.Op != OpARM64UBFX { break } - if x1_2.AuxInt != arm64BFAuxInt(16, 16) { + if x1_2.AuxInt != armBFAuxInt(16, 16) { break } if w != x1_2.Args[0] { @@ -12205,7 +12205,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x2_2.Op != OpARM64UBFX { break } - if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + if x2_2.AuxInt != armBFAuxInt(24, 8) { break } if w != 
x2_2.Args[0] { @@ -12222,7 +12222,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx ptr idx (REV16W w) mem) for { @@ -12252,7 +12252,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x_2.Op != OpARM64UBFX { break } - if x_2.AuxInt != arm64BFAuxInt(8, 8) { + if x_2.AuxInt != armBFAuxInt(8, 8) { break } if w != x_2.Args[0] { @@ -12271,7 +12271,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx ptr idx w mem) for { @@ -12301,7 +12301,7 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { if x_2.Op != OpARM64UBFX { break } - if x_2.AuxInt != arm64BFAuxInt(8, 8) { + if x_2.AuxInt != armBFAuxInt(8, 8) { break } if w != x_2.Args[0] { @@ -13941,7 +13941,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { } // match: (MOVHUreg (SLLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<16-1, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) + // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { @@ -13953,7 +13953,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { break } v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) v.AddArg(x) return true } @@ -13962,7 +13962,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool { // match: (MOVHUreg (SRLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<16-1, 0) - // result: (UBFX [arm64BFAuxInt(sc, 16)] x) + // result: (UBFX [armBFAuxInt(sc, 16)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { @@ -13974,7 +13974,7 @@ func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, 16) + v.AuxInt = armBFAuxInt(sc, 16) v.AddArg(x) return true } @@ -14423,7 +14423,7 @@ func rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { } // match: (MOVHreg (SLLconst [lc] x)) // cond: lc < 16 - // result: (SBFIZ [arm64BFAuxInt(lc, 16-lc)] x) + // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { @@ -14435,7 +14435,7 @@ func rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { break } v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc, 16-lc) + v.AuxInt = armBFAuxInt(lc, 16-lc) v.AddArg(x) return true } @@ -14809,7 +14809,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVHstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) + // match: (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) // result: (MOVWstore [i-2] {s} ptr0 w mem) for { @@ -14821,7 +14821,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - 
if v_1.AuxInt != arm64BFAuxInt(16, 16) { + if v_1.AuxInt != armBFAuxInt(16, 16) { break } w := v_1.Args[0] @@ -14852,7 +14852,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) + // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) // result: (MOVWstoreidx ptr1 idx1 w mem) for { @@ -14872,7 +14872,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - if v_1.AuxInt != arm64BFAuxInt(16, 16) { + if v_1.AuxInt != armBFAuxInt(16, 16) { break } w := v_1.Args[0] @@ -14897,7 +14897,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) + // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) for { @@ -14920,7 +14920,7 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { if v_1.Op != OpARM64UBFX { break } - if v_1.AuxInt != arm64BFAuxInt(16, 16) { + if v_1.AuxInt != armBFAuxInt(16, 16) { break } w := v_1.Args[0] @@ -16610,7 +16610,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { } // match: (MOVWUreg (SLLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<32-1, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) + // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { @@ -16622,13 +16622,13 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { break } v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) v.AddArg(x) return true } // match: (MOVWUreg (SRLconst [sc] x)) // cond: isARM64BFMask(sc, 1<<32-1, 0) - // result: (UBFX [arm64BFAuxInt(sc, 32)] x) + // result: (UBFX [armBFAuxInt(sc, 32)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { @@ -16640,7 +16640,7 @@ func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, 32) + v.AuxInt = armBFAuxInt(sc, 32) v.AddArg(x) return true } @@ -17168,7 +17168,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { } // match: (MOVWreg (SLLconst [lc] x)) // cond: lc < 32 - // result: (SBFIZ [arm64BFAuxInt(lc, 32-lc)] x) + // result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { @@ -17180,7 +17180,7 @@ func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { break } v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc, 32-lc) + v.AuxInt = armBFAuxInt(lc, 32-lc) v.AddArg(x) return true } @@ -26620,7 +26620,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { return true } // match: (ORshiftLL [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (RORWconst [32-c] x) for { t := v.Type @@ -26635,7 +26635,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v 
*Value) bool { if x != v.Args[1] { break } - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } v.reset(OpARM64RORWconst) @@ -26643,7 +26643,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftLL [8] (UBFX [arm64BFAuxInt(8, 8)] x) x) + // match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 8)] x) x) // cond: // result: (REV16W x) for { @@ -26661,7 +26661,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { if v_0.Type != typ.UInt16 { break } - if v_0.AuxInt != arm64BFAuxInt(8, 8) { + if v_0.AuxInt != armBFAuxInt(8, 8) { break } x := v_0.Args[0] @@ -26694,7 +26694,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { return true } // match: (ORshiftLL [c] (UBFX [bfc] x) x2) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (EXTRWconst [32-c] x2 x) for { t := v.Type @@ -26707,7 +26707,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { bfc := v_0.AuxInt x := v_0.Args[0] x2 := v.Args[1] - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } v.reset(OpARM64EXTRWconst) @@ -28883,7 +28883,7 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { } // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) // cond: lc > rc && ac == ^((1< rc - // result: (SBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) + // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) for { rc := v.AuxInt v_0 := v.Args[0] @@ -29146,13 +29146,13 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc) + v.AuxInt = armBFAuxInt(lc-rc, 64-lc) v.AddArg(x) return true } // match: (SRAconst [rc] (SLLconst [lc] x)) // cond: lc <= rc - // result: (SBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) + // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x) for { rc := v.AuxInt v_0 := v.Args[0] @@ -29165,13 +29165,13 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc) + v.AuxInt = armBFAuxInt(rc-lc, 64-rc) v.AddArg(x) return true } // match: (SRAconst [rc] (MOVWreg x)) // cond: rc < 32 - // result: (SBFX [arm64BFAuxInt(rc, 32-rc)] x) + // result: (SBFX [armBFAuxInt(rc, 32-rc)] x) for { rc := v.AuxInt v_0 := v.Args[0] @@ -29183,13 +29183,13 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc, 32-rc) + v.AuxInt = armBFAuxInt(rc, 32-rc) v.AddArg(x) return true } // match: (SRAconst [rc] (MOVHreg x)) // cond: rc < 16 - // result: (SBFX [arm64BFAuxInt(rc, 16-rc)] x) + // result: (SBFX [armBFAuxInt(rc, 16-rc)] x) for { rc := v.AuxInt v_0 := v.Args[0] @@ -29201,13 +29201,13 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc, 16-rc) + v.AuxInt = armBFAuxInt(rc, 16-rc) v.AddArg(x) return true } // match: (SRAconst [rc] (MOVBreg x)) // cond: rc < 8 - // result: (SBFX [arm64BFAuxInt(rc, 8-rc)] x) + // result: (SBFX [armBFAuxInt(rc, 8-rc)] x) for { rc := v.AuxInt v_0 := v.Args[0] @@ -29219,13 +29219,13 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc, 8-rc) + v.AuxInt = armBFAuxInt(rc, 8-rc) v.AddArg(x) return true } // match: (SRAconst [sc] (SBFIZ [bfc] x)) // cond: sc < 
getARM64BFlsb(bfc) - // result: (SBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) + // result: (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) for { sc := v.AuxInt v_0 := v.Args[0] @@ -29238,13 +29238,13 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc)) + v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc)) v.AddArg(x) return true } // match: (SRAconst [sc] (SBFIZ [bfc] x)) // cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - // result: (SBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) + // result: (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) for { sc := v.AuxInt v_0 := v.Args[0] @@ -29257,7 +29257,7 @@ func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { break } v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) + v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) v.AddArg(x) return true } @@ -29320,7 +29320,7 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { } // match: (SRLconst [rc] (SLLconst [lc] x)) // cond: lc > rc - // result: (UBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) + // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) for { rc := v.AuxInt v_0 := v.Args[0] @@ -29333,13 +29333,13 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { break } v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc) + v.AuxInt = armBFAuxInt(lc-rc, 64-lc) v.AddArg(x) return true } // match: (SRLconst [sc] (ANDconst [ac] x)) // cond: isARM64BFMask(sc, ac, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) for { sc := v.AuxInt v_0 := v.Args[0] @@ -29352,13 +29352,13 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc)) v.AddArg(x) return true } // match: (SRLconst [sc] (MOVWUreg x)) // cond: isARM64BFMask(sc, 1<<32-1, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) for { sc := v.AuxInt v_0 := v.Args[0] @@ -29370,13 +29370,13 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) v.AddArg(x) return true } // match: (SRLconst [sc] (MOVHUreg x)) // cond: isARM64BFMask(sc, 1<<16-1, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) for { sc := v.AuxInt v_0 := v.Args[0] @@ -29388,13 +29388,13 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { break } v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) + v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) v.AddArg(x) return true } // match: (SRLconst [sc] (MOVBUreg x)) // cond: isARM64BFMask(sc, 1<<8-1, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) for { sc := v.AuxInt v_0 := v.Args[0] @@ -29406,13 +29406,13 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { break } 
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [rc] (SLLconst [lc] x))
 	// cond: lc < rc
-	// result: (UBFX [arm64BFAuxInt(rc-lc, 64-rc)] x)
+	// result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
 	for {
 		rc := v.AuxInt
 		v_0 := v.Args[0]
@@ -29425,13 +29425,13 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc)
+		v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFX [bfc] x))
 	// cond: sc < getARM64BFwidth(bfc)
-	// result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+	// result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
 	for {
 		sc := v.AuxInt
 		v_0 := v.Args[0]
@@ -29444,7 +29444,7 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
 		v.AddArg(x)
 		return true
 	}
@@ -29472,7 +29472,7 @@ func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool {
 func rewriteValueARM64_OpARM64SRLconst_10(v *Value) bool {
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
 	// cond: sc < getARM64BFlsb(bfc)
-	// result: (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+	// result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
 	for {
 		sc := v.AuxInt
 		v_0 := v.Args[0]
@@ -29485,13 +29485,13 @@ func rewriteValueARM64_OpARM64SRLconst_10(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
 	// cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	// result: (UBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+	// result: (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
 	for {
 		sc := v.AuxInt
 		v_0 := v.Args[0]
@@ -29504,7 +29504,7 @@ func rewriteValueARM64_OpARM64SRLconst_10(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+		v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
 		v.AddArg(x)
 		return true
 	}
@@ -30449,7 +30449,7 @@ func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool {
 func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool {
 	// match: (UBFIZ [bfc] (SLLconst [sc] x))
 	// cond: sc < getARM64BFwidth(bfc)
-	// result: (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+	// result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
 	for {
 		bfc := v.AuxInt
 		v_0 := v.Args[0]
@@ -30462,7 +30462,7 @@ func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
 		v.AddArg(x)
 		return true
 	}
@@ -30471,7 +30471,7 @@ func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool {
 func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool {
 	// match: (UBFX [bfc] (SRLconst [sc] x))
 	// cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
-	// result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+	// result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
 	for {
 		bfc := v.AuxInt
 		v_0 := v.Args[0]
@@ -30484,7 +30484,7 @@ func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
 		v.AddArg(x)
 		return true
 	}
@@ -30509,7 +30509,7 @@ func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool {
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
 	// cond: sc < getARM64BFlsb(bfc)
-	// result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+	// result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
 	for {
 		bfc := v.AuxInt
 		v_0 := v.Args[0]
@@ -30522,13 +30522,13 @@ func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
 	// cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	// result: (UBFIZ [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+	// result: (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
 	for {
 		bfc := v.AuxInt
 		v_0 := v.Args[0]
@@ -30541,7 +30541,7 @@ func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+		v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
 		v.AddArg(x)
 		return true
 	}
@@ -32099,7 +32099,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool {
 		return true
 	}
 	// match: (XORshiftLL [c] (UBFX [bfc] x) x)
-	// cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+	// cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
 	// result: (RORWconst [32-c] x)
 	for {
 		t := v.Type
@@ -32114,7 +32114,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool {
 		if x != v.Args[1] {
 			break
 		}
-		if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64RORWconst)
@@ -32122,7 +32122,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool {
 		v.AddArg(x)
 		return true
 	}
-	// match: (XORshiftLL [8] (UBFX [arm64BFAuxInt(8, 8)] x) x)
+	// match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 8)] x) x)
 	// cond:
 	// result: (REV16W x)
 	for {
@@ -32140,7 +32140,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool {
 		if v_0.Type != typ.UInt16 {
 			break
 		}
-		if v_0.AuxInt != arm64BFAuxInt(8, 8) {
+		if v_0.AuxInt != armBFAuxInt(8, 8) {
 			break
 		}
 		x := v_0.Args[0]
@@ -32173,7 +32173,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool {
 		return true
 	}
 	// match: (XORshiftLL [c] (UBFX [bfc] x) x2)
-	// cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+	// cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
 	// result: (EXTRWconst [32-c] x2 x)
 	for {
 		t := v.Type
@@ -32186,7 +32186,7 @@ func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool {
 		bfc := v_0.AuxInt
 		x := v_0.Args[0]
 		x2 := v.Args[1]
-		if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64EXTRWconst)
diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go
index cc3c91eb0d..09939bb6be 100644
--- a/test/codegen/mathbits.go
+++ b/test/codegen/mathbits.go
@@ -171,6 +171,9 @@ func ReverseBytes32(n uint32) uint32 {
 func ReverseBytes16(n uint16) uint16 {
 	// amd64:"ROLW"
 	// arm64:"REV16W",-"UBFX",-"ORR"
+	// arm/5:"SLL","SRL","ORR"
+	// arm/6:"REV16"
+	// arm/7:"REV16"
 	return bits.ReverseBytes16(n)
 }
 
-- 
2.50.0
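
Note: the REV16/REV16W rules exercised by the codegen test above match the
byte-swap idiom at the source level, not only the math/bits intrinsic. A
minimal sketch of the two equivalent forms (the hand-written helper name is
illustrative, not part of this CL):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// reverseBytes16ByHand spells out the shift-and-or idiom that the new
	// rules recognize; "|" could equally be "^" or "+". On arm (GOARM=6
	// and above) and on arm64 it should compile to the same single
	// REV16 / REV16W instruction as the intrinsic below.
	func reverseBytes16ByHand(x uint16) uint16 {
		return x<<8 | x>>8
	}

	func main() {
		const v = 0x1234
		fmt.Printf("%#04x %#04x\n", reverseBytes16ByHand(v), bits.ReverseBytes16(v)) // 0x3412 0x3412
	}

In the codegen test, an entry such as arm/6:"REV16" asserts that the named
instruction appears in the output compiled for that GOARM level, while a
-"..." entry asserts that the instruction must not appear.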
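
Note: the armBFAuxInt(lsb, width) values compared against v_0.AuxInt in the
rewrite functions pack a bitfield specification into a single aux integer,
and getARM64BFlsb/getARM64BFwidth take it apart again. A standalone sketch
of that encoding (the exact byte layout here is an assumption inferred from
how the helpers are used, not a quote of rewrite.go):

	package main

	import "fmt"

	// armBFAuxInt packs lsb and width into one int64. Assumed layout:
	// width in the low 8 bits, lsb in the next 8 bits.
	func armBFAuxInt(lsb, width int64) int64 {
		if lsb < 0 || lsb > 63 {
			panic("bit field lsb out of range")
		}
		if width < 1 || width > 64 {
			panic("bit field width out of range")
		}
		return width | lsb<<8
	}

	func getARM64BFlsb(bfc int64) int64   { return bfc >> 8 }
	func getARM64BFwidth(bfc int64) int64 { return bfc & 0xff }

	func main() {
		bfc := armBFAuxInt(8, 8) // the UBFX aux value the REV16W rules test for
		fmt.Println(getARM64BFlsb(bfc), getARM64BFwidth(bfc)) // 8 8
	}

With this encoding, a rule condition such as bfc == armBFAuxInt(32-c, c) is
a single integer comparison, which is why the generated matchers can check
a whole bitfield spec without unpacking it.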