( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
(XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
-(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-> (RORWconst [32-c] x)
-( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-> (RORWconst [32-c] x)
-(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-> (RORWconst [32-c] x)
(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
-> (RORW x y)
// ((x>>8) | (x<<8)) -> (REV16W x); here x has type uint16, and "|" can also be "^" or "+".
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [arm64BFAuxInt(8, 8)] x) x) -> (REV16W x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16W x)
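// Illustrative Go sketch (not part of this patch): the uint16 byte-swap shape
// these rules target, expected to lower to a single REV16W on arm64.
func swap16(x uint16) uint16 {
	return x>>8 | x<<8
}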
// Extract from reg pair
(ADDshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
( ORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
(XORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
-(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-> (EXTRWconst [32-c] x2 x)
-( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-> (EXTRWconst [32-c] x2 x)
-(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-> (EXTRWconst [32-c] x2 x)
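// Illustrative Go sketch (not from the patch): an "extract from register pair"
// shape with a fixed split at bit 16, the pattern the EXTR rules above match.
func extr(hi, lo uint64) uint64 {
	return hi<<16 | lo>>48
}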
// Generic rules rewrite certain AND operations into a pair of shifts.
// sbfiz
// (x << lc) >> rc
-(SRAconst [rc] (SLLconst [lc] x)) && lc > rc -> (SBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x)
-(MOVWreg (SLLconst [lc] x)) && lc < 32 -> (SBFIZ [arm64BFAuxInt(lc, 32-lc)] x)
-(MOVHreg (SLLconst [lc] x)) && lc < 16 -> (SBFIZ [arm64BFAuxInt(lc, 16-lc)] x)
-(MOVBreg (SLLconst [lc] x)) && lc < 8 -> (SBFIZ [arm64BFAuxInt(lc, 8-lc)] x)
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc -> (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 -> (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 -> (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 -> (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
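// Illustrative Go sketch (not from the patch): a signed left shift followed by
// a smaller right shift, which the sbfiz rules fold into one SBFIZ
// (lsb = lc-rc = 1, width = 64-lc = 60 here).
func sbfiz(x int64) int64 {
	return (x << 4) >> 3
}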
// sbfx
// (x << lc) >> rc
-(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc -> (SBFX [arm64BFAuxInt(rc-lc, 64-rc)] x)
-(SRAconst [rc] (MOVWreg x)) && rc < 32 -> (SBFX [arm64BFAuxInt(rc, 32-rc)] x)
-(SRAconst [rc] (MOVHreg x)) && rc < 16 -> (SBFX [arm64BFAuxInt(rc, 16-rc)] x)
-(SRAconst [rc] (MOVBreg x)) && rc < 8 -> (SBFX [arm64BFAuxInt(rc, 8-rc)] x)
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc -> (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRAconst [rc] (MOVWreg x)) && rc < 32 -> (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 -> (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 -> (SBFX [armBFAuxInt(rc, 8-rc)] x)
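// Illustrative Go sketch (not from the patch): an arithmetic right shift of a
// sign-extended value, which the sbfx rules fold into one SBFX
// (lsb = 3, width = 29 here).
func sbfx(x int32) int64 {
	return int64(x) >> 3
}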
// sbfiz/sbfx combinations: merge shifts into bitfield ops
(SRAconst [sc] (SBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc)
- -> (SBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ -> (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= getARM64BFlsb(bfc)
&& sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- -> (SBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ -> (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
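// Illustrative Go sketch (not from the patch): the sign extension first forms
// an SBFIZ, and the combination rules above then merge the outer shift into
// its lsb/width (assuming the earlier rules fire as written).
func sbfizMerge(x int32) int64 {
	return int64(x<<4) >> 3
}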
// ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
- -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFIZ [arm64BFAuxInt(sc, 32)] x)
-(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFIZ [arm64BFAuxInt(sc, 16)] x)
-(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFIZ [arm64BFAuxInt(sc, 8)] x)
+ -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFIZ [armBFAuxInt(sc, 32)] x)
+(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFIZ [armBFAuxInt(sc, 16)] x)
+(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFIZ [armBFAuxInt(sc, 8)] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
- -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
- -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
- -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
- -> (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc > rc -> (UBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x)
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc -> (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
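// Illustrative Go sketch (not from the patch): mask then shift left by
// constants, which the ubfiz rules fold into one UBFIZ (lsb = 4, width = 8).
func ubfiz(x uint64) uint64 {
	return (x & 0xff) << 4
}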
// ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
- -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFX [arm64BFAuxInt(sc, 32)] x)
-(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFX [arm64BFAuxInt(sc, 16)] x)
-(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFX [arm64BFAuxInt(sc, 8)] x)
+ -> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFX [armBFAuxInt(sc, 32)] x)
+(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFX [armBFAuxInt(sc, 16)] x)
+(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFX [armBFAuxInt(sc, 8)] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
- -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ -> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
- -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
- -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
- -> (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc < rc -> (UBFX [arm64BFAuxInt(rc-lc, 64-rc)] x)
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc -> (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
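// Illustrative Go sketch (not from the patch): shift right then mask by
// constants, which the ubfx rules fold into one UBFX (lsb = 25, width = 10).
func ubfx(x uint64) uint64 {
	return (x >> 25) & 1023
}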
// ubfiz/ubfx combinations: merge shifts into bitfield ops
(SRLconst [sc] (UBFX [bfc] x)) && sc < getARM64BFwidth(bfc)
- -> (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
(UBFX [bfc] (SRLconst [sc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- -> (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
(SLLconst [sc] (UBFIZ [bfc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- -> (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
(UBFIZ [bfc] (SLLconst [sc] x)) && sc < getARM64BFwidth(bfc)
- -> (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
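// Illustrative Go sketch (not from the patch): the inner shift-and-mask
// becomes a UBFX, and the combination rules above then fold the outer shift
// into its lsb/width (assuming the earlier rules fire as written).
func ubfxMerge(x uint64) uint64 {
	return ((x >> 20) & 0xff) >> 3
}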
// ((x << c1) >> c2) >> c3
(SRLconst [sc] (UBFIZ [bfc] x)) && sc == getARM64BFlsb(bfc)
-> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc)
- -> (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc > getARM64BFlsb(bfc)
&& sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- -> (UBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ -> (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
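// Illustrative Go sketch (not from the patch): the mask-and-shift forms a
// UBFIZ, and the outer unsigned shift is then either merged into it or, when
// the counts line up, reduced back to a plain AND by the rules just above.
func ubfizShift(x uint64) uint64 {
	return ((x & 0xff) << 4) >> 2
}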
// ((x << c1) << c2) >> c3
(UBFX [bfc] (SLLconst [sc] x)) && sc == getARM64BFlsb(bfc)
-> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc < getARM64BFlsb(bfc)
- -> (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc > getARM64BFlsb(bfc)
&& sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- -> (UBFIZ [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ -> (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
-> (BFI [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
- -> (BFI [arm64BFAuxInt(lc-rc, 64-lc)] x y)
+ -> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
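// Illustrative Go sketch (not from the patch): insert the low 8 bits of y
// into x at bit 4, the shape the bfi rules above turn into a single BFI.
func bfi(x, y uint64) uint64 {
	return x&^(0xff<<4) | (y&0xff)<<4
}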
// bfxil
(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(getARM64BFwidth(bfc))-1)
-> (BFXIL [bfc] y x)
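// Illustrative Go sketch (not from the patch): copy an 8-bit field starting
// at bit 16 of x into the low bits of y, the shape the bfxil rule above turns
// into a single BFXIL.
func bfxil(x, y uint64) uint64 {
	return y&^0xff | (x>>16)&0xff
}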
&& x.Uses == 1
&& clobber(x)
-> (MOVHstoreidx ptr idx w mem)
-(MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
-> (MOVHstore [i-1] {s} ptr0 w mem)
-(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
-> (MOVHstoreidx ptr1 idx1 w mem)
-(MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
-> (MOVHstore [i-1] {s} ptr0 w mem)
-(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
-> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
-(MOVHstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+(MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
-> (MOVWstore [i-2] {s} ptr0 w mem)
-(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
-> (MOVWstoreidx ptr1 idx1 w mem)
-(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x6)
-> (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
(MOVBstore [i] {s} ptr w
- x0:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w)
- x1:(MOVBstore [i-2] {s} ptr (UBFX [arm64BFAuxInt(16, 16)] w)
- x2:(MOVBstore [i-3] {s} ptr (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x2)
-> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
(MOVBstore [3] {s} p w
- x0:(MOVBstore [2] {s} p (UBFX [arm64BFAuxInt(8, 24)] w)
- x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [arm64BFAuxInt(16, 16)] w)
- x2:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x2)
-> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
(MOVBstoreidx ptr (ADDconst [3] idx) w
- x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(8, 24)] w)
- x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(16, 16)] w)
- x2:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x2)
-> (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
(MOVBstoreidx ptr idx w
- x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 24)] w)
- x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(16, 16)] w)
- x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
&& x0.Uses == 1
&& x1.Uses == 1
&& x2.Uses == 1
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
-(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
-(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
-(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
-(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVHstoreidx ptr idx w mem)
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
-(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) mem))
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
&& x.Uses == 1
&& clobber(x)
-> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
-(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 24)] w) mem))
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
return true
}
// match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x)
- // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (RORWconst [32-c] x)
for {
t := v.Type
if x != v.Args[1] {
break
}
- if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
v.AddArg(x)
return true
}
- // match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [arm64BFAuxInt(8, 8)] x) x)
+ // match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// cond:
// result: (REV16W x)
for {
if v_0.Type != typ.UInt16 {
break
}
- if v_0.AuxInt != arm64BFAuxInt(8, 8) {
+ if v_0.AuxInt != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
return true
}
// match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x2)
- // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
bfc := v_0.AuxInt
x := v_0.Args[0]
x2 := v.Args[1]
- if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
}
// match: (ANDconst [ac] (SLLconst [sc] x))
// cond: isARM64BFMask(sc, ac, sc)
- // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
ac := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
v.AddArg(x)
return true
}
// match: (ANDconst [ac] (SRLconst [sc] x))
// cond: isARM64BFMask(sc, ac, 0)
- // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
ac := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, 0))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
v.AddArg(x)
return true
}
}
// match: (MOVBUreg (SLLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<8-1, sc)
- // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SLLconst {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
v.AddArg(x)
return true
}
// match: (MOVBUreg (SRLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<8-1, 0)
- // result: (UBFX [arm64BFAuxInt(sc, 8)] x)
+ // result: (UBFX [armBFAuxInt(sc, 8)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SRLconst {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, 8)
+ v.AuxInt = armBFAuxInt(sc, 8)
v.AddArg(x)
return true
}
}
// match: (MOVBreg (SLLconst [lc] x))
// cond: lc < 8
- // result: (SBFIZ [arm64BFAuxInt(lc, 8-lc)] x)
+ // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SLLconst {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BFAuxInt(lc, 8-lc)
+ v.AuxInt = armBFAuxInt(lc, 8-lc)
v.AddArg(x)
return true
}
v.AddArg(mem)
return true
}
- // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(8, 8) {
+ if v_1.AuxInt != armBFAuxInt(8, 8) {
break
}
w := v_1.Args[0]
v.AddArg(mem)
return true
}
- // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(8, 8) {
+ if v_1.AuxInt != armBFAuxInt(8, 8) {
break
}
w := v_1.Args[0]
v.AddArg(mem)
return true
}
- // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(8, 24) {
+ if v_1.AuxInt != armBFAuxInt(8, 24) {
break
}
w := v_1.Args[0]
v.AddArg(mem)
return true
}
- // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(8, 24) {
+ if v_1.AuxInt != armBFAuxInt(8, 24) {
break
}
w := v_1.Args[0]
v.AddArg(mem)
return true
}
- // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
if x0_1.Op != OpARM64UBFX {
break
}
- if x0_1.AuxInt != arm64BFAuxInt(8, 24) {
+ if x0_1.AuxInt != armBFAuxInt(8, 24) {
break
}
if w != x0_1.Args[0] {
if x1_1.Op != OpARM64UBFX {
break
}
- if x1_1.AuxInt != arm64BFAuxInt(16, 16) {
+ if x1_1.AuxInt != armBFAuxInt(16, 16) {
break
}
if w != x1_1.Args[0] {
if x2_1.Op != OpARM64UBFX {
break
}
- if x2_1.AuxInt != arm64BFAuxInt(24, 8) {
+ if x2_1.AuxInt != armBFAuxInt(24, 8) {
break
}
if w != x2_1.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
if x0_1.Op != OpARM64UBFX {
break
}
- if x0_1.AuxInt != arm64BFAuxInt(8, 24) {
+ if x0_1.AuxInt != armBFAuxInt(8, 24) {
break
}
if w != x0_1.Args[0] {
if x1_1.Op != OpARM64UBFX {
break
}
- if x1_1.AuxInt != arm64BFAuxInt(16, 16) {
+ if x1_1.AuxInt != armBFAuxInt(16, 16) {
break
}
if w != x1_1.Args[0] {
if x2_2.Op != OpARM64UBFX {
break
}
- if x2_2.AuxInt != arm64BFAuxInt(24, 8) {
+ if x2_2.AuxInt != armBFAuxInt(24, 8) {
break
}
if w != x2_2.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
if x_1.Op != OpARM64UBFX {
break
}
- if x_1.AuxInt != arm64BFAuxInt(8, 8) {
+ if x_1.AuxInt != armBFAuxInt(8, 8) {
break
}
if w != x_1.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
if x_2.Op != OpARM64UBFX {
break
}
- if x_2.AuxInt != arm64BFAuxInt(8, 8) {
+ if x_2.AuxInt != armBFAuxInt(8, 8) {
break
}
if w != x_2.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) mem))
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
if x_1.Op != OpARM64UBFX {
break
}
- if x_1.AuxInt != arm64BFAuxInt(8, 24) {
+ if x_1.AuxInt != armBFAuxInt(8, 24) {
break
}
if w != x_1.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 24)] w) mem))
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
if x_2.Op != OpARM64UBFX {
break
}
- if x_2.AuxInt != arm64BFAuxInt(8, 24) {
+ if x_2.AuxInt != armBFAuxInt(8, 24) {
break
}
if w != x_2.Args[0] {
func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool {
b := v.Block
_ = b
- // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
// result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
for {
if x0_2.Op != OpARM64UBFX {
break
}
- if x0_2.AuxInt != arm64BFAuxInt(8, 24) {
+ if x0_2.AuxInt != armBFAuxInt(8, 24) {
break
}
if w != x0_2.Args[0] {
if x1_2.Op != OpARM64UBFX {
break
}
- if x1_2.AuxInt != arm64BFAuxInt(16, 16) {
+ if x1_2.AuxInt != armBFAuxInt(16, 16) {
break
}
if w != x1_2.Args[0] {
if x2_2.Op != OpARM64UBFX {
break
}
- if x2_2.AuxInt != arm64BFAuxInt(24, 8) {
+ if x2_2.AuxInt != armBFAuxInt(24, 8) {
break
}
if w != x2_2.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [arm64BFAuxInt(24, 8)] w) mem))))
+ // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
// result: (MOVWstoreidx ptr idx w mem)
for {
if x0_2.Op != OpARM64UBFX {
break
}
- if x0_2.AuxInt != arm64BFAuxInt(8, 24) {
+ if x0_2.AuxInt != armBFAuxInt(8, 24) {
break
}
if w != x0_2.Args[0] {
if x1_2.Op != OpARM64UBFX {
break
}
- if x1_2.AuxInt != arm64BFAuxInt(16, 16) {
+ if x1_2.AuxInt != armBFAuxInt(16, 16) {
break
}
if w != x1_2.Args[0] {
if x2_2.Op != OpARM64UBFX {
break
}
- if x2_2.AuxInt != arm64BFAuxInt(24, 8) {
+ if x2_2.AuxInt != armBFAuxInt(24, 8) {
break
}
if w != x2_2.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+ // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
for {
if x_2.Op != OpARM64UBFX {
break
}
- if x_2.AuxInt != arm64BFAuxInt(8, 8) {
+ if x_2.AuxInt != armBFAuxInt(8, 8) {
break
}
if w != x_2.Args[0] {
v.AddArg(mem)
return true
}
- // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 8)] w) mem))
+ // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstoreidx ptr idx w mem)
for {
if x_2.Op != OpARM64UBFX {
break
}
- if x_2.AuxInt != arm64BFAuxInt(8, 8) {
+ if x_2.AuxInt != armBFAuxInt(8, 8) {
break
}
if w != x_2.Args[0] {
}
// match: (MOVHUreg (SLLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<16-1, sc)
- // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SLLconst {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
v.AddArg(x)
return true
}
func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool {
// match: (MOVHUreg (SRLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<16-1, 0)
- // result: (UBFX [arm64BFAuxInt(sc, 16)] x)
+ // result: (UBFX [armBFAuxInt(sc, 16)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SRLconst {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, 16)
+ v.AuxInt = armBFAuxInt(sc, 16)
v.AddArg(x)
return true
}
}
// match: (MOVHreg (SLLconst [lc] x))
// cond: lc < 16
- // result: (SBFIZ [arm64BFAuxInt(lc, 16-lc)] x)
+ // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SLLconst {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BFAuxInt(lc, 16-lc)
+ v.AuxInt = armBFAuxInt(lc, 16-lc)
v.AddArg(x)
return true
}
v.AddArg(mem)
return true
}
- // match: (MOVHstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // match: (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(16, 16) {
+ if v_1.AuxInt != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
v.AddArg(mem)
return true
}
- // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(16, 16) {
+ if v_1.AuxInt != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
v.AddArg(mem)
return true
}
- // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
if v_1.Op != OpARM64UBFX {
break
}
- if v_1.AuxInt != arm64BFAuxInt(16, 16) {
+ if v_1.AuxInt != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
}
// match: (MOVWUreg (SLLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<32-1, sc)
- // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SLLconst {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
v.AddArg(x)
return true
}
// match: (MOVWUreg (SRLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<32-1, 0)
- // result: (UBFX [arm64BFAuxInt(sc, 32)] x)
+ // result: (UBFX [armBFAuxInt(sc, 32)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SRLconst {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, 32)
+ v.AuxInt = armBFAuxInt(sc, 32)
v.AddArg(x)
return true
}
}
// match: (MOVWreg (SLLconst [lc] x))
// cond: lc < 32
- // result: (SBFIZ [arm64BFAuxInt(lc, 32-lc)] x)
+ // result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpARM64SLLconst {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BFAuxInt(lc, 32-lc)
+ v.AuxInt = armBFAuxInt(lc, 32-lc)
v.AddArg(x)
return true
}
return true
}
// match: (ORshiftLL <t> [c] (UBFX [bfc] x) x)
- // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (RORWconst [32-c] x)
for {
t := v.Type
if x != v.Args[1] {
break
}
- if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
v.AddArg(x)
return true
}
- // match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [arm64BFAuxInt(8, 8)] x) x)
+ // match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// cond:
// result: (REV16W x)
for {
if v_0.Type != typ.UInt16 {
break
}
- if v_0.AuxInt != arm64BFAuxInt(8, 8) {
+ if v_0.AuxInt != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
return true
}
// match: (ORshiftLL <t> [c] (UBFX [bfc] x) x2)
- // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
bfc := v_0.AuxInt
x := v_0.Args[0]
x2 := v.Args[1]
- if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
}
// match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
// cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
- // result: (BFI [arm64BFAuxInt(lc-rc, 64-lc)] x y)
+ // result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
for {
rc := v.AuxInt
_ = v.Args[1]
break
}
v.reset(OpARM64BFI)
- v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
v.AddArg(x)
v.AddArg(y)
return true
}
// match: (SLLconst [sc] (ANDconst [ac] x))
// cond: isARM64BFMask(sc, ac, 0)
- // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, 0))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (MOVWUreg x))
// cond: isARM64BFMask(sc, 1<<32-1, 0)
- // result: (UBFIZ [arm64BFAuxInt(sc, 32)] x)
+ // result: (UBFIZ [armBFAuxInt(sc, 32)] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, 32)
+ v.AuxInt = armBFAuxInt(sc, 32)
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (MOVHUreg x))
// cond: isARM64BFMask(sc, 1<<16-1, 0)
- // result: (UBFIZ [arm64BFAuxInt(sc, 16)] x)
+ // result: (UBFIZ [armBFAuxInt(sc, 16)] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, 16)
+ v.AuxInt = armBFAuxInt(sc, 16)
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (MOVBUreg x))
// cond: isARM64BFMask(sc, 1<<8-1, 0)
- // result: (UBFIZ [arm64BFAuxInt(sc, 8)] x)
+ // result: (UBFIZ [armBFAuxInt(sc, 8)] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc, 8)
+ v.AuxInt = armBFAuxInt(sc, 8)
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (UBFIZ [bfc] x))
// cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- // result: (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
v.AddArg(x)
return true
}
}
// match: (SRAconst [rc] (SLLconst [lc] x))
// cond: lc > rc
- // result: (SBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x)
+ // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
v.AddArg(x)
return true
}
// match: (SRAconst [rc] (SLLconst [lc] x))
// cond: lc <= rc
- // result: (SBFX [arm64BFAuxInt(rc-lc, 64-rc)] x)
+ // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
v.AddArg(x)
return true
}
// match: (SRAconst [rc] (MOVWreg x))
// cond: rc < 32
- // result: (SBFX [arm64BFAuxInt(rc, 32-rc)] x)
+ // result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BFAuxInt(rc, 32-rc)
+ v.AuxInt = armBFAuxInt(rc, 32-rc)
v.AddArg(x)
return true
}
// match: (SRAconst [rc] (MOVHreg x))
// cond: rc < 16
- // result: (SBFX [arm64BFAuxInt(rc, 16-rc)] x)
+ // result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BFAuxInt(rc, 16-rc)
+ v.AuxInt = armBFAuxInt(rc, 16-rc)
v.AddArg(x)
return true
}
// match: (SRAconst [rc] (MOVBreg x))
// cond: rc < 8
- // result: (SBFX [arm64BFAuxInt(rc, 8-rc)] x)
+ // result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BFAuxInt(rc, 8-rc)
+ v.AuxInt = armBFAuxInt(rc, 8-rc)
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
// cond: sc < getARM64BFlsb(bfc)
- // result: (SBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // result: (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
// cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (SBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // result: (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
v.AddArg(x)
return true
}
}
// match: (SRLconst [rc] (SLLconst [lc] x))
// cond: lc > rc
- // result: (UBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x)
+ // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (ANDconst [ac] x))
// cond: isARM64BFMask(sc, ac, sc)
- // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (MOVWUreg x))
// cond: isARM64BFMask(sc, 1<<32-1, sc)
- // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (MOVHUreg x))
// cond: isARM64BFMask(sc, 1<<16-1, sc)
- // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (MOVBUreg x))
// cond: isARM64BFMask(sc, 1<<8-1, sc)
- // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+ v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
v.AddArg(x)
return true
}
// match: (SRLconst [rc] (SLLconst [lc] x))
// cond: lc < rc
- // result: (UBFX [arm64BFAuxInt(rc-lc, 64-rc)] x)
+ // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
rc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFX [bfc] x))
// cond: sc < getARM64BFwidth(bfc)
- // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
v.AddArg(x)
return true
}
func rewriteValueARM64_OpARM64SRLconst_10(v *Value) bool {
// match: (SRLconst [sc] (UBFIZ [bfc] x))
// cond: sc < getARM64BFlsb(bfc)
- // result: (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
// cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (UBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // result: (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
for {
sc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
v.AddArg(x)
return true
}
func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool {
// match: (UBFIZ [bfc] (SLLconst [sc] x))
// cond: sc < getARM64BFwidth(bfc)
- // result: (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
for {
bfc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
v.AddArg(x)
return true
}
func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool {
// match: (UBFX [bfc] (SRLconst [sc] x))
// cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
for {
bfc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
v.AddArg(x)
return true
}
}
// match: (UBFX [bfc] (SLLconst [sc] x))
// cond: sc < getARM64BFlsb(bfc)
- // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
for {
bfc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
// cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (UBFIZ [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // result: (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
for {
bfc := v.AuxInt
v_0 := v.Args[0]
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
v.AddArg(x)
return true
}
return true
}
// match: (XORshiftLL <t> [c] (UBFX [bfc] x) x)
- // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (RORWconst [32-c] x)
for {
t := v.Type
if x != v.Args[1] {
break
}
- if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
v.AddArg(x)
return true
}
- // match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [arm64BFAuxInt(8, 8)] x) x)
+ // match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// cond:
// result: (REV16W x)
for {
if v_0.Type != typ.UInt16 {
break
}
- if v_0.AuxInt != arm64BFAuxInt(8, 8) {
+ if v_0.AuxInt != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
return true
}
// match: (XORshiftLL <t> [c] (UBFX [bfc] x) x2)
- // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
bfc := v_0.AuxInt
x := v_0.Args[0]
x2 := v.Args[1]
- if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) {
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)