(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
// merge sbfx and sign-extension into sbfx
-(MOVWreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (SBFX [bfc] x)
-(MOVHreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (SBFX [bfc] x)
-(MOVBreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (SBFX [bfc] x)
+(MOVWreg (SBFX [bfc] x)) && bfc.width() <= 32 => (SBFX [bfc] x)
+(MOVHreg (SBFX [bfc] x)) && bfc.width() <= 16 => (SBFX [bfc] x)
+(MOVBreg (SBFX [bfc] x)) && bfc.width() <= 8 => (SBFX [bfc] x)
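For reference, lsb() and width() are the new, shorter accessors on arm64BitField. A minimal sketch of the packing they imply (the canonical definitions live in cmd/compile/internal/ssa/rewrite.go; the exact layout below is an assumption for illustration):

// Sketch only: pack width in the low byte and lsb above it.
type arm64BitField int16

func armBFAuxInt(lsb, width int64) arm64BitField {
	if lsb < 0 || lsb > 63 {
		panic("ARM64 bit field lsb out of range")
	}
	if width < 1 || lsb+width > 64 {
		panic("ARM64 bit field width out of range")
	}
	return arm64BitField(width | lsb<<8)
}

func (bfc arm64BitField) lsb() int64   { return int64(uint64(bfc) >> 8) }
func (bfc arm64BitField) width() int64 { return int64(bfc) & 0xff }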
// sbfiz/sbfx combinations: merge shifts into bitfield ops
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
- => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
- && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
- => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.lsb()
+ => (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.lsb()
+ && sc < bfc.lsb()+bfc.width()
+ => (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
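A source-level shape that can reach the first rule (a sketch; the function name is made up, and the exact SBFIZ the compiler forms first is an assumption):

// int64(x) << 11 can be formed as (SBFIZ [armBFAuxInt(11, 32)] x); the
// following >> 2 is then an SRAconst with sc=2 < lsb=11, so the first rule
// folds it to (SBFIZ [armBFAuxInt(9, 32)] x).
func sbfizMerge(x int32) int64 {
	return (int64(x) << 11) >> 2
}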
(SBFX [bfc] s:(SLLconst [sc] x))
&& s.Uses == 1
- && sc <= bfc.getARM64BFlsb()
- => (SBFX [armBFAuxInt(bfc.getARM64BFlsb() - sc, bfc.getARM64BFwidth())] x)
+ && sc <= bfc.lsb()
+ => (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
(SBFX [bfc] s:(SLLconst [sc] x))
&& s.Uses == 1
- && sc > bfc.getARM64BFlsb()
- => (SBFIZ [armBFAuxInt(sc - bfc.getARM64BFlsb(), bfc.getARM64BFwidth() - (sc-bfc.getARM64BFlsb()))] x)
+ && sc > bfc.lsb()
+ => (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)
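The second rule can be checked in isolation. A standalone sketch (plain Go, not compiler code) modeling the assumed instruction semantics: SBFX(lsb=2, width=30) over x<<8 equals SBFIZ(lsb=6, width=24), matching the rule with sc=8 > lsb=2:

package main

import "fmt"

// Reference models of the assumed instruction semantics.
func sbfx(x int64, lsb, width uint) int64 {
	return x << (64 - lsb - width) >> (64 - width) // extract field, sign-extend
}

func sbfiz(x int64, lsb, width uint) int64 {
	return x << (64 - width) >> (64 - width) << lsb // sign-extend low bits, shift up
}

func main() {
	for _, x := range []int64{0, 1, -1, 0x7fffff, -0x800000, 0x123456789abcdef} {
		lhs := sbfx(x<<8, 2, 30) // (SBFX [armBFAuxInt(2, 30)] (SLLconst [8] x))
		rhs := sbfiz(x, 6, 24)   // (SBFIZ [armBFAuxInt(6, 24)] x)
		fmt.Println(lhs == rhs)  // always true
	}
}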
// ubfiz
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
// merge ANDconst and ubfx into ubfx
(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
- (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
-(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0) =>
+ (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
+(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0) =>
(UBFX [bfc] x)
// merge ubfx and zero-extension into ubfx
-(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
-(MOVHUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (UBFX [bfc] x)
-(MOVBUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (UBFX [bfc] x)
+(MOVWUreg (UBFX [bfc] x)) && bfc.width() <= 32 => (UBFX [bfc] x)
+(MOVHUreg (UBFX [bfc] x)) && bfc.width() <= 16 => (UBFX [bfc] x)
+(MOVBUreg (UBFX [bfc] x)) && bfc.width() <= 8 => (UBFX [bfc] x)
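One shape that produces this pattern (a sketch; the name is made up):

// (x >> 3) & 0x1f becomes (UBFX [armBFAuxInt(3, 5)] x); the uint8 conversion
// is a MOVBUreg, and width 5 <= 8 makes the zero-extension redundant.
func ubfxNoZeroExt(x uint64) uint8 {
	return uint8((x >> 3) & 0x1f)
}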
// Extracting bits from across a zero-extension boundary.
(UBFX [bfc] e:(MOVWUreg x))
&& e.Uses == 1
- && bfc.getARM64BFlsb() < 32
- => (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb()))] x)
+ && bfc.lsb() < 32
+ => (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVHUreg x))
&& e.Uses == 1
- && bfc.getARM64BFlsb() < 16
- => (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb()))] x)
+ && bfc.lsb() < 16
+ => (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVBUreg x))
&& e.Uses == 1
- && bfc.getARM64BFlsb() < 8
- => (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb()))] x)
+ && bfc.lsb() < 8
+ => (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)
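A sketch of a shape hitting the 32-bit case (name made up):

// uint32(x) is a MOVWUreg; the extract has lsb=4 < 32, so it can read x
// directly with the width clamped to min(10, 32-4) = 10.
func ubfxAcrossZeroExt(x uint64) uint64 {
	return (uint64(uint32(x)) >> 4) & 0x3ff
}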
// ubfiz/ubfx combinations: merge shifts into bitfield ops
-(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
- => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
-(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
- => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
-(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
- => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
-(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
- => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.width()
+ => (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.width()+bfc.lsb() < 64
+ => (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.width()+bfc.lsb() < 64
+ => (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.width()
+ => (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
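A shape for the first of these merges (sketch; name made up):

// (x >> 5) & 0xfffff is (UBFX [armBFAuxInt(5, 20)] x); the outer >> 3 is an
// SRLconst with sc=3 < width=20, so the first rule yields
// (UBFX [armBFAuxInt(8, 17)] x).
func ubfxShiftMerge(x uint64) uint64 {
	return ((x >> 5) & 0xfffff) >> 3
}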
// ((x << c1) >> c2) >> c3
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
- => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
- => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
- && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
- => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.lsb()
+ => (ANDconst [1<<uint(bfc.width())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.lsb()
+ => (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.lsb()
+ && sc < bfc.lsb()+bfc.width()
+ => (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
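The last rule rests on the identity ((x & (1<<width - 1)) << lsb) >> sc == (x >> (sc-lsb)) & (1<<(lsb+width-sc) - 1) for lsb < sc < lsb+width. A standalone check (plain Go, not compiler code; the constants are arbitrary):

package main

import "fmt"

func ubfiz(x uint64, lsb, width uint) uint64 { return (x & (1<<width - 1)) << lsb }
func ubfx(x uint64, lsb, width uint) uint64  { return (x >> lsb) & (1<<width - 1) }

func main() {
	const lsb, width, sc = 8, 40, 12 // sc > lsb && sc < lsb+width
	for _, x := range []uint64{0, 1, 0xdeadbeefcafe, ^uint64(0)} {
		lhs := ubfiz(x, lsb, width) >> sc
		rhs := ubfx(x, sc-lsb, lsb+width-sc)
		fmt.Println(lhs == rhs) // always true
	}
}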
// ((x << c1) << c2) >> c3
-(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
- => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
- => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
- && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
- => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.lsb()
+ => (ANDconst [1<<uint(bfc.width())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.lsb()
+ => (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.lsb()
+ && sc < bfc.lsb()+bfc.width()
+ => (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
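The mirrored case: for sc > lsb, ((x << sc) >> lsb) & (1<<width - 1) equals (x & (1<<(lsb+width-sc) - 1)) << (sc-lsb), so the UBFX over the left shift re-expresses as a UBFIZ. With the hypothetical values sc=12, lsb=8, width=40:

func asUBFX(x uint64) uint64  { return ((x << 12) >> 8) & (1<<40 - 1) } // UBFX [armBFAuxInt(8, 40)] of x<<12
func asUBFIZ(x uint64) uint64 { return (x & (1<<36 - 1)) << 4 }         // UBFIZ [armBFAuxInt(4, 36)] x; same value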
// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
- && ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ && ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
=> (BFI [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x))
&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
=> (BFI [armBFAuxInt(lc-rc, 64-lc)] y x)
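A shape that triggers the first bfi rule (sketch; name made up):

// (x & 0xff) << 8 is (UBFIZ [armBFAuxInt(8, 8)] x) and the mask on y is
// ^((1<<8-1) << 8), so the OR becomes a single (BFI [armBFAuxInt(8, 8)] y x).
func bfi(x, y uint64) uint64 {
	return (y &^ (0xff << 8)) | ((x & 0xff) << 8)
}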
// bfxil
-(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.width())-1)
=> (BFXIL [bfc] y x)
-(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.width()
=> (BFXIL [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
=> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
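And for the first bfxil rule (sketch; name made up):

// (x >> 4) & 0xffff is (UBFX [armBFAuxInt(4, 16)] x) and the mask on y is
// ^(1<<16 - 1), so the OR becomes a single (BFXIL [armBFAuxInt(4, 16)] y x).
func bfxil(x, y uint64) uint64 {
	return (y &^ 0xffff) | ((x >> 4) & 0xffff)
}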
func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
v_0 := v.Args[0]
// match: (ANDconst [c] (UBFX [bfc] x))
// cond: isARM64BFMask(0, c, 0)
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
+ // result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
for {
c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(0, c, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0))))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0))))
v.AddArg(x)
return true
}
return false
}
// match: (MOVBUreg (UBFX [bfc] x))
- // cond: bfc.getARM64BFwidth() <= 8
+ // cond: bfc.width() <= 8
// result: (UBFX [bfc] x)
for {
if v_0.Op != OpARM64UBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(bfc.getARM64BFwidth() <= 8) {
+ if !(bfc.width() <= 8) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (MOVBreg (SBFX [bfc] x))
- // cond: bfc.getARM64BFwidth() <= 8
+ // cond: bfc.width() <= 8
// result: (SBFX [bfc] x)
for {
if v_0.Op != OpARM64SBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(bfc.getARM64BFwidth() <= 8) {
+ if !(bfc.width() <= 8) {
break
}
v.reset(OpARM64SBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (MOVHUreg (UBFX [bfc] x))
- // cond: bfc.getARM64BFwidth() <= 16
+ // cond: bfc.width() <= 16
// result: (UBFX [bfc] x)
for {
if v_0.Op != OpARM64UBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(bfc.getARM64BFwidth() <= 16) {
+ if !(bfc.width() <= 16) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (MOVHreg (SBFX [bfc] x))
- // cond: bfc.getARM64BFwidth() <= 16
+ // cond: bfc.width() <= 16
// result: (SBFX [bfc] x)
for {
if v_0.Op != OpARM64SBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(bfc.getARM64BFwidth() <= 16) {
+ if !(bfc.width() <= 16) {
break
}
v.reset(OpARM64SBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (MOVWUreg (UBFX [bfc] x))
- // cond: bfc.getARM64BFwidth() <= 32
+ // cond: bfc.width() <= 32
// result: (UBFX [bfc] x)
for {
if v_0.Op != OpARM64UBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(bfc.getARM64BFwidth() <= 32) {
+ if !(bfc.width() <= 32) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (MOVWreg (SBFX [bfc] x))
- // cond: bfc.getARM64BFwidth() <= 32
+ // cond: bfc.width() <= 32
// result: (SBFX [bfc] x)
for {
if v_0.Op != OpARM64SBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(bfc.getARM64BFwidth() <= 32) {
+ if !(bfc.width() <= 32) {
break
}
v.reset(OpARM64SBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
- // cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ // cond: ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
// result: (BFI [bfc] y x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64UBFIZ {
continue
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64ANDconst {
continue
}
ac := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
- if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
+ if !(ac == ^((1<<uint(bfc.width()) - 1) << uint(bfc.lsb()))) {
continue
}
v.reset(OpARM64BFI)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
break
}
// match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
- // cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ // cond: ac == ^(1<<uint(bfc.width())-1)
// result: (BFXIL [bfc] y x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64UBFX {
continue
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64ANDconst {
continue
}
ac := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
- if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
+ if !(ac == ^(1<<uint(bfc.width()) - 1)) {
continue
}
v.reset(OpARM64BFXIL)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
- // cond: sc == bfc.getARM64BFwidth()
+ // cond: sc == bfc.width()
// result: (BFXIL [bfc] y x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
break
}
y := v_1.Args[0]
- if !(sc == bfc.getARM64BFwidth()) {
+ if !(sc == bfc.width()) {
break
}
v.reset(OpARM64BFXIL)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
return false
}
func rewriteValueARM64_OpARM64SBFX(v *Value) bool {
v_0 := v.Args[0]
// match: (SBFX [bfc] s:(SLLconst [sc] x))
- // cond: s.Uses == 1 && sc <= bfc.getARM64BFlsb()
- // result: (SBFX [armBFAuxInt(bfc.getARM64BFlsb() - sc, bfc.getARM64BFwidth())] x)
+ // cond: s.Uses == 1 && sc <= bfc.lsb()
+ // result: (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
s := v_0
if s.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(s.AuxInt)
x := s.Args[0]
- if !(s.Uses == 1 && sc <= bfc.getARM64BFlsb()) {
+ if !(s.Uses == 1 && sc <= bfc.lsb()) {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
v.AddArg(x)
return true
}
// match: (SBFX [bfc] s:(SLLconst [sc] x))
- // cond: s.Uses == 1 && sc > bfc.getARM64BFlsb()
- // result: (SBFIZ [armBFAuxInt(sc - bfc.getARM64BFlsb(), bfc.getARM64BFwidth() - (sc-bfc.getARM64BFlsb()))] x)
+ // cond: s.Uses == 1 && sc > bfc.lsb()
+ // result: (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
s := v_0
if s.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(s.AuxInt)
x := s.Args[0]
- if !(s.Uses == 1 && sc > bfc.getARM64BFlsb()) {
+ if !(s.Uses == 1 && sc > bfc.lsb()) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFwidth()-(sc-bfc.getARM64BFlsb())))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.width()-(sc-bfc.lsb())))
v.AddArg(x)
return true
}
return false
}
// match: (SLLconst [sc] (UBFIZ [bfc] x))
- // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
- // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ // cond: sc+bfc.width()+bfc.lsb() < 64
+ // result: (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ if !(sc+bfc.width()+bfc.lsb() < 64) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()))
v.AddArg(x)
return true
}
return false
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
- // cond: sc < bfc.getARM64BFlsb()
- // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ // cond: sc < bfc.lsb()
+ // result: (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SBFIZ {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < bfc.getARM64BFlsb()) {
+ if !(sc < bfc.lsb()) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
- // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
- // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ // cond: sc >= bfc.lsb() && sc < bfc.lsb()+bfc.width()
+ // result: (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SBFIZ {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ if !(sc >= bfc.lsb() && sc < bfc.lsb()+bfc.width()) {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc))
v.AddArg(x)
return true
}
return false
}
// match: (SRLconst [sc] (UBFX [bfc] x))
- // cond: sc < bfc.getARM64BFwidth()
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ // cond: sc < bfc.width()
+ // result: (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < bfc.getARM64BFwidth()) {
+ if !(sc < bfc.width()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc == bfc.getARM64BFlsb()
- // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ // cond: sc == bfc.lsb()
+ // result: (ANDconst [1<<uint(bfc.width())-1] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc == bfc.getARM64BFlsb()) {
+ if !(sc == bfc.lsb()) {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.width()) - 1)
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc < bfc.getARM64BFlsb()
- // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ // cond: sc < bfc.lsb()
+ // result: (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < bfc.getARM64BFlsb()) {
+ if !(sc < bfc.lsb()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
- // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ // cond: sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()
+ // result: (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ if !(sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc))
v.AddArg(x)
return true
}
return false
}
func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFIZ [bfc] (SLLconst [sc] x))
- // cond: sc < bfc.getARM64BFwidth()
- // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ // cond: sc < bfc.width()
+ // result: (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < bfc.getARM64BFwidth()) {
+ if !(sc < bfc.width()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc))
v.AddArg(x)
return true
}
return false
}
func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFX [bfc] (ANDconst [c] x))
- // cond: isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)
+ // cond: isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0)
// result: (UBFX [bfc] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb()+bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)) {
+ if !(isARM64BFMask(0, c, 0) && bfc.lsb()+bfc.width() <= arm64BFWidth(c, 0)) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg(x)
return true
}
// match: (UBFX [bfc] e:(MOVWUreg x))
- // cond: e.Uses == 1 && bfc.getARM64BFlsb() < 32
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb()))] x)
+ // cond: e.Uses == 1 && bfc.lsb() < 32
+ // result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
e := v_0
if e.Op != OpARM64MOVWUreg {
break
}
x := e.Args[0]
- if !(e.Uses == 1 && bfc.getARM64BFlsb() < 32) {
+ if !(e.Uses == 1 && bfc.lsb() < 32) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb())))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb())))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] e:(MOVHUreg x))
- // cond: e.Uses == 1 && bfc.getARM64BFlsb() < 16
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb()))] x)
+ // cond: e.Uses == 1 && bfc.lsb() < 16
+ // result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
e := v_0
if e.Op != OpARM64MOVHUreg {
break
}
x := e.Args[0]
- if !(e.Uses == 1 && bfc.getARM64BFlsb() < 16) {
+ if !(e.Uses == 1 && bfc.lsb() < 16) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb())))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb())))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] e:(MOVBUreg x))
- // cond: e.Uses == 1 && bfc.getARM64BFlsb() < 8
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb()))] x)
+ // cond: e.Uses == 1 && bfc.lsb() < 8
+ // result: (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
e := v_0
if e.Op != OpARM64MOVBUreg {
break
}
x := e.Args[0]
- if !(e.Uses == 1 && bfc.getARM64BFlsb() < 8) {
+ if !(e.Uses == 1 && bfc.lsb() < 8) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb())))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb())))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SRLconst [sc] x))
- // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ // cond: sc+bfc.width()+bfc.lsb() < 64
+ // result: (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SRLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ if !(sc+bfc.width()+bfc.lsb() < 64) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()+sc, bfc.width()))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc == bfc.getARM64BFlsb()
- // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ // cond: sc == bfc.lsb()
+ // result: (ANDconst [1<<uint(bfc.width())-1] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc == bfc.getARM64BFlsb()) {
+ if !(sc == bfc.lsb()) {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.width()) - 1)
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc < bfc.getARM64BFlsb()
- // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ // cond: sc < bfc.lsb()
+ // result: (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < bfc.getARM64BFlsb()) {
+ if !(sc < bfc.lsb()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.lsb()-sc, bfc.width()))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
- // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ // cond: sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()
+ // result: (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
for {
bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ if !(sc > bfc.lsb() && sc < bfc.lsb()+bfc.width()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc))
v.AddArg(x)
return true
}
return false
}
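Rules like these are normally pinned down by codegen tests. A hedged sketch in the style of test/codegen/bitfield.go (the file and the exact assembly-check regexps are assumptions):

// Run under the codegen test harness, not as an ordinary unit test.
func sbfxCheck(x int64) int64 {
	return (x << 3) >> 6 // arm64:"SBFX"
}

func ubfizCheck(x uint64) uint64 {
	return (x & 0xfff) << 7 // arm64:"UBFIZ"
}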