// sbfiz
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// int64(x << lc)
(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
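// A worked example (editor's sketch, not a rule): for an int8 x and lc = 3,
// int64(x << 3) keeps bits 0-4 of x, places them at bit 3, and sign-extends
// from bit 7; that is exactly SBFIZ with lsb = 3, width = 5 (= 8-lc).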
// sbfx
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// int64(x) >> rc
(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
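// A worked example (editor's sketch, not a rule): for an int8 x and rc = 5,
// int64(x) >> 5 leaves bits 5-7 of x, sign-extended from bit 7; that is
// SBFX with lsb = 5, width = 3 (= 8-rc).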
// merge shift with sbfiz into sbfx
(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
	=> (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
// ubfiz
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// uint64(x) << lc
+(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+// uint64(x << lc)
+(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
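+// Editor's note on the two width forms above: in uint64(x) << lc the operand
+// is zero-extended before the shift, so the full narrow width survives unless
+// it would cross bit 63, hence min(width, 64-lc); in uint64(x << lc) the shift
+// happens in the narrow type first, so high bits are discarded and the field
+// shrinks to width-lc, with lc < width required.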
+
+// merge ANDconst into ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
-(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
-(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
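// A worked example (editor's sketch, not a rule): (x & 0xfff) << 3 matches the
// first rule with ac = 0xfff, a contiguous 12-bit mask, so it becomes
// UBFIZ with lsb = 3, width = 12; isARM64BFMask rejects masks that are not a
// single run of ones or whose run would extend past bit 63 after shifting.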
-(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
- => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
-(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
- => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
-(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
- => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
-// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// ubfx
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// uint64(x) >> rc
+(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
+// uint64(x >> rc)
+(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
+(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
+(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
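+// Editor's note, mirroring the ubfiz case: uint64(x) >> rc shifts an already
+// zero-extended value, so only bits rc..width-1 remain (width-rc wide), while
+// uint64(x >> rc) truncates after a full 64-bit shift, so a complete
+// narrow-width field starting at bit rc survives.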
+// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
-(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
-(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
-(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
- => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
-(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
- => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
-(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
- => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
-// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
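// Editor's note: when the extracted field is at most 32 bits wide, the UBFX
// result already has zero upper bits, so the MOVWUreg zero-extension is a
// no-op and can be dropped.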
if lsb < 0 || lsb > 63 {
panic("ARM(64) bit field lsb constant out of range")
}
- if width < 1 || width > 64 {
+ if width < 1 || lsb+width > 64 {
panic("ARM(64) bit field width constant out of range")
}
return arm64BitField(width | lsb<<8)
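// Encoding example (editor's note): armBFAuxInt(3, 5) packs lsb = 3 and
// width = 5 into one auxInt as 3<<8 | 5 = 0x305; getARM64BFlsb and
// getARM64BFwidth unpack the same two fields.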
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (MOVBUreg (SLLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<8-1, sc)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ // match: (MOVBUreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, sc)) {
+ if !(lc < 8) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
v.AddArg(x)
return true
}
- // match: (MOVBUreg (SRLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<8-1, 0)
- // result: (UBFX [armBFAuxInt(sc, 8)] x)
+ // match: (MOVBUreg (SRLconst [rc] x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, 0)) {
+ if !(rc < 8) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8))
v.AddArg(x)
return true
}
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (MOVHUreg (SLLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<16-1, sc)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ // match: (MOVHUreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, sc)) {
+ if !(lc < 16) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
v.AddArg(x)
return true
}
- // match: (MOVHUreg (SRLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<16-1, 0)
- // result: (UBFX [armBFAuxInt(sc, 16)] x)
+ // match: (MOVHUreg (SRLconst [rc] x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, 0)) {
+ if !(rc < 16) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16))
v.AddArg(x)
return true
}
v.AuxInt = int64ToAuxInt(0)
return true
}
- // match: (MOVWUreg (SLLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<32-1, sc)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ // match: (MOVWUreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, sc)) {
+ if !(lc < 32) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
v.AddArg(x)
return true
}
- // match: (MOVWUreg (SRLconst [sc] x))
- // cond: isARM64BFMask(sc, 1<<32-1, 0)
- // result: (UBFX [armBFAuxInt(sc, 32)] x)
+ // match: (MOVWUreg (SRLconst [rc] x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
- sc := auxIntToInt64(v_0.AuxInt)
+ rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, 0)) {
+ if !(rc < 32) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32))
v.AddArg(x)
return true
}
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (ANDconst [ac] x))
- // cond: isARM64BFMask(sc, ac, 0)
- // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ // match: (SLLconst [lc] (MOVWUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64ANDconst {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
break
}
- ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, ac, 0)) {
- break
- }
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (MOVWUreg x))
- // cond: isARM64BFMask(sc, 1<<32-1, 0)
- // result: (UBFIZ [armBFAuxInt(sc, 32)] x)
+ // match: (SLLconst [lc] (MOVHUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64MOVWUreg {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, 0)) {
- break
- }
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (MOVHUreg x))
- // cond: isARM64BFMask(sc, 1<<16-1, 0)
- // result: (UBFIZ [armBFAuxInt(sc, 16)] x)
+ // match: (SLLconst [lc] (MOVBUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64MOVHUreg {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, 0)) {
- break
- }
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
v.AddArg(x)
return true
}
- // match: (SLLconst [sc] (MOVBUreg x))
- // cond: isARM64BFMask(sc, 1<<8-1, 0)
- // result: (UBFIZ [armBFAuxInt(sc, 8)] x)
+ // match: (SLLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64MOVBUreg {
+ if v_0.Op != OpARM64ANDconst {
break
}
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, 0)) {
+ if !(isARM64BFMask(sc, ac, 0)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
v.AddArg(x)
return true
}
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (ANDconst [ac] x))
- // cond: isARM64BFMask(sc, ac, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc < rc
+ // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64ANDconst {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
break
}
- ac := auxIntToInt64(v_0.AuxInt)
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(isARM64BFMask(sc, ac, sc)) {
+ if !(lc < rc) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (MOVWUreg x))
- // cond: isARM64BFMask(sc, 1<<32-1, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ // match: (SRLconst [rc] (MOVWUreg x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<32-1, sc)) {
+ if !(rc < 32) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (MOVHUreg x))
- // cond: isARM64BFMask(sc, 1<<16-1, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ // match: (SRLconst [rc] (MOVHUreg x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<16-1, sc)) {
+ if !(rc < 16) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [sc] (MOVBUreg x))
- // cond: isARM64BFMask(sc, 1<<8-1, sc)
- // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ // match: (SRLconst [rc] (MOVBUreg x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8-rc)] x)
for {
- sc := auxIntToInt64(v.AuxInt)
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
- if !(isARM64BFMask(sc, 1<<8-1, sc)) {
+ if !(rc < 8) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
v.AddArg(x)
return true
}
- // match: (SRLconst [rc] (SLLconst [lc] x))
- // cond: lc < rc
- // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ // match: (SRLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
- rc := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpARM64SLLconst {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
break
}
- lc := auxIntToInt64(v_0.AuxInt)
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(lc < rc) {
+ if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
}
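// Reference semantics (editor's sketch, not part of the CL): the four bitfield
// ops can be modeled in pure Go. These helpers are hypothetical and only
// illustrate what the instructions compute; lsb and width must satisfy the
// armBFAuxInt range checks above (0 <= lsb <= 63, 1 <= width, lsb+width <= 64).
func sbfizRef(x int64, lsb, width uint) int64 {
	// Insert the low `width` bits of x at bit `lsb` and sign-extend.
	return (x << (64 - width)) >> (64 - width - lsb)
}

func sbfxRef(x int64, lsb, width uint) int64 {
	// Extract `width` bits starting at bit `lsb` and sign-extend.
	return (x << (64 - lsb - width)) >> (64 - width)
}

func ubfizRef(x uint64, lsb, width uint) uint64 {
	// Insert the low `width` bits of x at bit `lsb`, zero elsewhere.
	return (x & (1<<width - 1)) << lsb
}

func ubfxRef(x uint64, lsb, width uint) uint64 {
	// Extract `width` bits starting at bit `lsb`, zero-extended.
	return (x >> lsb) & (1<<width - 1)
}

// For instance, ubfxRef(x, 25, 10) == (x>>25)&1023, the pattern that ubfx8
// below expects to compile to a single UBFX.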
// sbfiz
+// merge shifts into sbfiz: (x << lc) >> rc && lc > rc.
func sbfiz1(x int64) int64 {
// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
return (x << 4) >> 3
}
+// merge shift and sign-extension into sbfiz.
func sbfiz2(x int32) int64 {
return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]29",-"LSL"
}
func sbfiz4(x int8) int64 {
	return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]5",-"LSL"
}
+// sbfiz combinations.
+// merge shift with sbfiz into sbfiz.
func sbfiz5(x int32) int32 {
// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
return (x << 4) >> 3
}
// sbfx
+// merge shifts into sbfx: (x << lc) >> rc && lc <= rc.
func sbfx1(x int64) int64 {
return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
}
func sbfx2(x int64) int64 {
	return (x << 60) >> 60 // arm64:"SBFX\t[$]0, R[0-9]+, [$]4",-"LSL",-"ASR"
}
+// merge shift and sign-extension into sbfx.
func sbfx3(x int32) int64 {
return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]29",-"ASR"
}
func sbfx5(x int8) int64 {
	return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]5",-"ASR"
}
-func sbfx6(x int32) int32 {
+func sbfx6(x int32) int64 {
+ return int64(x >> 30) // arm64:"SBFX\t[$]30, R[0-9]+, [$]2"
+}
+
+func sbfx7(x int16) int64 {
+ return int64(x >> 10) // arm64:"SBFX\t[$]10, R[0-9]+, [$]6"
+}
+
+func sbfx8(x int8) int64 {
+ return int64(x >> 5) // arm64:"SBFX\t[$]5, R[0-9]+, [$]3"
+}
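+
+// Editor's note: in sbfx6-sbfx8 the shift happens in the narrow type, so the
+// extracted field shrinks with the shift amount: an int8 shifted right by 5
+// leaves bits 5-7, hence SBFX lsb = 5, width = 3.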
+
+// sbfx combinations.
+// merge shifts with sbfiz into sbfx.
+func sbfx9(x int32) int32 {
return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
}
// merge sbfx and sign-extension into sbfx.
-func sbfx7(x int32) int64 {
+func sbfx10(x int32) int64 {
c := x + 5
	return int64(c >> 20) // arm64:"SBFX\t[$]20, R[0-9]+, [$]12",-"MOVW\tR[0-9]+, R[0-9]+"
}
// ubfiz
+// merge shifts into ubfiz: (x << lc) >> rc && lc > rc.
func ubfiz1(x uint64) uint64 {
- // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
- // s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
- return (x & 0xfff) << 3
-}
-
-func ubfiz2(x uint64) uint64 {
- // arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
- // s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
- return (x << 4) & 0xfff0
+ // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
+ // s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
+ return (x << 4) >> 3
}
-func ubfiz3(x uint32) uint64 {
+// merge shift and zero-extension into ubfiz.
+func ubfiz2(x uint32) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]32",-"LSL"
}
-func ubfiz4(x uint16) uint64 {
+func ubfiz3(x uint16) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL"
}
-func ubfiz5(x uint8) uint64 {
+func ubfiz4(x uint8) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]8",-"LSL"
}
-func ubfiz6(x uint64) uint64 {
- // arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
- // s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
- return (x << 4) >> 3
+func ubfiz5(x uint8) uint64 {
+ return uint64(x) << 60 // arm64:"UBFIZ\t[$]60, R[0-9]+, [$]4",-"LSL"
+}
+
+func ubfiz6(x uint32) uint64 {
+	return uint64(x << 30) // arm64:"UBFIZ\t[$]30, R[0-9]+, [$]2"
+}
+
+func ubfiz7(x uint16) uint64 {
+	return uint64(x << 10) // arm64:"UBFIZ\t[$]10, R[0-9]+, [$]6"
+}
+
+func ubfiz8(x uint8) uint64 {
+	return uint64(x << 7) // arm64:"UBFIZ\t[$]7, R[0-9]+, [$]1"
+}
+
+// merge ANDconst into ubfiz.
+func ubfiz9(x uint64) uint64 {
+ // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
+ // s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
+ return (x & 0xfff) << 3
+}
+
+func ubfiz10(x uint64) uint64 {
+ // arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
+ // s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
+ return (x << 4) & 0xfff0
}
-func ubfiz7(x uint32) uint32 {
+// ubfiz combinations.
+func ubfiz11(x uint32) uint32 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"LSR"
return (x << 4) >> 3
}
-func ubfiz8(x uint64) uint64 {
+func ubfiz12(x uint64) uint64 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]20",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]43, [$]62, [$]1, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 4) >> 3
}
-func ubfiz9(x uint64) uint64 {
+func ubfiz13(x uint64) uint64 {
// arm64:"UBFIZ\t[$]5, R[0-9]+, [$]13",-"LSL",-"LSR",-"AND"
return ((x << 3) & 0xffff) << 2
}
-func ubfiz10(x uint64) uint64 {
+func ubfiz14(x uint64) uint64 {
// arm64:"UBFIZ\t[$]7, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]45, [$]56, [$]7, ",-"SLD",-"SRD",-"AND"
return ((x << 5) & (0xfff << 5)) << 2
}
// ubfx
+// merge shifts into ubfx: (x << lc) >> rc && lc < rc.
func ubfx1(x uint64) uint64 {
- // arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
- // s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
- return (x >> 25) & 1023
-}
-
-func ubfx2(x uint64) uint64 {
- // arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
- // s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
- return (x & 0x0ff0) >> 4
+ // arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
+ // s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
+ return (x << 1) >> 2
}
-func ubfx3(x uint32) uint64 {
+// merge shift and zero-extension into ubfx.
+func ubfx2(x uint32) uint64 {
return uint64(x >> 15) // arm64:"UBFX\t[$]15, R[0-9]+, [$]17",-"LSR"
}
-func ubfx4(x uint16) uint64 {
+func ubfx3(x uint16) uint64 {
return uint64(x >> 9) // arm64:"UBFX\t[$]9, R[0-9]+, [$]7",-"LSR"
}
-func ubfx5(x uint8) uint64 {
+func ubfx4(x uint8) uint64 {
return uint64(x >> 3) // arm64:"UBFX\t[$]3, R[0-9]+, [$]5",-"LSR"
}
-func ubfx6(x uint64) uint64 {
- // arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
- // s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
- return (x << 1) >> 2
+func ubfx5(x uint32) uint64 {
+ return uint64(x) >> 30 // arm64:"UBFX\t[$]30, R[0-9]+, [$]2"
+}
+
+func ubfx6(x uint16) uint64 {
+ return uint64(x) >> 10 // arm64:"UBFX\t[$]10, R[0-9]+, [$]6"
+}
+
+func ubfx7(x uint8) uint64 {
+ return uint64(x) >> 3 // arm64:"UBFX\t[$]3, R[0-9]+, [$]5"
+}
+
+// merge ANDconst into ubfx.
+func ubfx8(x uint64) uint64 {
+ // arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
+ // s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
+ return (x >> 25) & 1023
}
-func ubfx7(x uint32) uint32 {
+func ubfx9(x uint64) uint64 {
+ // arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
+ // s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
+ return (x & 0x0ff0) >> 4
+}
+
+// ubfx combinations.
+func ubfx10(x uint32) uint32 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR"
return (x << 1) >> 2
}
-func ubfx8(x uint64) uint64 {
+func ubfx11(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]52, [$]63, [$]63,",-"SLD",-"SRD",-"AND"
return ((x << 1) >> 2) & 0xfff
}
-func ubfx9(x uint64) uint64 {
+func ubfx12(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]11",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]53, [$]63, [$]60, ",-"SLD",-"SRD",-"AND"
return ((x >> 3) & 0xfff) >> 1
}
-func ubfx10(x uint64) uint64 {
+func ubfx13(x uint64) uint64 {
// arm64:"UBFX\t[$]5, R[0-9]+, [$]56",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]8, [$]63, [$]59, ",-"SLD",-"SRD"
return ((x >> 2) << 5) >> 8
}
-func ubfx11(x uint64) uint64 {
+func ubfx14(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]19",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]45, [$]63, [$]63, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 3) >> 4
}
// merge ubfx and zero-extension into ubfx.
-func ubfx12(x uint64) bool {
+func ubfx15(x uint64) bool {
midr := x + 10
part_num := uint16((midr >> 4) & 0xfff)
if part_num == 0xd0c { // arm64:"UBFX\t[$]4, R[0-9]+, [$]12",-"MOVHU\tR[0-9]+, R[0-9]+"