(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
=> (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+(SBFX [bfc] s:(SLLconst [sc] x))
+ && s.Uses == 1
+ && sc <= bfc.getARM64BFlsb()
+ => (SBFX [armBFAuxInt(bfc.getARM64BFlsb() - sc, bfc.getARM64BFwidth())] x)
+(SBFX [bfc] s:(SLLconst [sc] x))
+ && s.Uses == 1
+ && sc > bfc.getARM64BFlsb()
+ => (SBFIZ [armBFAuxInt(sc - bfc.getARM64BFlsb(), bfc.getARM64BFwidth() - (sc-bfc.getARM64BFlsb()))] x)
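
The two rules above are plain shift arithmetic: if the left shift is no larger than the field's lsb, the extract can simply start lower down in the unshifted value; if it is larger, the low bits of the field are the shifted-in zeros, so the operation is really a sign-extending insert-in-zeros. A minimal standalone sketch of those identities, with sbfx and sbfiz written out as ordinary Go helpers (illustrative models of the instructions only, not compiler code):

package main

import "fmt"

// sbfx models ARM64 SBFX: extract width bits of v starting at bit lsb,
// sign-extending the result (assumes lsb+width <= 64).
func sbfx(v int64, lsb, width uint) int64 {
	return (v << (64 - lsb - width)) >> (64 - width)
}

// sbfiz models ARM64 SBFIZ: sign-extend the low width bits of v,
// then shift the result left by lsb.
func sbfiz(v int64, lsb, width uint) int64 {
	return ((v << (64 - width)) >> (64 - width)) << lsb
}

func main() {
	x := int64(-0x1234)
	// sc <= lsb: SBFX of (x << sc) extracts the same bits as SBFX of x at lsb-sc.
	sc, lsb, width := uint(3), uint(5), uint(12)
	fmt.Println(sbfx(x<<sc, lsb, width) == sbfx(x, lsb-sc, width)) // true
	// sc > lsb: the low sc-lsb bits of the field are the shifted-in zeros,
	// so the whole thing collapses to an SBFIZ.
	sc, lsb, width = 9, 5, 12
	fmt.Println(sbfx(x<<sc, lsb, width) == sbfiz(x, sc-lsb, width-(sc-lsb))) // true
}
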
// ubfiz
// (x << lc) >> rc
(UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0) =>
(UBFX [bfc] x)
-// merge ubfx and zerso-extension into ubfx
+// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
(MOVHUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (UBFX [bfc] x)
(MOVBUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (UBFX [bfc] x)
+// merge a zero-extension into ubfx: bits at or above the extension boundary read as zero, so the field width can be clamped.
+(UBFX [bfc] e:(MOVWUreg x))
+ && e.Uses == 1
+ && bfc.getARM64BFlsb() < 32
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb()))] x)
+(UBFX [bfc] e:(MOVHUreg x))
+ && e.Uses == 1
+ && bfc.getARM64BFlsb() < 16
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb()))] x)
+(UBFX [bfc] e:(MOVBUreg x))
+ && e.Uses == 1
+ && bfc.getARM64BFlsb() < 8
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb()))] x)
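
These rules rely on the fact that a zero-extension only clears bits at and above the boundary, so an unsigned extract that starts below the boundary just has its width clamped to the bits that remain. A minimal standalone check of the 32-bit case, with ubfx modeled as a plain Go helper (illustrative only, not compiler code):

package main

import "fmt"

// ubfx models ARM64 UBFX: extract width bits of v starting at bit lsb, zero-extended.
func ubfx(v uint64, lsb, width uint) uint64 {
	return (v >> lsb) & ((1 << width) - 1)
}

func main() {
	x := uint64(0xdeadbeefcafef00d)
	lsb, width := uint(20), uint(24)
	// UBFX of the 32-bit zero-extension of x ...
	lhs := ubfx(uint64(uint32(x)), lsb, width)
	// ... equals UBFX of x itself with the width clamped at the boundary.
	rhs := ubfx(x, lsb, min(width, 32-lsb))
	fmt.Println(lhs == rhs) // true
}
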
+
// ubfiz/ubfx combinations: merge shifts into bitfield ops
(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
&& ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
=> (BFI [bfc] y x)
+(ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
+ && xc == ^(yc << s) // opposite masks
+ && yc & (yc+1) == 0 // power of 2 minus 1
+ && yc > 0 // not 0, not all 64 bits (there are better rewrites in that case)
+ && s+log64(yc+1) <= 64 // shifted mask doesn't overflow
+ => (BFI [armBFAuxInt(s, log64(yc+1))] x y)
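
The conditions encode the usual mask-based bitfield insert: yc selects the low log2(yc+1) bits of y, xc is the complementary mask that clears the destination field in x, and the OR stitches the two together, which is exactly what BFI does. A minimal standalone check under those assumptions (the bfi helper is an illustrative model of the instruction, not compiler code):

package main

import (
	"fmt"
	"math/bits"
)

// bfi models ARM64 BFI: insert the low width bits of y into x starting at
// bit lsb, leaving the rest of x unchanged.
func bfi(x, y uint64, lsb, width uint) uint64 {
	mask := uint64(1)<<width - 1
	return (x &^ (mask << lsb)) | ((y & mask) << lsb)
}

func main() {
	x, y := uint64(0x1122334455667788), uint64(0xabcdef)
	s := uint(8)
	yc := uint64(0xffff) // power of two minus one
	xc := ^(yc << s)     // opposite mask: clears the destination field in x
	lhs := (x & xc) | ((y & yc) << s)         // the ORshiftLL/ANDconst pattern
	rhs := bfi(x, y, s, uint(bits.Len64(yc))) // log64(yc+1) == bits.Len64(yc) here
	fmt.Println(lhs == rhs)                   // true
}
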
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
=> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
return rewriteValueARM64_OpARM64RORW(v)
case OpARM64SBCSflags:
return rewriteValueARM64_OpARM64SBCSflags(v)
+ case OpARM64SBFX:
+ return rewriteValueARM64_OpARM64SBFX(v)
case OpARM64SLL:
return rewriteValueARM64_OpARM64SLL(v)
case OpARM64SLLconst:
v.AddArg2(x2, x)
return true
}
+ // match: (ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
+ // cond: xc == ^(yc << s) && yc & (yc+1) == 0 && yc > 0 && s+log64(yc+1) <= 64
+ // result: (BFI [armBFAuxInt(s, log64(yc+1))] x y)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ xc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ yc := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(xc == ^(yc<<s) && yc&(yc+1) == 0 && yc > 0 && s+log64(yc+1) <= 64) {
+ break
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(s, log64(yc+1)))
+ v.AddArg2(x, y)
+ return true
+ }
// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
// cond: sc == bfc.getARM64BFwidth()
// result: (BFXIL [bfc] y x)
}
return false
}
+func rewriteValueARM64_OpARM64SBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBFX [bfc] s:(SLLconst [sc] x))
+ // cond: s.Uses == 1 && sc <= bfc.getARM64BFlsb()
+ // result: (SBFX [armBFAuxInt(bfc.getARM64BFlsb() - sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ s := v_0
+ if s.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(s.AuxInt)
+ x := s.Args[0]
+ if !(s.Uses == 1 && sc <= bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SBFX [bfc] s:(SLLconst [sc] x))
+ // cond: s.Uses == 1 && sc > bfc.getARM64BFlsb()
+ // result: (SBFIZ [armBFAuxInt(sc - bfc.getARM64BFlsb(), bfc.getARM64BFwidth() - (sc-bfc.getARM64BFlsb()))] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ s := v_0
+ if s.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(s.AuxInt)
+ x := s.Args[0]
+ if !(s.Uses == 1 && sc > bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFwidth()-(sc-bfc.getARM64BFlsb())))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64SLL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
v.AddArg(x)
return true
}
+ // match: (UBFX [bfc] e:(MOVWUreg x))
+ // cond: e.Uses == 1 && bfc.getARM64BFlsb() < 32
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb()))] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ e := v_0
+ if e.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := e.Args[0]
+ if !(e.Uses == 1 && bfc.getARM64BFlsb() < 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 32-bfc.getARM64BFlsb())))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] e:(MOVHUreg x))
+ // cond: e.Uses == 1 && bfc.getARM64BFlsb() < 16
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb()))] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ e := v_0
+ if e.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := e.Args[0]
+ if !(e.Uses == 1 && bfc.getARM64BFlsb() < 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 16-bfc.getARM64BFlsb())))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] e:(MOVBUreg x))
+ // cond: e.Uses == 1 && bfc.getARM64BFlsb() < 8
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb()))] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ e := v_0
+ if e.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := e.Args[0]
+ if !(e.Uses == 1 && bfc.getARM64BFlsb() < 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), 8-bfc.getARM64BFlsb())))
+ v.AddArg(x)
+ return true
+ }
// match: (UBFX [bfc] (SRLconst [sc] x))
// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)