(MOVBreg (MOVBZreg x)) => (MOVBreg x)
(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
+// Catch any remaining rotate+shift cases
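+// E.g. (MOVBZreg (SRWconst x [5])) is ((x >> 5) & 0xFF), which is a single RLWINM
+// rotating left by 27 (i.e. 32-5) and masking with 0xFF.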
+(MOVBZreg (SRWconst x [s])) && mergePPC64AndSrwi(0xFF,s) != 0 => (RLWINM [mergePPC64AndSrwi(0xFF,s)] x)
+(MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
+(MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
+(MOVWZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFFFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFFFFFF,r)] y)
+(Select0 (ANDCCconst [m] (RLWINM [r] y))) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+(SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
+(RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
+(RLWINM [r] (Select0 (ANDCCconst [a] u))) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
+// SLWconst is a special case of RLWINM which always zero-extends the result.
+(SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w)
+(MOVWZreg w:(SLWconst u)) => w
+
// H - there are more combinations than these
(MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat
return encodePPC64RotateMask(r_3, int64(mask_3), 32)
}
+// Test if RLWINM feeding into an ANDconst can be merged. Return the encoded RLWINM constant,
+// or 0 if they cannot be merged.
+func mergePPC64AndRlwinm(mask uint32, rlw int64) int64 {
+ r, _, _, mask_rlw := DecodePPC64RotateMask(rlw)
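+	// The AND is applied to the rotated result, so the two masks combine directly.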
+ mask_out := (mask_rlw & uint64(mask))
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_out)) {
+ return 0
+ }
+ return encodePPC64RotateMask(r, int64(mask_out), 32)
+}
+
+// Test if an ANDconst feeding into an RLWINM can be merged. Return the encoded RLWINM constant,
+// or 0 if they cannot be merged.
+func mergePPC64RlwinmAnd(rlw int64, mask uint32) int64 {
+ r, _, _, mask_rlw := DecodePPC64RotateMask(rlw)
+
+	// Rotate the input mask, combine it with the rlwinm mask, and test if the result is still a valid rlwinm mask.
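+	// This relies on rotl32(x & m, r) == rotl32(x, r) & rotl32(m, r).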
+ r_mask := bits.RotateLeft32(mask, int(r))
+
+ mask_out := (mask_rlw & uint64(r_mask))
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_out)) {
+ return 0
+ }
+ return encodePPC64RotateMask(r, int64(mask_out), 32)
+}
+
+// Test if an RLWINM feeding into an SLDconst can be merged. Return the encoded RLWINM constant,
+// or 0 if they cannot be merged.
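+// E.g. (SLDconst [8] (RLWINM [rotate 0, mask 0xFF] y)) merges into rotate 8, mask 0xFF00.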
+func mergePPC64SldiRlwinm(sldi, rlw int64) int64 {
+ r_1, mb, me, mask_1 := DecodePPC64RotateMask(rlw)
+ if mb > me || mb < sldi {
+ // Wrapping masks cannot be merged as the upper 32 bits are effectively undefined in this case.
+ // Likewise, if mb is less than the shift amount, it cannot be merged.
+ return 0
+ }
+	// Shift the mask, and fold the left shift into the rotate amount.
+ mask_3 := mask_1 << sldi
+ r_3 := (r_1 + sldi) & 31 // This can wrap.
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if uint64(uint32(mask_3)) != mask_3 {
+ return 0
+ }
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
+}
+
// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
// or return 0 if they cannot be combined.
func mergePPC64SldiSrw(sld, srw int64) int64 {
return rewriteValuePPC64_OpPPC64ORN(v)
case OpPPC64ORconst:
return rewriteValuePPC64_OpPPC64ORconst(v)
+ case OpPPC64RLWINM:
+ return rewriteValuePPC64_OpPPC64RLWINM(v)
case OpPPC64ROTL:
return rewriteValuePPC64_OpPPC64ROTL(v)
case OpPPC64ROTLW:
v.AddArg(x)
return true
}
+ // match: (MOVBZreg (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(0xFF,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(0xFF,s)] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(0xFF, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(0xFF, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(0xFF,r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
+ for {
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(0xFF, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(0xFF, r))
+ v.AddArg(y)
+ return true
+ }
// match: (MOVBZreg (OR <t> x (MOVWZreg y)))
// result: (MOVBZreg (OR <t> x y))
for {
v.AddArg(x)
return true
}
+ // match: (MOVHZreg (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(0xFFFF,r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
+ for {
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(0xFFFF, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(0xFFFF, r))
+ v.AddArg(y)
+ return true
+ }
// match: (MOVHZreg y:(MOVHZreg _))
// result: y
for {
v.AddArg(x)
return true
}
+ // match: (MOVWZreg (RLWINM [r] y))
+ // cond: mergePPC64AndRlwinm(0xFFFFFFFF,r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(0xFFFFFFFF,r)] y)
+ for {
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64AndRlwinm(0xFFFFFFFF, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(0xFFFFFFFF, r))
+ v.AddArg(y)
+ return true
+ }
+ // match: (MOVWZreg w:(SLWconst u))
+ // result: w
+ for {
+ w := v_0
+ if w.Op != OpPPC64SLWconst {
+ break
+ }
+ v.copyOf(w)
+ return true
+ }
// match: (MOVWZreg y:(MOVWZreg _))
// result: y
for {
}
return false
}
+func rewriteValuePPC64_OpPPC64RLWINM(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RLWINM [r] (MOVHZreg u))
+ // cond: mergePPC64RlwinmAnd(r,0xFFFF) != 0
+ // result: (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ u := v_0.Args[0]
+ if !(mergePPC64RlwinmAnd(r, 0xFFFF) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64RlwinmAnd(r, 0xFFFF))
+ v.AddArg(u)
+ return true
+ }
+ // match: (RLWINM [r] (Select0 (ANDCCconst [a] u)))
+ // cond: mergePPC64RlwinmAnd(r,uint32(a)) != 0
+ // result: (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ a := auxIntToInt64(v_0_0.AuxInt)
+ u := v_0_0.Args[0]
+ if !(mergePPC64RlwinmAnd(r, uint32(a)) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64RlwinmAnd(r, uint32(a)))
+ v.AddArg(u)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64ROTL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
v.AddArg(x)
return true
}
+ // match: (SLDconst [s] (RLWINM [r] y))
+ // cond: mergePPC64SldiRlwinm(s,r) != 0
+ // result: (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if !(mergePPC64SldiRlwinm(s, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64SldiRlwinm(s, r))
+ v.AddArg(y)
+ return true
+ }
// match: (SLDconst [c] z:(MOVBZreg x))
// cond: c < 8 && z.Uses == 1
// result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
}
func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
v_0 := v.Args[0]
+ // match: (SLWconst [s] (MOVWZreg w))
+ // result: (SLWconst [s] w)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ w := v_0.Args[0]
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg(w)
+ return true
+ }
// match: (SLWconst [c] z:(MOVBZreg x))
// cond: z.Uses == 1 && c < 8
// result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
v.AddArg(v0)
return true
}
+ // match: (Select0 (ANDCCconst [m] (RLWINM [r] y)))
+ // cond: mergePPC64AndRlwinm(uint32(m),r) != 0
+ // result: (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64RLWINM {
+ break
+ }
+ r := auxIntToInt64(v_0_0.AuxInt)
+ y := v_0_0.Args[0]
+ if !(mergePPC64AndRlwinm(uint32(m), r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndRlwinm(uint32(m), r))
+ v.AddArg(y)
+ return true
+ }
// match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x)))
// cond: z.Uses == 1
// result: (SRDconst [63] x)
// ppc64x: "SRD", "CLRLSLDI", -"RLWNM"
a[5] = a[(v>>32)&0x01]
// ppc64x: "SRD", "CLRLSLDI", -"RLWNM"
- a[5] = a[(v>>34)&0x03]
+ a[6] = a[(v>>34)&0x03]
// ppc64x: -"CLRLSLDI", "RLWNM\t[$]12, R[0-9]+, [$]21, [$]28, R[0-9]+"
b[0] = b[uint8(v>>23)]
// ppc64x: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+"
b[1] = b[(v>>20)&0xFF]
+ // ppc64x: "RLWNM", -"SLD"
+ b[2] = b[((uint64((uint32(v) >> 21)) & 0x3f) << 4)]
+}
+
+func checkShiftMask(a uint32, b uint64, z []uint32, y []uint64) {
+ _ = y[128]
+ _ = z[128]
+ // ppc64x: -"MOVBZ", -"SRW", "RLWNM"
+ z[0] = uint32(uint8(a >> 5))
+ // ppc64x: -"MOVBZ", -"SRW", "RLWNM"
+ z[1] = uint32(uint8((a >> 4) & 0x7e))
+ // ppc64x: "RLWNM\t[$]25, R[0-9]+, [$]27, [$]29, R[0-9]+"
+ z[2] = uint32(uint8(a>>7)) & 0x1c
+ // ppc64x: -"MOVWZ"
+ y[0] = uint64((a >> 6) & 0x1c)
+ // ppc64x: -"MOVWZ"
+ y[1] = uint64(uint32(b)<<6) + 1
+ // ppc64x: -"MOVHZ", -"MOVWZ"
+ y[2] = uint64((uint16(a) >> 9) & 0x1F)
+ // ppc64x: -"MOVHZ", -"MOVWZ", -"ANDCC"
+ y[3] = uint64(((uint16(a) & 0xFF0) >> 9) & 0x1F)
}
// 128 bit shifts