(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(ANDconst [m] (SRDconst x [s])) && mergePPC64AndSrdi(m,s) != 0 => (RLWINM [mergePPC64AndSrdi(m,s)] x)
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(AND (MOVDconst [m]) (SRDconst x [s])) && mergePPC64AndSrdi(m,s) != 0 => (RLWINM [mergePPC64AndSrdi(m,s)] x)
(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(ANDconst [m] (SLDconst x [s])) && mergePPC64AndSldi(m,s) != 0 => (RLWINM [mergePPC64AndSldi(m,s)] x)
+(AND (MOVDconst [m]) (SLDconst x [s])) && mergePPC64AndSldi(m,s) != 0 => (RLWINM [mergePPC64AndSldi(m,s)] x)
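+// The SRDconst/SLDconst forms above merge only when the shifted-and-masked value
+// can be produced from the low 32-bit word of the source (RLWINM reads and writes
+// only that word); e.g. (x>>4)&0xFF merges, while (x>>32)&0x1 does not.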
+
// Merge shift right + shift left and clear left (e.g. for a table lookup)
(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
(CLRLSLDI [c] (SRDconst [s] x)) && mergePPC64ClrlsldiSrd(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrd(int64(c),s)] x)
return encodePPC64RotateMask((32-s)&31, mask, 32)
}
+// Combine (ANDconst [m] (SRDconst [s])) into (RLWINM [y]) or return 0
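+// For example (illustrative): m=0xFF, s=4 describes (x>>4)&0xFF; every selected bit
+// comes from the low word of x, so it encodes as a 32-bit rotate left by 28 (right
+// by 4) with mask 0xFF.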
+func mergePPC64AndSrdi(m, s int64) int64 {
+ mask := mergePPC64RShiftMask(m, s, 64)
+
+ // Verify the rotate and mask result only uses the lower 32 bits.
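+	// RLWINM reads only the low 32-bit word of its source, so the merge is invalid
+	// if the mask selects any bit that originated in the upper 32 bits; rv marks the
+	// post-shift positions of those upper-word bits.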
+ rv := bits.RotateLeft64(0xFFFFFFFF00000000, -int(s))
+ if rv&uint64(mask) != 0 {
+ return 0
+ }
+ if !isPPC64WordRotateMask(mask) {
+ return 0
+ }
+ return encodePPC64RotateMask((32-s)&31, mask, 32)
+}
+
+// Combine (ANDconst [m] (SLDconst [s])) into (RLWINM [y]) or return 0
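+// For example (illustrative): m=0xFF0, s=4 describes (x<<4)&0xFF0, i.e. (x&0xFF)<<4,
+// which encodes as a 32-bit rotate left by 4 with mask 0xFF0.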
+func mergePPC64AndSldi(m, s int64) int64 {
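+	// The low s bits of (x << s) are always zero, so clear them from m up front.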
+ mask := -1 << s & m
+
+ // Verify the rotate and mask result only uses the lower 32 bits.
+ rv := bits.RotateLeft64(0xFFFFFFFF00000000, int(s))
+ if rv&uint64(mask) != 0 {
+ return 0
+ }
+ if !isPPC64WordRotateMask(mask) {
+ return 0
+ }
+ return encodePPC64RotateMask(s&31, mask, 32)
+}
+
// Test if a word shift right feeding into a CLRLSLDI can be merged into RLWINM.
// Return the encoded RLWINM constant, or 0 if they cannot be merged.
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
}
break
}
+ // match: (AND (MOVDconst [m]) (SRDconst x [s]))
+ // cond: mergePPC64AndSrdi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrdi(m,s)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(mergePPC64AndSrdi(m, s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrdi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SLDconst x [s]))
+ // cond: mergePPC64AndSldi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSldi(m,s)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SLDconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(mergePPC64AndSldi(m, s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSldi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
// match: (AND x (NOR y y))
// result: (ANDN x y)
for {
v.AddArg(x)
return true
}
+ // match: (ANDconst [m] (SRDconst x [s]))
+ // cond: mergePPC64AndSrdi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrdi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrdi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrdi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (SLDconst x [s]))
+ // cond: mergePPC64AndSldi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSldi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SLDconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSldi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSldi(m, s))
+ v.AddArg(x)
+ return true
+ }
// match: (ANDconst [c] (ANDconst [d] x))
// result: (ANDconst [c&d] x)
for {
a[2] = a[v>>25&0x7F]
// ppc64x: -"CLRLSLDI", "RLWNM\t[$]3, R[0-9]+, [$]29, [$]29, R[0-9]+"
a[3] = a[(v>>31)&0x01]
- // ppc64x: "SRD", "CLRLSLDI", -"RLWNM"
- a[4] = a[(v>>30)&0x07]
- // ppc64x: "SRD", "CLRLSLDI", -"RLWNM"
- a[5] = a[(v>>32)&0x01]
- // ppc64x: "SRD", "CLRLSLDI", -"RLWNM"
- a[6] = a[(v>>34)&0x03]
// ppc64x: -"CLRLSLDI", "RLWNM\t[$]12, R[0-9]+, [$]21, [$]28, R[0-9]+"
b[0] = b[uint8(v>>23)]
// ppc64x: -"CLRLSLDI", "RLWNM\t[$]15, R[0-9]+, [$]21, [$]28, R[0-9]+"
b[2] = b[((uint64((uint32(v) >> 21)) & 0x3f) << 4)]
// ppc64x: "RLWNM\t[$]11, R[0-9]+, [$]10, [$]15"
c[0] = c[((v>>5)&0x3F)<<16]
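+	// ((v>>7)&0x3F)<<7 equals v&0x1F80 (8064), so it can fold to a single
+	// and-immediate (ANDCC) instead of a rotate-and-mask.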
- // ppc64x: "RLWNM\t[$]0, R[0-9]+, [$]19, [$]24"
+ // ppc64x: "ANDCC\t[$]8064,"
c[1] = c[((v>>7)&0x3F)<<7]
}