From: khr@golang.org
Date: Thu, 19 Sep 2024 18:16:19 +0000 (-0700)
Subject: cmd/compile: small cleanups to rewrite rule helpers
X-Git-Tag: go1.24rc1~845
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=944a2ac3c7995b25944e00efc907f2410cb1f024;p=gostls13.git

cmd/compile: small cleanups to rewrite rule helpers

Call Type.Size() directly in rule conditions instead of going through
the sizeof helper, and delete the unused areAdjacentOffsets helper.

Change-Id: I50a19bd971176598bf8e4ef86ec98f008abe245c
Reviewed-on: https://go-review.googlesource.com/c/go/+/615198
Reviewed-by: David Chase
Reviewed-by: Cuong Manh Le
LUCI-TryBot-Result: Go LUCI
Auto-Submit: Keith Randall
Reviewed-by: Keith Randall
---

diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 323ec520fa..1ff60823b4 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -610,9 +610,9 @@
 (MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
 (MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
 
-(MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 32 => (S(R|RA)Wconst [c] x)
-(MOV(HZ|H)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 16 => (S(R|RA)Wconst [c] x)
-(MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) == 8 => (S(R|RA)Wconst [c] x)
+(MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() <= 32 => (S(R|RA)Wconst [c] x)
+(MOV(HZ|H)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() <= 16 => (S(R|RA)Wconst [c] x)
+(MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() == 8 => (S(R|RA)Wconst [c] x)
 
 // initial right shift will handle sign/zero extend
 (MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index 3f4e41e68c..243c54e5a8 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -742,10 +742,10 @@
 	=> x
 
 // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
-(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
-(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
-(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
-(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])
+(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
+(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])
 
 // Float Loads up to Zeros so they can be constant folded.
 (Load <t1> op:(OffPtr [o1] p1)
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 687f6a8537..045e571652 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -1263,10 +1263,6 @@ func overlap(offset1, size1, offset2, size2 int64) bool {
 	return false
 }
 
-func areAdjacentOffsets(off1, off2, size int64) bool {
-	return off1+size == off2 || off1 == off2+size
-}
-
 // check if value zeroes out upper 32-bit of 64-bit register.
 // depth limits recursion depth. In AMD64.rules 3 is used as limit,
 // because it catches same amount of cases as 4.
@@ -1823,12 +1819,6 @@ func arm64BFWidth(mask, rshift int64) int64 {
 	return nto(shiftedMask)
 }
 
-// sizeof returns the size of t in bytes.
-// It will panic if t is not a *types.Type.
-func sizeof(t interface{}) int64 {
-	return t.(*types.Type).Size()
-}
-
 // registerizable reports whether t is a primitive type that fits in
 // a register. It assumes float64 values will always fit into registers
 // even if that isn't strictly true.
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 0811566114..62bba98068 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -6812,7 +6812,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVBZreg (SRWconst [c] x))
-	// cond: sizeof(x.Type) == 8
+	// cond: x.Type.Size() == 8
 	// result: (SRWconst [c] x)
 	for {
 		if v_0.Op != OpPPC64SRWconst {
@@ -6820,7 +6820,7 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sizeof(x.Type) == 8) {
+		if !(x.Type.Size() == 8) {
 			break
 		}
 		v.reset(OpPPC64SRWconst)
@@ -7267,7 +7267,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVBreg (SRAWconst [c] x))
-	// cond: sizeof(x.Type) == 8
+	// cond: x.Type.Size() == 8
 	// result: (SRAWconst [c] x)
 	for {
 		if v_0.Op != OpPPC64SRAWconst {
@@ -7275,7 +7275,7 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sizeof(x.Type) == 8) {
+		if !(x.Type.Size() == 8) {
 			break
 		}
 		v.reset(OpPPC64SRAWconst)
@@ -8700,7 +8700,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVHZreg (SRWconst [c] x))
-	// cond: sizeof(x.Type) <= 16
+	// cond: x.Type.Size() <= 16
 	// result: (SRWconst [c] x)
 	for {
 		if v_0.Op != OpPPC64SRWconst {
@@ -8708,7 +8708,7 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sizeof(x.Type) <= 16) {
+		if !(x.Type.Size() <= 16) {
 			break
 		}
 		v.reset(OpPPC64SRWconst)
@@ -9239,7 +9239,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVHreg (SRAWconst [c] x))
-	// cond: sizeof(x.Type) <= 16
+	// cond: x.Type.Size() <= 16
 	// result: (SRAWconst [c] x)
 	for {
 		if v_0.Op != OpPPC64SRAWconst {
@@ -9247,7 +9247,7 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sizeof(x.Type) <= 16) {
+		if !(x.Type.Size() <= 16) {
 			break
 		}
 		v.reset(OpPPC64SRAWconst)
@@ -10068,7 +10068,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVWZreg (SRWconst [c] x))
-	// cond: sizeof(x.Type) <= 32
+	// cond: x.Type.Size() <= 32
 	// result: (SRWconst [c] x)
 	for {
 		if v_0.Op != OpPPC64SRWconst {
@@ -10076,7 +10076,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sizeof(x.Type) <= 32) {
+		if !(x.Type.Size() <= 32) {
 			break
 		}
 		v.reset(OpPPC64SRWconst)
@@ -10638,7 +10638,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
 		return true
 	}
 	// match: (MOVWreg (SRAWconst [c] x))
-	// cond: sizeof(x.Type) <= 32
+	// cond: x.Type.Size() <= 32
 	// result: (SRAWconst [c] x)
 	for {
 		if v_0.Op != OpPPC64SRAWconst {
@@ -10646,7 +10646,7 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
 		}
 		c := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sizeof(x.Type) <= 32) {
+		if !(x.Type.Size() <= 32) {
 			break
 		}
 		v.reset(OpPPC64SRAWconst)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index dee39eb261..760c55fca8 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -13559,7 +13559,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
+	// cond: isSamePtr(p1,p2) && t2.Size() == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
 	// result: (Const64F [math.Float64frombits(uint64(x))])
 	for {
 		t1 := v.Type
@@ -13575,7 +13575,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := auxIntToInt64(v_1_1.AuxInt)
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
+		if !(isSamePtr(p1, p2) && t2.Size() == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
 			break
 		}
 		v.reset(OpConst64F)
@@ -13583,7 +13583,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
+	// cond: isSamePtr(p1,p2) && t2.Size() == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
 	// result: (Const32F [math.Float32frombits(uint32(x))])
 	for {
 		t1 := v.Type
@@ -13599,7 +13599,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := auxIntToInt32(v_1_1.AuxInt)
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
+		if !(isSamePtr(p1, p2) && t2.Size() == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
 			break
 		}
 		v.reset(OpConst32F)
@@ -13607,7 +13607,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1)
+	// cond: isSamePtr(p1,p2) && t2.Size() == 8 && is64BitInt(t1)
 	// result: (Const64 [int64(math.Float64bits(x))])
 	for {
 		t1 := v.Type
@@ -13623,7 +13623,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := auxIntToFloat64(v_1_1.AuxInt)
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitInt(t1)) {
+		if !(isSamePtr(p1, p2) && t2.Size() == 8 && is64BitInt(t1)) {
 			break
 		}
 		v.reset(OpConst64)
@@ -13631,7 +13631,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)
+	// cond: isSamePtr(p1,p2) && t2.Size() == 4 && is32BitInt(t1)
 	// result: (Const32 [int32(math.Float32bits(x))])
 	for {
 		t1 := v.Type
@@ -13647,7 +13647,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := auxIntToFloat32(v_1_1.AuxInt)
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitInt(t1)) {
+		if !(isSamePtr(p1, p2) && t2.Size() == 4 && is32BitInt(t1)) {
 			break
 		}
 		v.reset(OpConst32)
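
Editor's note, a sketch rather than part of the patch: the cleanup trades a
dynamically typed helper for a direct method call. sizeof accepted an
interface{} and type-asserted to *types.Type at runtime, so a wrong argument
would panic while rewriting, whereas x.Type.Size() is an ordinary, statically
checked call. In the runnable Go sketch below, Type is a hypothetical stand-in
for *types.Type from cmd/compile/internal/types, with only the Size method the
rule conditions use.

	package main

	import "fmt"

	// Type stands in for *types.Type; Size reports the size in bytes.
	type Type struct{ size int64 }

	func (t *Type) Size() int64 { return t.size }

	// sizeof mirrors the helper this commit removes: it accepts an
	// interface{} and type-asserts, panicking if t is not a *Type.
	func sizeof(t interface{}) int64 {
		return t.(*Type).Size()
	}

	func main() {
		x := &Type{size: 8}
		fmt.Println(sizeof(x) == 8) // old rule condition: sizeof(x.Type) == 8
		fmt.Println(x.Size() == 8)  // new rule condition: x.Type.Size() == 8
	}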