Optimize them away if we can.
If not, be more careful about splicing them out after scheduling.
Change-Id: I660e54649d753dc456d2e25d389d375a16d76940
Reviewed-on: https://go-review.googlesource.com/c/go/+/627418
Reviewed-by: Shengwei Zhao <wingrez@126.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
(NilCheck ptr:(Addr {_} (SB)) _) => ptr
(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr
+// Nil checks of nil checks are redundant.
+// See comment at the end of https://go-review.googlesource.com/c/go/+/537775.
+(NilCheck ptr:(NilCheck _ _) _ ) => ptr
+
// for late-expanded calls, recognize memequal applied to a single constant byte
// Support is limited to 1, 2, 4, 8 byte sizes
(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
v.copyOf(ptr)
return true
}
+ // match: (NilCheck ptr:(NilCheck _ _) _ )
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpNilCheck {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpNot(v *Value) bool {
for _, b := range f.Blocks {
for _, v := range b.Values {
for i, a := range v.Args {
- if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck {
- v.SetArg(i, a.Args[0])
+ for a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck {
+ a = a.Args[0]
+ v.SetArg(i, a)
}
}
}
for i, c := range b.ControlValues() {
- if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck {
- b.ReplaceControl(i, c.Args[0])
+ for c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck {
+ c = c.Args[0]
+ b.ReplaceControl(i, c)
}
}
}