ABISelf *abi.ABIConfig // ABI for function being compiled
ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions.
- scheduled bool // Values in Blocks are in final order
- laidout bool // Blocks are ordered
- NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
- dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
- IsPgoHot bool
+ scheduled bool // Values in Blocks are in final order
+ laidout bool // Blocks are ordered
+ NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
+ dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+ IsPgoHot bool
+ HasDeferRangeFunc bool // if true, needs a deferreturn so deferrangefunc can use it for recover() return PC
// when register allocation is done, maps value ids to locations
RegAlloc []Location
callABI = s.f.ABI1
}
}
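+ // Remember that this function calls runtime.deferrangefunc, so that
+ // a deferreturn is emitted even if all returns are dead-code eliminated.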
+ if fn := n.Fun.Sym().Name; n.Fun.Sym().Pkg == ir.Pkgs.Runtime && fn == "deferrangefunc" {
+ s.f.HasDeferRangeFunc = true
+ }
break
}
closure = s.expr(fn)
// nop (which will never execute) after the call.
Arch.Ginsnop(s.pp)
}
- if openDeferInfo != nil {
+ if openDeferInfo != nil || f.HasDeferRangeFunc {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return back to the runtime.
+ //
+ // deferrangefunc needs at least one of these to exist; if all returns
+ // are dead-code eliminated, there might not be one otherwise.
s.pp.NextLive = s.livenessMap.DeferReturn
p := s.pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
throw("defer on system stack")
}
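+ // The compiler guarantees that any function calling deferrangefunc
+ // also has a deferreturn (see HasDeferRangeFunc); recover uses its
+ // PC as the return PC.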
+ fn := findfunc(sys.GetCallerPC())
+ if fn.deferreturn == 0 {
+ throw("no deferreturn")
+ }
+
d := newdefer()
d.link = gp._defer
gp._defer = d
- d.pc = sys.GetCallerPC()
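+ // d.pc is the PC of the caller's deferreturn, not of the call site.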
+ d.pc = fn.entry() + uintptr(fn.deferreturn)
// We must not be preempted between calling GetCallerSP and
// storing it to d.sp because GetCallerSP's result is a
// uintptr stack pointer.
// only gets us to the caller's fp.
gp.sched.bp = sp - goarch.PtrSize
}
+ // The value in ret is delivered IN A REGISTER, even if there is a
+ // stack ABI.
gp.sched.ret = 1
gogo(&gp.sched)
}
--- /dev/null
+// run
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+//go:noinline
+func i() {
+ for range yieldInts {
+ defer func() {
+ println("I")
+ recover()
+ }()
+ }
+ // This panic causes dead code elimination of the return block.
+ // The compiler should nonetheless emit a deferreturn.
+ panic("i panic")
+}
+
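+// h mixes top-level defers with defers registered inside range-over-func
+// loop bodies; the second iterator panics after yielding, and the
+// innermost deferred func recovers.
+//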
+//go:noinline
+func h() {
+ defer func() {
+ println("H first")
+ }()
+ for range yieldInts {
+ defer func() {
+ println("H second")
+ }()
+ }
+ defer func() {
+ println("H third")
+ }()
+ for range yieldIntsPanic {
+ defer func() {
+ println("h recover:called")
+ recover()
+ }()
+ }
+}
+
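+// yieldInts yields a single value and then returns.
+//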
+//go:noinline
+func yieldInts(yield func(int) bool) {
+ if !yield(0) {
+ return
+ }
+}
+
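+// g defers once at the top level and registers a recovering defer inside
+// a range-over-func loop whose iterator panics.
+//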
+//go:noinline
+func g() {
+ defer func() {
+ println("G first")
+ }()
+ for range yieldIntsPanic {
+ defer func() {
+ println("g recover:called")
+ recover()
+ }()
+ }
+}
+
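+// yieldIntsPanic yields a single value and then panics.
+//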
+//go:noinline
+func yieldIntsPanic(yield func(int) bool) {
+ if !yield(0) {
+ return
+ }
+ panic("yield stop")
+}
+
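+// next panics when called with 0 and otherwise returns i+1.
+//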
+//go:noinline
+func next(i int) int {
+ if i == 0 {
+ panic("next stop")
+ }
+ return i + 1
+}
+
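+// f registers a recovering defer inside an ordinary for loop; the loop's
+// post statement panics via next.
+//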
+//go:noinline
+func f() {
+ defer func() {
+ println("F first")
+ }()
+ for i := 0; i < 1; i = next(i) {
+ defer func() {
+ println("f recover:called")
+ recover()
+ }()
+ }
+}
+
+func main() {
+ f()
+ println("f returned")
+ g()
+ println("g returned")
+ h()
+ println("h returned")
+ i()
+ println("i returned")
+}
--- /dev/null
+f recover:called
+F first
+f returned
+g recover:called
+G first
+g returned
+h recover:called
+H third
+H second
+H first
+h returned
+I
+i returned