// been set and must not be clobbered.
}
+// deferrangefunc is called by functions that are about to
+// execute a range-over-function loop in which the loop body
+// may execute a defer statement. That defer needs to add to
+// the chain for the current function, not the func literal synthesized
+// to represent the loop body. To do that, the original function
+// calls deferrangefunc to obtain an opaque token representing
+// the current frame, and then the loop body uses deferprocat
+// instead of deferproc to add to that frame's defer lists.
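+//
+// For illustration only, here is a sketch of the intended behavior
+// (not the compiler's actual output; see the rewrite package noted
+// at the end of this comment for the real transformation). A loop
+// such as
+//
+//	for x := range seq {
+//		defer f(x)
+//	}
+//
+// behaves roughly like
+//
+//	token := deferrangefunc() // in the enclosing function
+//	seq(func(x int) bool {    // synthesized loop body
+//		// "defer f(x)" in the body is lowered to use the token:
+//		deferprocat(func() { f(x) }, token)
+//		return true
+//	})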
+//
+// The token is an 'any' with underlying type *atomic.Pointer[_defer].
+// It is the atomically-updated head of a linked list of _defer structs
+// representing deferred calls. At the same time, we create a _defer
+// struct on the main g._defer list with d.head set to this head pointer.
+//
+// The g._defer list is still a linked list of deferred calls,
+// but it now also has an atomic list hanging off of it:
+//
+//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
+//	                        | .head
+//	                        |
+//	                        +--> dY -> dX -> nil
+//
+// with each -> indicating a d.link pointer, and where drangefunc
+// has the d.rangefunc = true bit set.
+// Note that the function being ranged over may have added
+// its own defers (d4 and d3), so drangefunc need not be at the
+// top of the list when deferprocat is used. This is why we pass
+// the atomic head explicitly.
+//
+// To keep misbehaving programs from crashing the runtime (for
+// example, an iterator that calls the loop body from a different
+// goroutine), deferprocat pushes new defers onto the .head list
+// atomically. Because .head is a separate list from the goroutine's
+// main g._defer list, the ordinary defers can still be handled
+// non-atomically.
+//
+// In the diagram, dY and dX are meant to be processed when
+// drangefunc would be processed, which is to say the defer order
+// should be d4, d3, dY, dX, d2, d1. To make that happen,
+// when defer processing reaches a d with rangefunc=true,
+// it calls deferconvert to atomically take the extras
+// away from d.head and then add them to the main list.
+//
+// That is, deferconvert changes this list:
+//
+//	g._defer => drangefunc -> d2 -> d1 -> nil
+//	            | .head
+//	            |
+//	            +--> dY -> dX -> nil
+//
+// into this list:
+//
+//	g._defer => dY -> dX -> d2 -> d1 -> nil
+//
+// It also poisons *drangefunc.head so that any future
+// deferprocat using that head will throw.
+// (The atomic head is ordinary garbage collected memory so that
+// it's not a problem if user code holds onto it beyond
+// the lifetime of drangefunc.)
+//
+// TODO: We could arrange for the compiler to call into the
+// runtime after the loop finishes normally, to do an eager
+// deferconvert, which would catch a program that calls the loop
+// body and defers after the loop is done. If we add a more
+// general check for loop body misuse, though, this might not
+// be worth handling separately.
+//
+// See also ../cmd/compile/internal/rangefunc/rewrite.go.
+func deferrangefunc() any {
+ gp := getg()
+ if gp.m.curg != gp {
+ // go code on the system stack can't defer
+ throw("defer on system stack")
+ }
+
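+ // Create the placeholder defer (drangefunc in the diagram above)
+ // on the goroutine's ordinary defer chain.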
+ d := newdefer()
+ d.link = gp._defer
+ gp._defer = d
+ d.pc = getcallerpc()
+ // We must not be preempted between calling getcallersp and
+ // storing it to d.sp because getcallersp's result is a
+ // uintptr stack pointer.
+ d.sp = getcallersp()
+
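+ // Mark d as the rangefunc placeholder and allocate the atomic
+ // list head that deferprocat will push onto; the head is returned
+ // to the caller as the opaque token.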
+ d.rangefunc = true
+ d.head = new(atomic.Pointer[_defer])
+
+ return d.head
+}
+
+// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
+func badDefer() *_defer {
+ return (*_defer)(unsafe.Pointer(uintptr(1)))
+}
+
+// deferprocat is like deferproc but adds to the atomic list represented by frame.
+// See the doc comment for deferrangefunc for details.
+func deferprocat(fn func(), frame any) {
+ head := frame.(*atomic.Pointer[_defer])
+ if raceenabled {
+ racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
+ }
+ d1 := newdefer()
+ d1.fn = fn
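+ // Push d1 onto the atomic list, throwing if the head has already
+ // been poisoned by deferconvert (i.e. the range loop is over).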
+ for {
+ d1.link = head.Load()
+ if d1.link == badDefer() {
+ throw("defer after range func returned")
+ }
+ if head.CompareAndSwap(d1.link, d1) {
+ break
+ }
+ }
+
+ // Must be last - see deferproc above.
+ return0()
+}
+
+// deferconvert converts d's rangefunc defer list into an ordinary list
+// and returns the new head of the list, which should replace d in the
+// caller's defer chain.
+// See the doc comment for deferrangefunc for details.
+func deferconvert(d *_defer) *_defer {
+ head := d.head
+ if raceenabled {
+ racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
+ }
+ tail := d.link
+ d.rangefunc = false
+ d0 := d
+
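+ // Atomically claim the extra defers and poison the head so that
+ // any later deferprocat using this token throws.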
+ for {
+ d = head.Load()
+ if head.CompareAndSwap(d, badDefer()) {
+ break
+ }
+ }
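+ // If the loop body never deferred anything, just free the
+ // placeholder and continue with the rest of the chain.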
+ if d == nil {
+ freedefer(d0)
+ return tail
+ }
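+ // Give each extracted defer the placeholder's sp and pc so it is
+ // treated as part of the original frame, then splice the rest of
+ // the chain onto the end.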
+ for d1 := d; ; d1 = d1.link {
+ d1.sp = d0.sp
+ d1.pc = d0.pc
+ if d1.link == nil {
+ d1.link = tail
+ break
+ }
+ }
+ freedefer(d0)
+ return d
+}
+
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// The other fields are junk on entry to deferprocStack and
// are initialized here.
d.heap = false
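+ // Stack-allocated defer records are never rangefunc placeholders;
+ // deferrangefunc always heap-allocates its placeholder via newdefer.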
+ d.rangefunc = false
d.sp = getcallersp()
d.pc = getcallerpc()
// The lines below implement:
// d.panic = nil
// d.fd = nil
// d.link = gp._defer
+ // d.head = nil
// gp._defer = d
// But without write barriers. The first four are writes to
// the stack so they don't need a write barrier. The final write
// to gp._defer does not require a write barrier because we
// explicitly mark all the defer structures, so we don't need to
// keep track of pointers to them with a write barrier.
*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
+ *(*uintptr)(unsafe.Pointer(&d.head)) = 0
*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
return0()
return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
}
+ Recheck:
if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
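+ // A rangefunc placeholder is not itself a runnable defer;
+ // replace it with whatever the loop body deferred (possibly
+ // nothing) and examine the list again.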
+ if d.rangefunc {
+ gp._defer = deferconvert(d)
+ goto Recheck
+ }
+
fn := d.fn
d.fn = nil