}
func gcBgMarkWorker(_p_ *p) {
- type parkInfo struct {
- m *m // Release this m on park.
- attach *p // If non-nil, attach to this p on park.
- }
- var park parkInfo
-
gp := getg()
- park.m = acquirem()
- park.attach = _p_
+
+ type parkInfo struct {
+ m muintptr // Release this m on park.
+ attach puintptr // If non-nil, attach to this p on park.
+ }
+ // We pass park to a gopark unlock function, so it can't be on
+ // the stack (see gopark). Prevent deadlock from recursively
+ // starting GC by disabling preemption.
+ gp.m.preemptoff = "GC worker init"
+ park := new(parkInfo)
+ gp.m.preemptoff = ""
+
+ park.m.set(acquirem())
+ park.attach.set(_p_)
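
For reference, muintptr and puintptr are the runtime's untracked-pointer wrappers (runtime2.go). Ms and Ps are allocated outside the GC'd heap, and now that park is a heap object rather than a stack variable, the runtime's convention is to store such pointers as bare uintptrs the collector ignores. Their definitions are, roughly:

    // muintptr is a *m that is not tracked by the garbage collector.
    type muintptr uintptr

    func (mp muintptr) ptr() *m   { return (*m)(unsafe.Pointer(mp)) }
    func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

    // puintptr is a *p that is not tracked by the garbage collector.
    type puintptr uintptr

    func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
    func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
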
// Inform gcBgMarkStartWorkers that this worker is ready.
// After this point, the background mark worker is scheduled
// cooperatively by gcController.findRunnable. Hence, it must
// never be preempted, as this would put it into _Grunnable
// and put it on a run queue.
// The worker G is no longer running, so it's
// now safe to allow preemption.
- releasem(park.m)
+ releasem(park.m.ptr())
// If the worker isn't attached to its P,
// attach now. During initialization and after
// a phase change, the worker may have been
// running on a different P. As soon as we
// attach, the owner P may schedule the
// worker, so this must be done after the G is
// stopped.
- if park.attach != nil {
- p := park.attach
- park.attach = nil
+ if park.attach != 0 {
+ p := park.attach.ptr()
+ park.attach.set(nil)
// cas the worker because we may be
// racing with a new worker starting
// on this P.
if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(gp))) {
// The P got a new worker.
// Exit this worker.
return false
}
}
return true
- }, noescape(unsafe.Pointer(&park)), "GC worker (idle)", traceEvGoBlock, 0)
+ }, unsafe.Pointer(park), "GC worker (idle)", traceEvGoBlock, 0)
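
Note the second argument: the old code passed noescape(unsafe.Pointer(&park)) precisely to keep park on the worker's stack, hiding the pointer from escape analysis; that stack address is what could go stale once the G parks, and it is exactly what this change removes. With park heap-allocated via new, a plain unsafe.Pointer(park) is correct. For reference, noescape is defined in runtime/stubs.go:

    // noescape hides a pointer from escape analysis. noescape is
    // the identity function but escape analysis doesn't think the
    // output depends on the input.
    //go:nosplit
    func noescape(p unsafe.Pointer) unsafe.Pointer {
        x := uintptr(p)
        return unsafe.Pointer(x ^ 0)
    }
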
// Loop until the P dies and disassociates this
// worker (the P may later be reused, in which case
// it will get a new worker) or we failed to
// associate.
// Disable preemption so we can use the gcw. If the
// scheduler wants to preempt us, we'll stop draining,
// dispose the gcw, and then preempt.
- park.m = acquirem()
+ park.m.set(acquirem())
if gcBlackenEnabled == 0 {
throw("gcBgMarkWorker: blackening not enabled")
}
// Make this G preemptible and disassociate it
// as the worker for this P so
// findRunnableGCWorker doesn't try to
// schedule it.
_p_.gcBgMarkWorker.set(nil)
- releasem(park.m)
+ releasem(park.m.ptr())
gcMarkDone()
// We may be running on a different P at this
// point, so we can't reattach until this G is
// parked.
- park.m = acquirem()
- park.attach = _p_
+ park.m.set(acquirem())
+ park.attach.set(_p_)
}
}
}
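
Both the set(nil) above and the cas in the unlock function go through guintptr, the G analogue of the wrappers above; its compare-and-swap is what makes the worker-attach race safe. From runtime2.go, roughly:

    type guintptr uintptr

    func (gp guintptr) ptr() *g   { return (*g)(unsafe.Pointer(gp)) }
    func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

    func (gp *guintptr) cas(old, new guintptr) bool {
        return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
    }
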
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
+// unlockf must not access this G's stack, as it may be moved between
+// the call to gopark and the call to unlockf.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
mp := acquirem()
gp := mp.curg
}
}
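
The new rule exists because gopark switches to the system stack (via mcall(park_m)) and moves the G to _Gwaiting before invoking unlockf; from that point a concurrent GC may shrink or copy the G's stack, so a lock pointer aimed at that stack can be stale by the time unlockf dereferences it. A hypothetical misuse, for illustration only (not from the tree):

    // HYPOTHETICAL misuse: done lives on the parking G's stack, so
    // the unlock function may read the stack's old copy after the
    // stack is moved.
    var done uint32
    gopark(func(gp *g, lockp unsafe.Pointer) bool {
        return atomic.Load((*uint32)(lockp)) == 0 // possibly stale read
    }, unsafe.Pointer(&done), "illustration", traceEvGoBlock, 1)
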
-func selparkcommit(gp *g, usel unsafe.Pointer) bool {
- sel := (*hselect)(usel)
- scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
- scases := *(*[]scase)(unsafe.Pointer(&scaseslice))
- lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
- lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
- selunlock(scases, lockorder)
+func selparkcommit(gp *g, _ unsafe.Pointer) bool {
+ // This must not access gp's stack (see gopark). In
+ // particular, it must not access the *hselect. That's okay,
+ // because by the time this is called, gp.waiting has all
+ // channels in lock order.
+ var lastc *hchan
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if sg.c != lastc && lastc != nil {
+ // As soon as we unlock the channel, fields in
+ // any sudog with that channel may change,
+ // including c and waitlink. Since multiple
+ // sudogs may have the same channel, we unlock
+ // only after we've passed the last instance
+ // of a channel.
+ unlock(&lastc.lock)
+ }
+ lastc = sg.c
+ }
+ if lastc != nil {
+ unlock(&lastc.lock)
+ }
return true
}
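
The invariant selparkcommit relies on, that gp.waiting holds one sudog per case linked in lock order, is established by selectgo's enqueue pass before parking. Abridged sketch of that pass, from memory:

    // pass 2 - enqueue on all chans (abridged)
    nextp = &gp.waiting
    for _, casei := range lockorder {
        cas := &scases[casei]
        sg := acquireSudog()
        sg.g = gp
        sg.c = cas.c
        // Construct waiting list in lock order.
        *nextp = sg
        nextp = &sg.waitlink
        // ...enqueue sg on cas.c's sendq or recvq...
    }

Because the list mirrors lockorder, unlocking only after the last sudog for each channel releases every channel lock exactly once.
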
// wait for someone to wake us up
gp.param = nil
- gopark(selparkcommit, unsafe.Pointer(sel), "select", traceEvGoBlockSelect, 2)
+ gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 2)
// someone woke us up
sellock(scases, lockorder)