return gcphase == _GCmark || gcphase == _GCmarktermination || mheap_.shadow_enabled
}
+// Write barrier calls must not happen during critical GC and scheduler
+// related operations. In particular there are times when the GC assumes
+// that the world is stopped but scheduler related code is still being
+// executed: dealing with syscalls, putting gs on runnable queues, and
+// so forth. This code cannot execute write barriers because the GC
+// might drop them on the floor. Stopping the world involves removing
+// the p associated with an m. We use the fact that m.p == nil to
+// indicate that we are in one of these critical sections and throw if
+// the write is of a pointer to a heap object.
+// The p, m, and g pointers are the pointers used by the scheduler and
+// need to be operated on without write barriers. We use
+// setPNoWriteBarrier, setMNoWriteBarrier, and setGNoWriteBarrier to
+// avoid the write barrier.
//go:nosplit
func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
mp := acquirem()
releasem(mp)
return
}
- mp.inwb = true
systemstack(func() {
+ if mp.p == nil && memstats.enablegc && !mp.inwb && inheap(src) {
+ throw("writebarrierptr_nostore1 called with mp.p == nil")
+ }
+ mp.inwb = true
gcmarkwb_m(dst, src)
})
mp.inwb = false
}
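+
+// An illustrative sketch (not part of the original patch) of the kind of
+// write the check above is meant to catch; runnable is a hypothetical
+// heap-allocated g:
+//
+//	_p_ := releasep()       // getg().m.p is now nil; the GC may treat this M as stopped
+//	gp.schedlink = runnable // the compiler inserts a write barrier here; if
+//	                        // runnable points into the heap, writebarrierptr_nostore1 throws
+//
+// The scheduler code below instead writes such pointers through the
+// setGNoWriteBarrier family of helpers.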
// make pd ready; newly runnable goroutines (if any) are returned in rg/wg
+// May run during STW, so write barriers are not allowed.
+// Eliminating WB calls using setGNoWriteBarrier is safe since the gs
+// are reachable through allg.
+//go:nowritebarrier
func netpollready(gpp **g, pd *pollDesc, mode int32) {
var rg, wg *g
if mode == 'r' || mode == 'r'+'w' {
- rg = netpollunblock(pd, 'r', true)
+ setGNoWriteBarrier(&rg, netpollunblock(pd, 'r', true))
}
if mode == 'w' || mode == 'r'+'w' {
- wg = netpollunblock(pd, 'w', true)
+ setGNoWriteBarrier(&wg, netpollunblock(pd, 'w', true))
}
if rg != nil {
- rg.schedlink = *gpp
- *gpp = rg
+ setGNoWriteBarrier(&rg.schedlink, *gpp)
+ setGNoWriteBarrier(gpp, rg)
}
if wg != nil {
- wg.schedlink = *gpp
- *gpp = wg
+ setGNoWriteBarrier(&wg.schedlink, *gpp)
+ setGNoWriteBarrier(gpp, wg)
}
}
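+
+// Illustrative use (simplified; the real caller is netpoll and details
+// may differ):
+//
+//	var gp *g                   // head of the list of newly runnable gs
+//	netpollready(&gp, pd, mode) // prepends ready gs, linked via schedlink
+//	injectglist(gp)             // caller hands the list to the scheduler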
initsig()
}
- if _g_.m.mstartfn != nil {
+ if _g_.m.mstartfn != 0 {
fn := *(*func())(unsafe.Pointer(&_g_.m.mstartfn))
fn()
}
}
// Create a new m. It will start off with a call to fn, or else the scheduler.
+// fn needs to be static and not a heap-allocated closure.
+// May run during STW, so write barriers are not allowed.
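+// For example, newm(mspinning, _p_) passes a static function. By contrast,
+// a capturing closure such as newm(func() { use(x) }, _p_) would be heap
+// allocated (use and x are hypothetical), and the bare uintptr stored in
+// mstartfn would hide it from the GC.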
+//go:nowritebarrier
func newm(fn func(), _p_ *p) {
mp := allocm(_p_)
- mp.nextp = _p_
- mp.mstartfn = *(*unsafe.Pointer)(unsafe.Pointer(&fn))
-
+ // procresize made _p_ reachable through allp, which doesn't change during GC, so WB can be eliminated
+ setPNoWriteBarrier(&mp.nextp, _p_)
+ // Store fn's value as a uintptr; fn must be static, not heap allocated, so the WB can be eliminated
+ mp.mstartfn = *(*uintptr)(unsafe.Pointer(&fn))
if iscgo {
var ts cgothreadstart
if _cgo_thread_start == nil {
throw("_cgo_thread_start missing")
}
- ts.g = mp.g0
+ // mp is reachable via allm and mp.g0 never changes, so WB can be eliminated.
+ setGNoWriteBarrier(&ts.g, mp.g0)
ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
ts.fn = unsafe.Pointer(funcPC(mstart))
asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func startm(_p_ *p, spinning bool) {
lock(&sched.lock)
if _p_ == nil {
throw("startm: m has p")
}
mp.spinning = spinning
- mp.nextp = _p_
+ // procresize made _p_ reachable through allp, which doesn't change during GC, so WB can be eliminated
+ setPNoWriteBarrier(&mp.nextp, _p_)
notewakeup(&mp.park)
}
}
// Schedules the locked m to run the locked gp.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func startlockedm(gp *g) {
_g_ := getg()
// directly handoff current P to the locked m
incidlelocked(-1)
_p_ := releasep()
- mp.nextp = _p_
+ // procresize made _p_ reachable through allp, which doesn't change during GC, so WB can be eliminated
+ setPNoWriteBarrier(&mp.nextp, _p_)
notewakeup(&mp.park)
stopm()
}
for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
osyield()
}
- systemstack(traceGoSysExit)
+ // This can't be done since the GC may be running and this code
+ // will invoke write barriers.
+ // TODO: Figure out how to get traceGoSysExit into the trace log;
+ // without it, tracing is likely not to work as expected.
+ // systemstack(traceGoSysExit)
}
_g_.m.locks--
}
// Associate p and the current m.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func acquirep(_p_ *p) {
_g_ := getg()
print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
throw("acquirep: invalid p state")
}
- _g_.m.mcache = _p_.mcache
- _g_.m.p = _p_
- _p_.m = _g_.m
+ // _p_.mcache holds the mcache and _p_ is in allp, so WB can be eliminated
+ setMcacheNoWriteBarrier(&_g_.m.mcache, _p_.mcache)
+ // _p_ is in allp so WB can be eliminated
+ setPNoWriteBarrier(&_g_.m.p, _p_)
+ // m is in _g_.m and is reachable through allg, so WB can be eliminated
+ setMNoWriteBarrier(&_p_.m, _g_.m)
_p_.status = _Prunning
if trace.enabled {
// Put mp on midle list.
// Sched must be locked.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func mput(mp *m) {
- mp.schedlink = sched.midle
- sched.midle = mp
+ // sched.midle is reachable via allm, so WB can be eliminated.
+ setMNoWriteBarrier(&mp.schedlink, sched.midle)
+ // mp is reachable via allm, so WB can be eliminated.
+ setMNoWriteBarrier(&sched.midle, mp)
sched.nmidle++
checkdead()
}
// Try to get an m from midle list.
// Sched must be locked.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func mget() *m {
mp := sched.midle
if mp != nil {
- sched.midle = mp.schedlink
+ // mp.schedlink is reachable via mp, which is on allm, so WB can be eliminated.
+ setMNoWriteBarrier(&sched.midle, mp.schedlink)
sched.nmidle--
}
return mp
// Put gp on the global runnable queue.
// Sched must be locked.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func globrunqput(gp *g) {
gp.schedlink = nil
if sched.runqtail != nil {
- sched.runqtail.schedlink = gp
+ // gp is on allg, so these three WBs can be eliminated.
+ setGNoWriteBarrier(&sched.runqtail.schedlink, gp)
} else {
- sched.runqhead = gp
+ setGNoWriteBarrier(&sched.runqhead, gp)
}
- sched.runqtail = gp
+ setGNoWriteBarrier(&sched.runqtail, gp)
sched.runqsize++
}
// Put p on _Pidle list.
// Sched must be locked.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func pidleput(_p_ *p) {
- _p_.link = sched.pidle
- sched.pidle = _p_
+ // sched.pidle, _p_.link and _p_ are reachable via allp, so WB can be eliminated.
+ setPNoWriteBarrier(&_p_.link, sched.pidle)
+ setPNoWriteBarrier(&sched.pidle, _p_)
xadd(&sched.npidle, 1) // TODO: fast atomic
}
// Try to get a p from _Pidle list.
// Sched must be locked.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
func pidleget() *p {
_p_ := sched.pidle
if _p_ != nil {
- sched.pidle = _p_.link
+ // _p_.link is reachable via a _p_ in allp, so WB can be eliminated.
+ setPNoWriteBarrier(&sched.pidle, _p_.link)
xadd(&sched.npidle, -1) // TODO: fast atomic
}
return _p_
return (*g)(unsafe.Pointer(gp))
}
+// ps, ms, gs, and mcache are structures that must be manipulated at a
+// level lower than that of the normal Go language. For example the
+// routine that stops the world removes the p from the m structure,
+// informing the GC that this P is stopped, and then it moves the g to
+// the global runnable queue. If write barriers were allowed to happen
+// at this point, not only does the GC think the thread is stopped, but
+// the underlying structures like the p and m are not in a coherent
+// enough state to support the write barrier actions.
+// This is particularly painful since a partially executed write barrier
+// may mark the object but be delinquent in informing the GC that the
+// object needs to be scanned.
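+//
+// Schematically, greying an object is a two-step operation (a simplified
+// sketch, not the actual gcmarkwb_m code; setMarkBit and enqueueForScan
+// are hypothetical names):
+//
+//	setMarkBit(obj)     // obj now appears marked...
+//	enqueueForScan(obj) // ...but its interior pointers are only found if this runs
+//
+// A barrier that stops between the two steps leaves obj marked but never
+// scanned, and pointers later stored inside it can be missed.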
+
+// setGNoWriteBarrier does *gdst = gval without a write barrier.
+func setGNoWriteBarrier(gdst **g, gval *g) {
+ *(*uintptr)(unsafe.Pointer(gdst)) = uintptr(unsafe.Pointer(gval))
+}
+
+// setMNoWriteBarrier does *mdst = mval without a write barrier.
+func setMNoWriteBarrier(mdst **m, mval *m) {
+ *(*uintptr)(unsafe.Pointer(mdst)) = uintptr(unsafe.Pointer(mval))
+}
+
+// setPNoWriteBarrier does *pdst = pval without a write barrier.
+func setPNoWriteBarrier(pdst **p, pval *p) {
+ *(*uintptr)(unsafe.Pointer(pdst)) = uintptr(unsafe.Pointer(pval))
+}
+
+// setMcacheNoWriteBarrier does *mcachedst = mcacheval without a write barrier.
+func setMcacheNoWriteBarrier(mcachedst **mcache, mcacheval *mcache) {
+ *(*uintptr)(unsafe.Pointer(mcachedst)) = uintptr(unsafe.Pointer(mcacheval))
+}
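+
+// For comparison, an illustrative lowering (not the compiler's exact
+// output): with barriers enabled, a plain pointer store such as
+//
+//	sched.midle = mp
+//
+// becomes roughly
+//
+//	writebarrierptr((*uintptr)(unsafe.Pointer(&sched.midle)), uintptr(unsafe.Pointer(mp)))
+//
+// The setters above store the same bits through a uintptr, which the
+// compiler does not treat as a pointer write, so no barrier is emitted.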
+
type gobuf struct {
// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
sp uintptr
morebuf gobuf // gobuf arg to morestack
// Fields not known to debuggers.
- procid uint64 // for debuggers, but offset not hard-coded
- gsignal *g // signal-handling g
- tls [4]uintptr // thread-local storage (for x86 extern register)
- mstartfn unsafe.Pointer // todo go func()
- curg *g // current running goroutine
- caughtsig *g // goroutine running during fatal signal
- p *p // attached p for executing go code (nil if not executing go code)
+ procid uint64 // for debuggers, but offset not hard-coded
+ gsignal *g // signal-handling g
+ tls [4]uintptr // thread-local storage (for x86 extern register)
+ mstartfn uintptr // TODO: type as func(); note: this is a non-heap-allocated func()
+ curg *g // current running goroutine
+ caughtsig *g // goroutine running during fatal signal
+ p *p // attached p for executing go code (nil if not executing go code)
nextp *p
id int32
mallocing int32