// there was neither enough credit to steal nor enough work to
// do.
assistQueue struct {
- lock mutex
- head, tail guintptr
+ lock mutex
+ q gQueue
}
// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
lock(&work.assistQueue.lock)
- injectglist(work.assistQueue.head.ptr())
- work.assistQueue.head.set(nil)
- work.assistQueue.tail.set(nil)
+ injectglist(work.assistQueue.q.popList().head.ptr())
unlock(&work.assistQueue.lock)
}
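
For intuition, here is a minimal stand-alone sketch of this wake-all pattern. gNode and toyQueue are hypothetical stand-ins for g and gQueue (plain pointers instead of guintptr and g.schedlink), not runtime API; later sketches in this review reuse them. The key property is that popList detaches the entire queue in O(1) while the lock is held, so assists that block afterwards start on a fresh, empty queue.

    package toyqueue

    import "sync"

    // gNode and toyQueue are illustrative stand-ins for g and gQueue.
    type gNode struct {
    	id   int
    	next *gNode // plays the role of g.schedlink
    }

    type toyQueue struct {
    	head, tail *gNode
    }

    // pushBack adds n to the tail of q.
    func (q *toyQueue) pushBack(n *gNode) {
    	n.next = nil
    	if q.tail != nil {
    		q.tail.next = n
    	} else {
    		q.head = n
    	}
    	q.tail = n
    }

    // popList detaches the whole queue in O(1) and leaves q empty.
    func (q *toyQueue) popList() *gNode {
    	head := q.head
    	*q = toyQueue{}
    	return head
    }

    var (
    	mu      sync.Mutex
    	assistQ toyQueue
    )

    // wakeAll mirrors gcWakeAllAssists: take the whole list under the
    // lock and make each node runnable.
    func wakeAll(makeRunnable func(*gNode)) {
    	mu.Lock()
    	for n := assistQ.popList(); n != nil; n = n.next {
    		makeRunnable(n)
    	}
    	mu.Unlock()
    }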
}
gp := getg()
- oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
- if oldHead == 0 {
- work.assistQueue.head.set(gp)
- } else {
- oldTail.ptr().schedlink.set(gp)
- }
- work.assistQueue.tail.set(gp)
- gp.schedlink.set(nil)
+ oldList := work.assistQueue.q
+ work.assistQueue.q.pushBack(gp)
// Recheck for background credit now that this G is in
// the queue, but can still back out. This avoids a
// race in case background marking has flushed more
// credit since we checked above.
if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
- work.assistQueue.head = oldHead
- work.assistQueue.tail = oldTail
- if oldTail != 0 {
- oldTail.ptr().schedlink.set(nil)
+ work.assistQueue.q = oldList
+ if oldList.tail != 0 {
+ oldList.tail.ptr().schedlink.set(nil)
}
unlock(&work.assistQueue.lock)
return false
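
The back-out above works because a gQueue is a plain two-word value: snapshotting it before the pushBack and restoring it afterwards undoes the enqueue, except that pushBack also wrote gp into the old tail's schedlink, which the restore must clear by hand. A sketch of the same speculative-enqueue pattern, reusing the toy types from the first sketch (tryPark and creditAppeared are illustrative names):

    // tryPark speculatively enqueues gp, then backs out if credit
    // appeared in the meantime, mirroring gcParkAssist above.
    func tryPark(q *toyQueue, gp *gNode, creditAppeared func() bool) bool {
    	old := *q      // snapshot: two words, copied by value
    	q.pushBack(gp) // speculatively enqueue
    	if creditAppeared() {
    		*q = old // undo the enqueue...
    		if old.tail != nil {
    			old.tail.next = nil // ...and unhook gp from the old tail
    		}
    		return false // backed out; the caller assists instead
    	}
    	return true // stays queued; the caller parks
    }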
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
- if work.assistQueue.head == 0 {
+ if work.assistQueue.q.empty() {
// Fast path; there are no blocked assists. There's a
// small window here where an assist may add itself to
// the blocked queue and park. If that happens, we'll
// just get it on the next flush.
scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)
lock(&work.assistQueue.lock)
- gp := work.assistQueue.head.ptr()
- for gp != nil && scanBytes > 0 {
+ for !work.assistQueue.q.empty() && scanBytes > 0 {
+ gp := work.assistQueue.q.pop()
// Note that gp.gcAssistBytes is negative because gp
// is in debt. Think carefully about the signs below.
if scanBytes+gp.gcAssistBytes >= 0 {
// Satisfy this entire assist debt.
scanBytes += gp.gcAssistBytes
gp.gcAssistBytes = 0
- xgp := gp
- gp = gp.schedlink.ptr()
- // It's important that we *not* put xgp in
+ // It's important that we *not* put gp in
// runnext. Otherwise, it's possible for user
// code to exploit the GC worker's high
// scheduler priority to get itself always run
// before other goroutines and always in the
// fresh quantum started by GC.
- ready(xgp, 0, false)
+ ready(gp, 0, false)
} else {
// Partially satisfy this assist.
gp.gcAssistBytes += scanBytes
scanBytes = 0
// As a heuristic, we move this assist to the
// back of the queue so that large assists
// can't clog up the assist queue and
// substantially delay small assists.
- xgp := gp
- gp = gp.schedlink.ptr()
- if gp == nil {
- // gp is the only assist in the queue.
- gp = xgp
- } else {
- xgp.schedlink = 0
- work.assistQueue.tail.ptr().schedlink.set(xgp)
- work.assistQueue.tail.set(xgp)
- }
+ work.assistQueue.q.pushBack(gp)
break
}
}
- work.assistQueue.head.set(gp)
- if gp == nil {
- work.assistQueue.tail.set(nil)
- }
if scanBytes > 0 {
// Convert from scan bytes back to work.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func globrunqput(gp *g) {
- gp.schedlink = 0
- if sched.runqtail != 0 {
- sched.runqtail.ptr().schedlink.set(gp)
- } else {
- sched.runqhead.set(gp)
- }
- sched.runqtail.set(gp)
+ sched.runq.pushBack(gp)
sched.runqsize++
}
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func globrunqputhead(gp *g) {
- gp.schedlink = sched.runqhead
- sched.runqhead.set(gp)
- if sched.runqtail == 0 {
- sched.runqtail.set(gp)
- }
+ sched.runq.push(gp)
sched.runqsize++
}
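
globrunqput and globrunqputhead now differ only in which end of the gQueue they touch: pushBack preserves FIFO order for ordinary scheduling, while push makes gp the next G taken from the global queue. Extending the toy types from the first sketch with a head insert shows the difference:

    // push inserts n at the head of q, like gQueue.push above.
    func (q *toyQueue) push(n *gNode) {
    	n.next = q.head
    	q.head = n
    	if q.tail == nil {
    		q.tail = n
    	}
    }

    // After q.pushBack(a), q.pushBack(b), q.push(c), the queue
    // reads c, a, b from the head.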
// Put a batch of runnable goroutines on the global runnable queue.
+// This clears *batch.
// Sched must be locked.
-func globrunqputbatch(ghead *g, gtail *g, n int32) {
- gtail.schedlink = 0
- if sched.runqtail != 0 {
- sched.runqtail.ptr().schedlink.set(ghead)
- } else {
- sched.runqhead.set(ghead)
- }
- sched.runqtail.set(gtail)
+func globrunqputbatch(batch *gQueue, n int32) {
+ sched.runq.pushBackAll(*batch)
sched.runqsize += n
+ *batch = gQueue{}
}
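
Because pushBackAll splices the batch's nodes into sched.runq rather than copying them, the caller's gQueue would otherwise still alias the same Gs; zeroing *batch makes the ownership transfer explicit. A sketch of the calling convention with the toy types (putBatch is an illustrative name):

    // pushBackAll splices all of q2 onto the tail of q, as
    // gQueue.pushBackAll does below.
    func (q *toyQueue) pushBackAll(q2 toyQueue) {
    	if q2.tail == nil {
    		return
    	}
    	q2.tail.next = nil
    	if q.tail != nil {
    		q.tail.next = q2.head
    	} else {
    		q.head = q2.head
    	}
    	q.tail = q2.tail
    }

    // putBatch mirrors globrunqputbatch: splice the batch in, then
    // clear the caller's queue so the nodes have a single owner.
    func putBatch(runq, batch *toyQueue) {
    	runq.pushBackAll(*batch)
    	*batch = toyQueue{} // batch no longer aliases the queued nodes
    }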
// Try to get a batch of Gs from the global runnable queue.
}
sched.runqsize -= n
- if sched.runqsize == 0 {
- sched.runqtail = 0
- }
- gp := sched.runqhead.ptr()
- sched.runqhead = gp.schedlink
+ gp := sched.runq.pop()
n--
for ; n > 0; n-- {
- gp1 := sched.runqhead.ptr()
- sched.runqhead = gp1.schedlink
+ gp1 := sched.runq.pop()
runqput(_p_, gp1, false)
}
return gp
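
On the get side, globrunqget reduces to a sequence of pops: keep the first G to return and feed the rest to the local run queue. A toy version of the same drain pattern, assuming the queue holds at least n nodes (drain and local are illustrative names):

    // pop removes and returns the head of q, or nil if q is empty.
    func (q *toyQueue) pop() *gNode {
    	n := q.head
    	if n != nil {
    		q.head = n.next
    		if q.head == nil {
    			q.tail = nil
    		}
    	}
    	return n
    }

    // drain pops n nodes: the first is returned, the rest go to a
    // local consumer, as globrunqget does with runqput.
    func drain(q *toyQueue, n int, local func(*gNode)) *gNode {
    	first := q.pop()
    	for i := 1; i < n; i++ {
    		local(q.pop())
    	}
    	return first
    }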
for i := uint32(0); i < n; i++ {
batch[i].schedlink.set(batch[i+1])
}
+ var q gQueue
+ q.head.set(batch[0])
+ q.tail.set(batch[n])
// Now put the batch on global queue.
lock(&sched.lock)
- globrunqputbatch(batch[0], batch[n], int32(n+1))
+ globrunqputbatch(&q, int32(n+1))
unlock(&sched.lock)
return true
}
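
runqputslow links its overflow batch in place and sets the queue's head and tail once instead of paying a pushBack per element. The same idea with the toy types (queueFromSlice is an illustrative name):

    // queueFromSlice links a slice of nodes into a queue in one
    // pass, as runqputslow does with its batch array above.
    func queueFromSlice(batch []*gNode) toyQueue {
    	if len(batch) == 0 {
    		return toyQueue{}
    	}
    	for i := 0; i < len(batch)-1; i++ {
    		batch[i].next = batch[i+1]
    	}
    	last := batch[len(batch)-1]
    	last.next = nil
    	return toyQueue{head: batch[0], tail: last}
    }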
return gp
}
+// A gQueue is a deque of Gs linked through g.schedlink. A G can only
+// be on one gQueue or gList at a time.
+type gQueue struct {
+ head guintptr
+ tail guintptr
+}
+
+// empty returns true if q is empty.
+func (q *gQueue) empty() bool {
+ return q.head == 0
+}
+
+// push adds gp to the head of q.
+func (q *gQueue) push(gp *g) {
+ gp.schedlink = q.head
+ q.head.set(gp)
+ if q.tail == 0 {
+ q.tail.set(gp)
+ }
+}
+
+// pushBack adds gp to the tail of q.
+func (q *gQueue) pushBack(gp *g) {
+ gp.schedlink = 0
+ if q.tail != 0 {
+ q.tail.ptr().schedlink.set(gp)
+ } else {
+ q.head.set(gp)
+ }
+ q.tail.set(gp)
+}
+
+// pushBackAll adds all Gs in q2 to the tail of q. After this, q2 must
+// not be used.
+func (q *gQueue) pushBackAll(q2 gQueue) {
+ if q2.tail == 0 {
+ return
+ }
+ q2.tail.ptr().schedlink = 0
+ if q.tail != 0 {
+ q.tail.ptr().schedlink = q2.head
+ } else {
+ q.head = q2.head
+ }
+ q.tail = q2.tail
+}
+
+// pop removes and returns the head of queue q. It returns nil if
+// q is empty.
+func (q *gQueue) pop() *g {
+ gp := q.head.ptr()
+ if gp != nil {
+ q.head = gp.schedlink
+ if q.head == 0 {
+ q.tail = 0
+ }
+ }
+ return gp
+}
+
+// popList takes all Gs in q and returns them as a gList.
+func (q *gQueue) popList() gList {
+ stack := gList{q.head}
+ *q = gQueue{}
+ return stack
+}
+
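
Taken together these methods give an allocation-free FIFO in two words. A usage sketch as it might look inside the runtime package, with gp0, gp1, gp2 as placeholder *g values:

    var q gQueue
    q.pushBack(gp1)     // q: gp1
    q.pushBack(gp2)     // q: gp1, gp2
    q.push(gp0)         // q: gp0, gp1, gp2
    first := q.pop()    // first == gp0; q: gp1, gp2
    list := q.popList() // q is empty; list holds gp1, gp2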
+// A gList is a list of Gs linked through g.schedlink. A G can only be
+// on one gQueue or gList at a time.
+type gList struct {
+ head guintptr
+}
+
+// empty returns true if l is empty.
+func (l *gList) empty() bool {
+ return l.head == 0
+}
+
+// push adds gp to the head of l.
+func (l *gList) push(gp *g) {
+ gp.schedlink = l.head
+ l.head.set(gp)
+}
+
+// pushAll prepends all Gs in q to l.
+func (l *gList) pushAll(q gQueue) {
+ if !q.empty() {
+ q.tail.ptr().schedlink = l.head
+ l.head = q.head
+ }
+}
+
+// pop removes and returns the head of l. If l is empty, it returns nil.
+func (l *gList) pop() *g {
+ gp := l.head.ptr()
+ if gp != nil {
+ l.head = gp.schedlink
+ }
+ return gp
+}
+
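
gList complements gQueue for code that only needs to collect and drain Gs: push and pop both work at the head, so a gList on its own behaves as a stack, and pushAll prepends an entire gQueue in O(1). A sketch with gp1, gp2, gp3 as placeholder *g values:

    var q gQueue // assume q holds gp1, gp2
    var l gList
    l.push(gp3)  // l: gp3
    l.pushAll(q) // l: gp1, gp2, gp3; q must not be reused
    for !l.empty() {
    	gp := l.pop() // yields gp1, then gp2, then gp3
    	_ = gp
    }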
//go:linkname setMaxThreads runtime/debug.setMaxThreads
func setMaxThreads(in int) (out int) {
lock(&sched.lock)
nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.
// Global runnable queue.
- runqhead guintptr
- runqtail guintptr
+ runq gQueue
runqsize int32
// Global cache of dead G's.