gp.waiting = mysg
gp.param = nil
c.sendq.enqueue(mysg)
- goparkunlock(&c.lock, waitReasonChanSend, traceEvGoBlockSend, 3)
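+ // Park with chanparkcommit rather than goparkunlock: the commit
+ // function marks this G's stack as having unlocked sudogs pointing
+ // into it before it drops c.lock, so a concurrent stack copy knows
+ // it must take c.lock before adjusting pointers into the stack.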
+ gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
// Ensure the value being sent is kept alive until the
// receiver copies it out. The sudog has a pointer to the
// stack object, but sudogs aren't considered as roots of the
// stack tracer.
KeepAlive(ep)

// someone woke us up.
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
gp.waiting = nil
+ gp.activeStackChans = false
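+ // A nil param means the wakeup came from closechan rather than
+ // from a receiver copying the value out.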
if gp.param == nil {
if c.closed == 0 {
throw("chansend: spurious wakeup")
mysg.c = c
gp.param = nil
c.recvq.enqueue(mysg)
- goparkunlock(&c.lock, waitReasonChanReceive, traceEvGoBlockRecv, 3)
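+ // As in chansend: park with chanparkcommit so the stack is
+ // flagged before c.lock is released.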
+ gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
// someone woke us up
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
gp.waiting = nil
+ gp.activeStackChans = false
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
goready(gp, skip+1)
}
+func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
+ // There are unlocked sudogs that point into gp's stack. Stack
+ // copying must lock the channels of those sudogs.
+ gp.activeStackChans = true
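+ // The flag must be set before c.lock is dropped: the G is already
+ // in _Gwaiting here, so once the lock is released a concurrent
+ // stack shrink may run and must observe the flag.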
+ unlock((*mutex)(chanLock))
+ return true
+}
+
// compiler implements
//
// select {
stackguard0 uintptr // offset known to liblink
stackguard1 uintptr // offset known to liblink
- _panic *_panic // innermost panic - offset known to liblink
- _defer *_defer // innermost defer
- m *m // current m; offset known to arm liblink
- sched gobuf
- syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
- syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
- stktopsp uintptr // expected sp at top of stack, to check in traceback
- param unsafe.Pointer // passed parameter on wakeup
- atomicstatus uint32
- stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
- goid int64
- schedlink guintptr
- waitsince int64 // approx time when the g become blocked
- waitreason waitReason // if status==Gwaiting
- preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
- preemptStop bool // transition to _Gpreempted on preemption; otherwise, just deschedule
- paniconfault bool // panic (instead of crash) on unexpected fault address
- gcscandone bool // g has scanned stack; protected by _Gscan bit in status
- throwsplit bool // must not split stack
- raceignore int8 // ignore race detection events
- sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
- sysexitticks int64 // cputicks when syscall has returned (for tracing)
- traceseq uint64 // trace event sequencer
- tracelastp puintptr // last P emitted an event for this goroutine
+ _panic *_panic // innermost panic - offset known to liblink
+ _defer *_defer // innermost defer
+ m *m // current m; offset known to arm liblink
+ sched gobuf
+ syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
+ syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
+ stktopsp uintptr // expected sp at top of stack, to check in traceback
+ param unsafe.Pointer // passed parameter on wakeup
+ atomicstatus uint32
+ stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
+ goid int64
+ schedlink guintptr
+ waitsince int64 // approx time when the g became blocked
+ waitreason waitReason // if status==Gwaiting
+ preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
+ preemptStop bool // transition to _Gpreempted on preemption; otherwise, just deschedule
+ paniconfault bool // panic (instead of crash) on unexpected fault address
+ gcscandone bool // g has scanned stack; protected by _Gscan bit in status
+ throwsplit bool // must not split stack
+ // activeStackChans indicates that there are unlocked channels
+ // pointing into this goroutine's stack. If true, stack
+ // copying needs to acquire channel locks to protect these
+ // areas of the stack.
+ activeStackChans bool
+
+ raceignore int8 // ignore race detection events
+ sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
+ sysexitticks int64 // cputicks when syscall has returned (for tracing)
+ traceseq uint64 // trace event sequencer
+ tracelastp puintptr // last P emitted an event for this goroutine
lockedm muintptr
sig uint32
writebuf []byte
}
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
+ // There are unlocked sudogs that point into gp's stack. Stack
+ // copying must lock the channels of those sudogs.
+ gp.activeStackChans = true
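+ // As in chanparkcommit, the flag is set while the channel locks
+ // are still held; selparkcommit then releases them all.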
// This must not access gp's stack (see gopark). In
// particular, it must not access the *hselect. That's okay,
// because by the time this is called, gp.waiting has all
// channels in lock order.
// wait for someone to wake us up
gp.param = nil
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
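+ // selparkcommit set activeStackChans when it released the channel
+ // locks; clear the flag now that the G is running again, before
+ // the channels are re-locked below.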
+ gp.activeStackChans = false
sellock(scases, lockorder)
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {runtime.G{}, 212, 368}, // g, but exported for testing
+ {runtime.G{}, 216, 376}, // g, but exported for testing
}
for _, tt := range tests {
}
// Lock channels to prevent concurrent send/receive.
- // It's important that we *only* do this for async
- // copystack; otherwise, gp may be in the middle of
- // putting itself on wait queues and this would
- // self-deadlock.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc {
lock(&sg.c.lock)
}
lastc = sg.c
}
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
-//
-// If sync is true, this is a self-triggered stack growth and, in
-// particular, no other G may be writing to gp's stack (e.g., via a
-// channel operation). If sync is false, copystack protects against
-// concurrent channel operations.
-func copystack(gp *g, newsize uintptr, sync bool) {
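+//
+// If gp.activeStackChans is set, unlocked sudogs may be pointing
+// into the stack; copystack then synchronizes with concurrent
+// channel operations by locking those channels (syncadjustsudogs).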
+func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
throw("stack growth not allowed in system call")
}
// Adjust sudogs, synchronizing with channel ops if necessary.
ncopy := used
- if sync {
+ if !gp.activeStackChans {
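+ // Fast path: no unlocked sudogs point into this stack, so it
+ // can be adjusted without taking any channel locks.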
adjustsudogs(gp, &adjinfo)
} else {
- // sudogs can point in to the stack. During concurrent
- // shrinking, these areas may be written to. Find the
- // highest such pointer so we can handle everything
- // there and below carefully. (This shouldn't be far
- // from the bottom of the stack, so there's little
- // cost in handling everything below it carefully.)
+ // sudogs may be pointing into the stack and gp has
+ // released channel locks, so other goroutines could
+ // be writing to gp's stack. Find the highest such
+ // pointer so we can handle everything there and below
+ // carefully. (This shouldn't be far from the bottom
+ // of the stack, so there's little cost in handling
+ // everything below it carefully.)
adjinfo.sghi = findsghi(gp, old)
// Synchronize with channel ops and copy the part of
// the stack they may interact with.
ncopy -= syncadjustsudogs(gp, used, &adjinfo)
}
// The concurrent GC will not scan the stack while we are doing the copy since
// the gp is in a Gcopystack status.
- copystack(gp, newsize, true)
+ copystack(gp, newsize)
if stackDebug >= 1 {
print("stack grow done\n")
}
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
- copystack(gp, newsize, false)
+ copystack(gp, newsize)
}
// freeStackSpans frees unused stack spans at the end of GC.