p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = c.ctxt.StartUnsafePoint(p, c.newprog)
-
if framesize <= abi.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
bls.As = ABLS
bls.To.Type = obj.TYPE_BRANCH
- end := c.ctxt.EndUnsafePoint(bls, c.newprog, -1)
-
var last *obj.Prog
for last = c.cursym.Func().Text; last.Link != nil; last = last.Link {
}
spfix.Spadj = -framesize
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
- pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
// MOVW LR, R3
movw := obj.Appendp(pcdata, c.newprog)
}
call.To.Sym = c.ctxt.Lookup(morestack)
- pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
-
// B start
- b := obj.Appendp(pcdata, c.newprog)
+ b := obj.Appendp(call, c.newprog)
b.As = obj.AJMP
b.To.Type = obj.TYPE_BRANCH
b.To.SetTarget(startPred.Link)
b.Spadj = +framesize
- return end
+ return bls
}
var unaryDst = map[obj.As]bool{
p.To.Type = obj.TYPE_REG
p.To.Reg = REGRT1
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = c.ctxt.StartUnsafePoint(p, c.newprog)
-
q := (*obj.Prog)(nil)
if framesize <= abi.StackSmall {
// small stack: SP < stackguard
bls.As = ABLS
bls.To.Type = obj.TYPE_BRANCH
- end := c.ctxt.EndUnsafePoint(bls, c.newprog, -1)
-
var last *obj.Prog
for last = c.cursym.Func().Text; last.Link != nil; last = last.Link {
}
spfix.Spadj = -framesize
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
- pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
if q != nil {
q.To.SetTarget(pcdata)
}
call.To.Sym = c.ctxt.Lookup(morestack)
- // The instructions which unspill regs should be preemptible.
- pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
- unspill := c.cursym.Func().UnspillRegisterArgs(pcdata, c.newprog)
+ unspill := c.cursym.Func().UnspillRegisterArgs(call, c.newprog)
// B start
jmp := obj.Appendp(unspill, c.newprog)
jmp.To.SetTarget(startPred.Link)
jmp.Spadj = +framesize
- return end
+ return bls
}
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R20
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = c.ctxt.StartUnsafePoint(p, c.newprog)
-
var q *obj.Prog
if framesize <= abi.StackSmall {
// small stack: SP < stackguard
p.To.Type = obj.TYPE_BRANCH
p.Mark |= BRANCH
- end := c.ctxt.EndUnsafePoint(p, c.newprog, -1)
+ end := p
var last *obj.Prog
for last = c.cursym.Func().Text; last.Link != nil; last = last.Link {
spfix.Spadj = -framesize
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
- pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
if q != nil {
q.To.SetTarget(pcdata)
}
call.Mark |= BRANCH
- // The instructions which unspill regs should be preemptible.
- pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
- unspill := c.cursym.Func().UnspillRegisterArgs(pcdata, c.newprog)
+ unspill := c.cursym.Func().UnspillRegisterArgs(call, c.newprog)
// JMP start
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = c.ctxt.StartUnsafePoint(p, c.newprog)
-
var q *obj.Prog
if framesize <= abi.StackSmall {
// small stack: SP < stackguard
}
p.Mark |= BRANCH
- p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
-
// JMP start
p = obj.Appendp(p, c.newprog)
s.setFIPSType(ctxt)
}
-// EmitEntryLiveness generates PCDATA Progs after p to switch to the
-// liveness map active at the entry of function s. It returns the last
-// Prog generated.
-func (ctxt *Link) EmitEntryLiveness(s *LSym, p *Prog, newprog ProgAlloc) *Prog {
- pcdata := ctxt.EmitEntryStackMap(s, p, newprog)
- pcdata = ctxt.EmitEntryUnsafePoint(s, pcdata, newprog)
- return pcdata
-}
-
-// Similar to EmitEntryLiveness, but just emit stack map.
+// EmitEntryStackMap generates PCDATA Progs after p to switch to the
+// stack map active at the entry of function s. It returns the last
+// Prog generated.
func (ctxt *Link) EmitEntryStackMap(s *LSym, p *Prog, newprog ProgAlloc) *Prog {
pcdata := Appendp(p, newprog)
pcdata.Pos = s.Func().Text.Pos
return pcdata
}
-// Similar to EmitEntryLiveness, but just emit unsafe point map.
-func (ctxt *Link) EmitEntryUnsafePoint(s *LSym, p *Prog, newprog ProgAlloc) *Prog {
- pcdata := Appendp(p, newprog)
- pcdata.Pos = s.Func().Text.Pos
- pcdata.As = APCDATA
- pcdata.From.Type = TYPE_CONST
- pcdata.From.Offset = abi.PCDATA_UnsafePoint
- pcdata.To.Type = TYPE_CONST
- pcdata.To.Offset = -1
-
- return pcdata
-}
-
// StartUnsafePoint generates PCDATA Progs after p to mark the
// beginning of an unsafe point. The unsafe point starts immediately
// after p.
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R22
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = c.ctxt.StartUnsafePoint(p, c.newprog)
-
var q *obj.Prog
if framesize <= abi.StackSmall {
// small stack: SP < stackguard
p.To.Reg = REG_R2
}
- // The instructions which unspill regs should be preemptible.
- p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
unspill := c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
// BR start
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_X6
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = ctxt.StartUnsafePoint(p, newprog)
-
var to_done, to_more *obj.Prog
if framesize <= abi.StackSmall {
}
jalToSym(ctxt, p, REG_X5)
- // The instructions which unspill regs should be preemptible.
- p = ctxt.EndUnsafePoint(p, newprog, -1)
p = cursym.Func().UnspillRegisterArgs(p, newprog)
// JMP start
if !p.From.Sym.NoSplit() {
p, pPreempt, pCheck = c.stacksplitPre(p, autosize) // emit pre part of split check
pPre = p
- p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
wasSplit = true //need post part of split
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = c.ctxt.StartUnsafePoint(p, c.newprog)
-
if framesize <= abi.StackSmall {
// small stack: SP < stackguard
// CMPUBGE stackguard, SP, label-of-call-to-morestack
spfix.Spadj = -framesize
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
- pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
// MOVD LR, R5
p = obj.Appendp(pcdata, c.newprog)
p.To.Sym = c.ctxt.Lookup("runtime.morestack")
}
- p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
-
// BR pCheck
p = obj.Appendp(p, c.newprog)
p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
}
- // Mark the stack bound check and morestack call async nonpreemptible.
- // If we get preempted here, when resumed the preemption request is
- // cleared, but we'll still call morestack, which will double the stack
- // unnecessarily. See issue #35470.
- p = ctxt.StartUnsafePoint(p, newprog)
} else if framesize <= abi.StackBig {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAQ -xxx(SP), tmp
p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
}
- p = ctxt.StartUnsafePoint(p, newprog) // see the comment above
} else {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
p.To.Type = obj.TYPE_REG
p.To.Reg = tmp
- p = ctxt.StartUnsafePoint(p, newprog) // see the comment above
-
p = obj.Appendp(p, newprog)
p.As = sub
p.From.Type = obj.TYPE_CONST
jls.As = AJLS
jls.To.Type = obj.TYPE_BRANCH
- end := ctxt.EndUnsafePoint(jls, newprog, -1)
-
var last *obj.Prog
for last = cursym.Func().Text; last.Link != nil; last = last.Link {
}
spfix.As = obj.ANOP
spfix.Spadj = -framesize
- pcdata := ctxt.EmitEntryStackMap(cursym, spfix, newprog)
- spill := ctxt.StartUnsafePoint(pcdata, newprog)
- pcdata = cursym.Func().SpillRegisterArgs(spill, newprog)
+ spill := ctxt.EmitEntryStackMap(cursym, spfix, newprog)
+ pcdata := cursym.Func().SpillRegisterArgs(spill, newprog)
call := obj.Appendp(pcdata, newprog)
call.Pos = cursym.Func().Text.Pos
progedit(ctxt, callend.Link, newprog)
}
- // The instructions which unspill regs should be preemptible.
- pcdata = ctxt.EndUnsafePoint(callend, newprog, -1)
- unspill := cursym.Func().UnspillRegisterArgs(pcdata, newprog)
+ unspill := cursym.Func().UnspillRegisterArgs(callend, newprog)
jmp := obj.Appendp(unspill, newprog)
jmp.As = obj.AJMP
q1.To.SetTarget(spill)
}
- return end, rg
+ return jls, rg
}
func isR15(r int16) bool {
// _Gscan bit and thus own the stack.
gp.preemptStop = false
gp.preempt = false
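+ // Record that this goroutine was just preempted so that newstack
+ // can avoid an unnecessary stack growth. See issue 35470.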
+ gp.preemptRecent = true
gp.stackguard0 = gp.stack.lo + stackGuard
// The goroutine was already at a safe-point
runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
lockedm muintptr
fipsIndicator uint8
- sig uint32
- writebuf []byte
- sigcode0 uintptr
- sigcode1 uintptr
- sigpc uintptr
- parentGoid uint64 // goid of goroutine that created this goroutine
- gopc uintptr // pc of go statement that created this goroutine
- ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
- startpc uintptr // pc of goroutine function
- racectx uintptr
- waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
- cgoCtxt []uintptr // cgo traceback context
- labels unsafe.Pointer // profiler labels
- timer *timer // cached timer for time.Sleep
- sleepWhen int64 // when to sleep until
- selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
+
+ // preemptRecent is set when a goroutine is preempted. It is
+ // reset by code passing through the synchronous preemption
+ // path. It is used to avoid growing the stack when we were
+ // just preempted. See issue 35470.
+ preemptRecent bool
+
+ sig uint32
+ writebuf []byte
+ sigcode0 uintptr
+ sigcode1 uintptr
+ sigpc uintptr
+ parentGoid uint64 // goid of goroutine that created this goroutine
+ gopc uintptr // pc of go statement that created this goroutine
+ ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
+ startpc uintptr // pc of goroutine function
+ racectx uintptr
+ waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
+ cgoCtxt []uintptr // cgo traceback context
+ labels unsafe.Pointer // profiler labels
+ timer *timer // cached timer for time.Sleep
+ sleepWhen int64 // when to sleep until
+ selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
// goroutineProfiled indicates the status of this goroutine's stack for the
// current in-progress goroutine profile
gopreempt_m(gp) // never return
}
+ if stackguard0 == gp.stack.lo+stackGuard && gp.preemptRecent {
+ // This case happens because of an interaction between synchronous
+ // and asynchronous preemption. First, we set the cooperative
+ // preemption signal (g.stackguard0 = stackPreempt), and as a
+ // result the function fails the stack check and enters its
+ // morestack path. If it gets suspended at that point, we might
+ // give up waiting for it and send an async preempt. That async
+ // preempt gets processed and clears the cooperative preemption
+ // signal (g.stackguard0 = g.stack.lo+stackGuard) and resumes
+ // the function. But even though the cooperative preemption
+ // signal is cleared, we're already on the morestack path and
+ // can't avoid calling morestack. See issue 35470.
+ //
+ // To avoid this problem, if we've been preempted recently,
+ // clear the "preempted recently" flag and resume the G.
+ // If we really did need more stack, the morestack check will
+ // immediately fail and we'll get back here to try again (with
+ // preemptRecent==false, so we don't take this case the
+ // second time).
+ gp.preemptRecent = false
+ gogo(&gp.sched) // never return
+ }
+
// Allocate a bigger segment and move the stack.
oldsize := gp.stack.hi - gp.stack.lo
newsize := oldsize * 2