func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
- case ssa.BlockDefer:
- // defer returns in rax:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(x86.ATESTL)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_AX
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_AX
- p = s.Prog(x86.AJNE)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
-
- case ssa.BlockDefer:
- // defer returns in R0:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(arm.ACMP)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 0
- p.Reg = arm.REG_R0
- p = s.Prog(arm.ABNE)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
-
- case ssa.BlockDefer:
- // defer returns in R0:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(arm64.ACMP)
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 0
- p.Reg = arm64.REG_R0
- p = s.Prog(arm64.ABNE)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
- case ssa.BlockDefer:
- // defer returns in R19:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(loong64.ABNE)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = loong64.REGZERO
- p.Reg = loong64.REG_R19
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
- case ssa.BlockDefer:
- // defer returns in R1:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(mips.ABNE)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = mips.REGZERO
- p.Reg = mips.REG_R1
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
- case ssa.BlockDefer:
- // defer returns in R1:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(mips.ABNE)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = mips.REGZERO
- p.Reg = mips.REG_R1
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockDefer:
- // defer returns in R3:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(ppc64.ACMP)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = ppc64.REG_R3
- p.To.Type = obj.TYPE_CONST
- p.To.Offset = 0
-
- p = s.Prog(ppc64.ABNE)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
-
- case ssa.BlockPlain:
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.SetPos(b.Pos)
switch b.Kind {
- case ssa.BlockDefer:
- // defer returns in A0:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(riscv.ABNE)
- p.To.Type = obj.TYPE_BRANCH
- p.From.Type = obj.TYPE_REG
- p.From.Reg = riscv.REG_ZERO
- p.Reg = riscv.REG_A0
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
- case ssa.BlockPlain:
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
// Handle generic blocks first.
switch b.Kind {
- case ssa.BlockPlain:
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
return
- case ssa.BlockDefer:
- // defer returns in R3:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Br(s390x.ACIJ, b.Succs[1].Block())
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
- p.Reg = s390x.REG_R3
- p.AddRestSourceConst(0)
- if b.Succs[0].Block() != next {
- s.Br(s390x.ABR, b.Succs[0].Block())
- }
- return
case ssa.BlockExit, ssa.BlockRetJmp:
return
case ssa.BlockRet:
{name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory.
}
-// kind       controls         successors        implicit exit
-// -------------------------------------------------------------
-// Exit       [return mem]     []                yes
-// Ret        [return mem]     []                yes
-// RetJmp     [return mem]     []                yes
-// Plain      []               [next]
-// If         [boolean Value]  [then, else]
-// First      []               [always, never]
-// Defer      [mem]            [nopanic, panic]  (control opcode should be OpStaticCall to runtime.deferproc)
-// JumpTable  [integer Value]  [succ1,succ2,..]
+// kind       controls         successors        implicit exit
+// -------------------------------------------------------------
+// Exit       [return mem]     []                yes
+// Ret        [return mem]     []                yes
+// RetJmp     [return mem]     []                yes
+// Plain      []               [next]
+// If         [boolean Value]  [then, else]
+// First      []               [always, never]
+// Defer      [mem]            [nopanic, recovery]  (control opcode should be OpStaticCall to runtime.defer*)
+// JumpTable  [integer Value]  [succ1,succ2,..]
var genericBlocks = []blockData{
{name: "Plain"}, // a single successor
{name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
- {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+ {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovery branch (jmp performed by runtime). Controls[0] is call op (of memory type).
{name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
{name: "RetJmp", controls: 1}, // no successors, Controls[0] value is a tail call
{name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
ABISelf *abi.ABIConfig // ABI for function being compiled
ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions.
-	scheduled         bool  // Values in Blocks are in final order
-	laidout           bool  // Blocks are ordered
-	NoSplit           bool  // true if function is marked as nosplit. Used by schedule check pass.
-	dumpFileSeq       uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
-	IsPgoHot          bool
-	HasDeferRangeFunc bool  // if true, needs a deferreturn so deferrangefunc can use it for recover() return PC
+	scheduled   bool   // Values in Blocks are in final order
+	laidout     bool   // Blocks are ordered
+	NoSplit     bool   // true if function is marked as nosplit. Used by schedule check pass.
+	dumpFileSeq uint8  // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+	IsPgoHot    bool
+	DeferReturn *Block // avoid creating more than one deferreturn if there are multiple calls to deferproc-etc.
// when register allocation is done, maps value ids to locations
RegAlloc []Location
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that we don't track correctly.
+ //
+ // TODO this restriction can be removed given adjusted offset in computeDeferReturn in cmd/link/internal/ld/pcln.go
s.hasOpenDefers = false
}
if s.hasOpenDefers && s.instrumentEnterExit {
}
s.openDeferExit()
} else {
+ // Shared deferreturn is assigned the "last" position in the function.
+ // The linker picks the first deferreturn call it sees, so this is
+ // the only sensible "shared" place.
+ // To not share deferreturn, the protocol would need to be changed
+ // so that the call to deferproc-etc would receive the PC offset from
+ // the return PC, and the runtime would need to use that instead of
+ // the deferreturn retrieved from the pcln information.
+ // Open-coded defers would remain a problem, however.
+ s.pushLine(s.curfn.Endlineno)
s.rtcall(ir.Syms.Deferreturn, true, nil)
+ s.popLine()
}
}
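A hedged, user-level illustration of the sharing described above (not compiler output): both return paths in a function like the following now funnel through the single deferreturn emitted at the closing brace (the s.curfn.Endlineno position), instead of each carrying its own check-and-jump sequence after deferproc.

package main

func pick(b bool) int {
	defer println("cleanup") // registered via deferproc
	if b {
		return 1 // both returns share the one deferreturn...
	}
	return 2 // ...emitted at the function's closing brace
}

func main() { println(pick(true)) }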
s.Fatalf("go/defer call with arguments: %v", n)
}
+ isCallDeferRangeFunc := false
+
switch n.Op() {
case ir.OCALLFUNC:
if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
}
}
if fn := n.Fun.Sym().Name; n.Fun.Sym().Pkg == ir.Pkgs.Runtime && fn == "deferrangefunc" {
- s.f.HasDeferRangeFunc = true
+ isCallDeferRangeFunc = true
}
break
}
}
// Finish block for defers
- if k == callDefer || k == callDeferStack {
+ if k == callDefer || k == callDeferStack || isCallDeferRangeFunc {
b := s.endBlock()
b.Kind = ssa.BlockDefer
b.SetControl(call)
bNext := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bNext)
- // Add recover edge to exit code.
- r := s.f.NewBlock(ssa.BlockPlain)
- s.startBlock(r)
- s.exit()
- b.AddEdgeTo(r)
+ r := s.f.DeferReturn // Share a single deferreturn among all defers
+ if r == nil {
+ r = s.f.NewBlock(ssa.BlockPlain)
+ s.startBlock(r)
+ s.exit()
+ s.f.DeferReturn = r
+ }
+ b.AddEdgeTo(r) // Add recover edge to exit code. This is a fake edge to keep the block live.
b.Likely = ssa.BranchLikely
s.startBlock(bNext)
}
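The recovery edge added above models the behavior in this hedged, user-level sketch: when a deferred function recovers a panic, control re-enters the frame at the shared exit block, so the function still returns normally, with any named results the deferred function set.

package main

import "fmt"

func f() (msg string) {
	defer func() {
		if r := recover(); r != nil {
			msg = fmt.Sprintf("recovered: %v", r) // runs on the recovery edge
		}
	}()
	panic("boom") // control resumes at f's shared deferreturn, then f returns
}

func main() { fmt.Println(f()) } // prints "recovered: boom"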
// nop (which will never execute) after the call.
Arch.Ginsnop(s.pp)
}
- if openDeferInfo != nil || f.HasDeferRangeFunc {
+ if openDeferInfo != nil {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return to the runtime.
- //
- // deferrangefunc needs to be sure that at least one of these exists;
- // if all returns are dead-code eliminated, there might not be.
+
+ // Note that this exit code doesn't work if a return parameter
+ // is heap-allocated, but open defers aren't enabled in that case.
+
+ // TODO either make this handle heap-allocated return parameters or reuse the other-defers general-purpose code path.
s.pp.NextLive = s.livenessMap.DeferReturn
p := s.pp.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
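For contrast, a hedged example of the case this path serves: a function like the one below typically gets open-coded defers (few defers, none in loops), so the deferred call is inlined at each return site and the disconnected deferreturn call emitted here is reached only during panic recovery.

package opendefer

func leave() {
	defer println("leaving") // inlined before the normal return
	println("working")       // the panic path uses the disconnected deferreturn
}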
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
+ case ssa.BlockPlain, ssa.BlockDefer:
if next != b.Succs[0].Block() {
s.Br(obj.AJMP, b.Succs[0].Block())
}
case ssa.BlockExit, ssa.BlockRetJmp:
- case ssa.BlockDefer:
- p := s.Prog(wasm.AGet)
- p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
- s.Prog(wasm.AI64Eqz)
- s.Prog(wasm.AI32Eqz)
- s.Prog(wasm.AIf)
- s.Br(obj.AJMP, b.Succs[1].Block())
- s.Prog(wasm.AEnd)
- if next != b.Succs[0].Block() {
- s.Br(obj.AJMP, b.Succs[0].Block())
- }
-
default:
panic("unexpected block")
}
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
- case ssa.BlockPlain:
- if b.Succs[0].Block() != next {
- p := s.Prog(obj.AJMP)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
- }
- case ssa.BlockDefer:
- // defer returns in rax:
- // 0 if we should continue executing
- // 1 if we should jump to deferreturn call
- p := s.Prog(x86.ATESTL)
- p.From.Type = obj.TYPE_REG
- p.From.Reg = x86.REG_AX
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_AX
- p = s.Prog(x86.AJNE)
- p.To.Type = obj.TYPE_BRANCH
- s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ case ssa.BlockPlain, ssa.BlockDefer:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
//
// We disable open-coded defers in buildssa() on 386 ONLY with shared
// libraries because of this extra code added before deferreturn calls.
+ //
+ // computeDeferReturn in cmd/link/internal/ld/pcln.go depends
+ // on the size of these instructions.
if ctxt.Arch.Family == sys.AMD64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
return
}
// instruction).
deferreturn = uint32(r.Off())
switch target.Arch.Family {
- case sys.AMD64, sys.I386:
+ case sys.I386:
deferreturn--
+ if ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin {
+ // In this mode, we need to get the address from GOT,
+ // with two additional instructions like
+ //
+ // CALL __x86.get_pc_thunk.bx(SB) // 5 bytes
+ // LEAL _GLOBAL_OFFSET_TABLE_<>(BX), BX // 6 bytes
+ //
+ // We need to back off to the get_pc_thunk call.
+ // (See progedit in cmd/internal/obj/x86/obj6.go)
+ deferreturn -= 11
+ }
+ case sys.AMD64:
+ deferreturn--
+
case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64:
// no change
case sys.S390X:
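A small worked example of the offset arithmetic above, with a hypothetical relocation offset: the 1 backed off is the CALL opcode byte, and the 11 is the 5-byte thunk call plus the 6-byte LEAL from the comment.

package main

import "fmt"

func main() {
	r := uint32(100)     // hypothetical offset of the CALL's target operand
	deferreturn := r - 1 // back off over the 1-byte CALL opcode (386, amd64)
	shared := true       // a shared/linkShared/plugin build on 386
	if shared {
		deferreturn -= 11 // CALL __x86.get_pc_thunk.bx (5) + LEAL GOT (6)
	}
	fmt.Println(deferreturn) // 88: start of the full call sequence
}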
SETEQ ret+0(FP)
RET
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVL $0, AX
- RET
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT,$0
DATA shifts<>+0xf8(SB)/8, $0xff0f0e0d0c0b0a09
GLOBL shifts<>(SB),RODATA,$256
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVL $0, AX
- RET
-
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT,$0
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12
JMP runtime·memhash64Fallback(SB)
-TEXT runtime·return0(SB),NOSPLIT,$0
- MOVW $0, R0
- RET
-
TEXT runtime·procyield(SB),NOSPLIT|NOFRAME,$0
MOVW cycles+0(FP), R1
MOVW $0, R0
MOVD (R0), R0
UNDEF
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVW $0, R0
- RET
-
// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback<ABIInternal>(SB)
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVW $0, R19
- RET
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT,$16
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback(SB)
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVW $0, R1
- RET
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT,$16
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12
JMP runtime·memhash64Fallback(SB)
-TEXT runtime·return0(SB),NOSPLIT,$0
- MOVW $0, R1
- RET
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback<ABIInternal>(SB)
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVW $0, R3
- RET
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
#ifdef GOOS_aix
TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback<ABIInternal>(SB)
-// func return0()
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOV $0, A0
- RET
-
// restore state from Gobuf; longjmp
// func gogo(buf *gobuf)
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback(SB)
-TEXT runtime·return0(SB), NOSPLIT, $0
- MOVW $0, R3
- RET
-
// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
JMP runtime·memhash64Fallback(SB)
-TEXT runtime·return0(SB), NOSPLIT, $0-0
- MOVD $0, RET0
- RET
-
TEXT runtime·asminit(SB), NOSPLIT, $0-0
// No per-thread init.
RET
// storing it to d.sp because GetCallerSP's result is a
// uintptr stack pointer.
d.sp = sys.GetCallerSP()
-
- // deferproc returns 0 normally.
- // a deferred func that stops a panic
- // makes the deferproc return 1.
- // the code the compiler generates always
- // checks the return value and jumps to the
- // end of the function if deferproc returns != 0.
- return0()
- // No code can go here - the C return register has
- // been set and must not be clobbered.
}
var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
throw("defer on system stack")
}
- fn := findfunc(sys.GetCallerPC())
- if fn.deferreturn == 0 {
- throw("no deferreturn")
- }
-
d := newdefer()
d.link = gp._defer
gp._defer = d
- d.pc = fn.entry() + uintptr(fn.deferreturn)
+ d.pc = sys.GetCallerPC()
// We must not be preempted between calling GetCallerSP and
// storing it to d.sp because GetCallerSP's result is a
// uintptr stack pointer.
break
}
}
-
- // Must be last - see deferproc above.
- return0()
}
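A hedged, user-level check of the new protocol: defers created in a loop always go through deferproc (they cannot be open-coded), and with return0 gone, recovery reaches the frame's deferreturn purely via the caller PC recorded here.

package main

import "fmt"

func f() (msg string) {
	for i := 0; i < 3; i++ { // loop defers are never open-coded
		defer func() {
			if r := recover(); r != nil {
				msg = fmt.Sprintf("recovered: %v", r)
			}
		}()
	}
	panic("boom")
}

func main() { fmt.Println(f()) } // prints "recovered: boom"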
// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// go code on the system stack can't defer
throw("defer on system stack")
}
+
// fn is already set.
// The other fields are junk on entry to deferprocStack and
// are initialized here.
*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
*(*uintptr)(unsafe.Pointer(&d.head)) = 0
*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
-
- return0()
- // No code can go here - the C return register has
- // been set and must not be clobbered.
}
// Each P holds a pool for defers.
fn := d.fn
- // TODO(mdempsky): Instead of having each deferproc call have
- // its own "deferreturn(); return" sequence, we should just make
- // them reuse the one we emit for open-coded defers.
p.retpc = d.pc
// Unlink and free.
pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0
+ // The linker records the f-relative address of a call to deferreturn in f's funcInfo.
+ // Assuming a "normal" call to recover() inside one of f's deferred functions
+ // invoked for a panic, that is the desired PC for exiting f.
+ f := findfunc(pc)
+ if f.deferreturn == 0 {
+ throw("no deferreturn")
+ }
+ gotoPc := f.entry() + uintptr(f.deferreturn)
+
// Unwind the panic stack.
for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
// Don't allow jumping past a pending Goexit.
// With how subtle defer handling is, this might not actually be
// worthwhile though.
if p.goexit {
- pc, sp = p.startPC, uintptr(p.startSP)
+ gotoPc, sp = p.startPC, uintptr(p.startSP)
saveOpenDeferState = false // goexit is unwinding the stack anyway
break
}
throw("bad recovery")
}
- // Make the deferproc for this d return again,
- // this time returning 1. The calling function will
- // jump to the standard return epilogue.
+ // Branch directly to the deferreturn.
gp.sched.sp = sp
- gp.sched.pc = pc
+ gp.sched.pc = gotoPc
gp.sched.lr = 0
// Restore the bp on platforms that support frame pointers.
// N.B. It's fine to not set anything for platforms that don't
// only gets us to the caller's fp.
gp.sched.bp = sp - goarch.PtrSize
}
- // The value in ret is delivered IN A REGISTER, even if there is a
- // stack ABI.
- gp.sched.ret = 1
gogo(&gp.sched)
}
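The findfunc lookup above has a rough public-API analogue (a hedged analogy, not the runtime's actual mechanism): any PC inside a frame identifies its function, from which a fixed offset such as the recorded deferreturn can be derived.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pc, _, _, _ := runtime.Caller(0) // a PC inside main, like d.pc inside the deferring frame
	f := runtime.FuncForPC(pc)
	fmt.Println(f.Name(), pc-f.Entry()) // function identity and PC offset from its entry
}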
func rt0_go()
-// return0 is a stub used to return 0 from deferproc.
-// It is called at the very end of deferproc to signal
-// the calling Go function that it should not jump
-// to deferreturn.
-// in asm_*.s
-func return0()
-
// in asm_*.s
// not called directly; definitions here supply type information for traceback.
// These must have the same signature (arg pointer map) as reflectcall.