Cypherpunks repositories - gostls13.git/commitdiff
cmd/compile, runtime: use PC of deferreturn for panic transfer
author: David Chase <drchase@google.com>
Wed, 19 Feb 2025 21:47:31 +0000 (16:47 -0500)
committer: David Chase <drchase@google.com>
Tue, 25 Feb 2025 16:35:38 +0000 (08:35 -0800)
This removes the old conditional-on-register-value
handshake from the deferproc/deferprocstack logic.

The "line" for the recovery-exit frame itself (not the defers
that it runs) is the closing brace of the function.

Reduces code size slightly (e.g. go command is 0.2% smaller)

Sample output showing effect of this change, also what sort of
code it requires to observe the effect:
```
package main

import "os"

func main() {
g(len(os.Args) - 1)           // stack[0]
}

var gi int
var pi *int = &gi

//go:noinline
func g(i int) {
switch i {
case 0:
defer func() {
println("g0", i)
q()                  // stack[2] if i == 0
}()
for j := *pi; j < 1; j++ {
defer func() {
println("recover0", recover().(string))
}()
}
default:
for j := *pi; j < 1; j++ {
defer func() {
println("g1", i)
q()              // stack[2] if i == 1
}()
}
defer func() {
println("recover1", recover().(string))
}()
}
p()
}                                // stack[1] (deferreturn)

//go:noinline
func p() {
panic("p()")
}

//go:noinline
func q() {
panic("q()")                 // stack[3]
}

/* Sample output for "./foo foo":
recover1 p()
g1 1
panic: q()

goroutine 1 [running]:
main.q()
.../main.go:46 +0x2c
main.g.func3()
.../main.go:29 +0x48
main.g(0x1?)
.../main.go:37 +0x68
main.main()
.../main.go:6 +0x28
*/
```

Change-Id: Ie39ea62ecc244213500380ea06d44024cadc2317
Reviewed-on: https://go-review.googlesource.com/c/go/+/650795
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>

29 files changed:
src/cmd/compile/internal/amd64/ssa.go
src/cmd/compile/internal/arm/ssa.go
src/cmd/compile/internal/arm64/ssa.go
src/cmd/compile/internal/loong64/ssa.go
src/cmd/compile/internal/mips/ssa.go
src/cmd/compile/internal/mips64/ssa.go
src/cmd/compile/internal/ppc64/ssa.go
src/cmd/compile/internal/riscv64/ssa.go
src/cmd/compile/internal/s390x/ssa.go
src/cmd/compile/internal/ssa/_gen/genericOps.go
src/cmd/compile/internal/ssa/func.go
src/cmd/compile/internal/ssagen/ssa.go
src/cmd/compile/internal/wasm/ssa.go
src/cmd/compile/internal/x86/ssa.go
src/cmd/internal/obj/x86/obj6.go
src/cmd/link/internal/ld/pcln.go
src/runtime/asm_386.s
src/runtime/asm_amd64.s
src/runtime/asm_arm.s
src/runtime/asm_arm64.s
src/runtime/asm_loong64.s
src/runtime/asm_mips64x.s
src/runtime/asm_mipsx.s
src/runtime/asm_ppc64x.s
src/runtime/asm_riscv64.s
src/runtime/asm_s390x.s
src/runtime/asm_wasm.s
src/runtime/panic.go
src/runtime/stubs.go

index 9eef71f760357a9a3502849d5baaf910fe17bbc5..332c49af0012ac5d604adb93e3784ab540c1e58b 100644 (file)
@@ -1441,24 +1441,7 @@ var nefJumps = [2][2]ssagen.IndexJump{
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-       case ssa.BlockDefer:
-               // defer returns in rax:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(x86.ATESTL)
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = x86.REG_AX
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = x86.REG_AX
-               p = s.Prog(x86.AJNE)
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index 638ed3ed4ef3ba5eee9e7d4cb8db3bb654d06b60..f129ab493d35507b53d37b495f8084e6991a3672 100644 (file)
@@ -918,24 +918,7 @@ var gtJumps = [2][2]ssagen.IndexJump{
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-
-       case ssa.BlockDefer:
-               // defer returns in R0:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(arm.ACMP)
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = 0
-               p.Reg = arm.REG_R0
-               p = s.Prog(arm.ABNE)
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index 0f5c5a17bd5fa2ca6fa838a9e1b1ae0fe432b14b..957e943e4468951bb3bf30803783a23b327ae67e 100644 (file)
@@ -1327,24 +1327,7 @@ var gtJumps = [2][2]ssagen.IndexJump{
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-
-       case ssa.BlockDefer:
-               // defer returns in R0:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(arm64.ACMP)
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = 0
-               p.Reg = arm64.REG_R0
-               p = s.Prog(arm64.ABNE)
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index 0ba9efa1d364274170233e0a246b2257e66e91fc..e8b8b27f8745af7b61ac7fb996a7a250abbbdf51 100644 (file)
@@ -970,22 +970,7 @@ var blockJump = map[ssa.BlockKind]struct {
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-       case ssa.BlockDefer:
-               // defer returns in R19:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(loong64.ABNE)
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = loong64.REGZERO
-               p.Reg = loong64.REG_R19
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index bfccafd8e5a52ee57ac8a398cd6a57eadde4ad04..4c7c8eafcda30dd068571631717b130020084610 100644 (file)
@@ -826,22 +826,7 @@ var blockJump = map[ssa.BlockKind]struct {
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-       case ssa.BlockDefer:
-               // defer returns in R1:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(mips.ABNE)
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = mips.REGZERO
-               p.Reg = mips.REG_R1
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index 0c0dc6e4955c1e7d6cef033564f22267da14d6e3..5b5edf622a1e9b902553f462b0dea2638fe2ca88 100644 (file)
@@ -835,22 +835,7 @@ var blockJump = map[ssa.BlockKind]struct {
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-       case ssa.BlockDefer:
-               // defer returns in R1:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(mips.ABNE)
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = mips.REGZERO
-               p.Reg = mips.REG_R1
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index 53ec4289c7bec20777596bb0583f396cfd08f71a..c1f2484bf4482a7f7e21283cf3f7fd09a6f59aa8 100644 (file)
@@ -2003,26 +2003,7 @@ var blockJump = [...]struct {
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockDefer:
-               // defer returns in R3:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(ppc64.ACMP)
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = ppc64.REG_R3
-               p.To.Type = obj.TYPE_CONST
-               p.To.Offset = 0
-
-               p = s.Prog(ppc64.ABNE)
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-
-       case ssa.BlockPlain:
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index 759d8d7cf491bcf3ba9cc176ef1d5ccf517804ac..636ef44d68e6a3edbcdba5dd604ff57c10994bb1 100644 (file)
@@ -802,22 +802,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        s.SetPos(b.Pos)
 
        switch b.Kind {
-       case ssa.BlockDefer:
-               // defer returns in A0:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(riscv.ABNE)
-               p.To.Type = obj.TYPE_BRANCH
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = riscv.REG_ZERO
-               p.Reg = riscv.REG_A0
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-       case ssa.BlockPlain:
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index a97c1569c11806ea37ec8bc70c7d444938e38676..4d24881dbaf5d59911b795211f51b2708c3f39be 100644 (file)
@@ -887,26 +887,13 @@ func blockAsm(b *ssa.Block) obj.As {
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        // Handle generic blocks first.
        switch b.Kind {
-       case ssa.BlockPlain:
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(s390x.ABR)
                        p.To.Type = obj.TYPE_BRANCH
                        s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
                }
                return
-       case ssa.BlockDefer:
-               // defer returns in R3:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Br(s390x.ACIJ, b.Succs[1].Block())
-               p.From.Type = obj.TYPE_CONST
-               p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
-               p.Reg = s390x.REG_R3
-               p.AddRestSourceConst(0)
-               if b.Succs[0].Block() != next {
-                       s.Br(s390x.ABR, b.Succs[0].Block())
-               }
-               return
        case ssa.BlockExit, ssa.BlockRetJmp:
                return
        case ssa.BlockRet:
index 4dde6d51c5b22eeda72336603c08127cf07a2f91..37de6e99190d335375259bb09a565231fbd82f42 100644 (file)
@@ -663,21 +663,21 @@ var genericOps = []opData{
        {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory.
 }
 
-//     kind          controls        successors   implicit exit
-//   ----------------------------------------------------------
-//     Exit      [return mem]                []             yes
-//      Ret      [return mem]                []             yes
-//   RetJmp      [return mem]                []             yes
-//    Plain                []            [next]
-//       If   [boolean Value]      [then, else]
-//    First                []   [always, never]
-//    Defer             [mem]  [nopanic, panic]                  (control opcode should be OpStaticCall to runtime.deferproc)
-// JumpTable   [integer Value]  [succ1,succ2,..]
+//     kind          controls          successors   implicit exit
+//   ------------------------------------------------------------
+//     Exit      [return mem]                  []             yes
+//      Ret      [return mem]                  []             yes
+//   RetJmp      [return mem]                  []             yes
+//    Plain                []              [next]
+//       If   [boolean Value]        [then, else]
+//    First                []     [always, never]
+//    Defer             [mem] [nopanic, recovery]                  (control opcode should be OpStaticCall to runtime.defer*)
+// JumpTable   [integer Value]   [succ1,succ2,..]
 
 var genericBlocks = []blockData{
        {name: "Plain"},                  // a single successor
        {name: "If", controls: 1},        // if Controls[0] goto Succs[0] else goto Succs[1]
-       {name: "Defer", controls: 1},     // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+       {name: "Defer", controls: 1},     // Succs[0]=defer queued, Succs[1]=defer recovery branch (jmp performed by runtime). Controls[0] is call op (of memory type).
        {name: "Ret", controls: 1},       // no successors, Controls[0] value is memory result
        {name: "RetJmp", controls: 1},    // no successors, Controls[0] value is a tail call
        {name: "Exit", controls: 1},      // no successors, Controls[0] value generates a panic
index 998cc804aa797982b1b814fe4a7b7235ecd73e75..12e4c268f0ea9b77939795bc29c3e80b144d1991 100644 (file)
@@ -41,12 +41,12 @@ type Func struct {
        ABISelf        *abi.ABIConfig // ABI for function being compiled
        ABIDefault     *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions.
 
-       scheduled         bool  // Values in Blocks are in final order
-       laidout           bool  // Blocks are ordered
-       NoSplit           bool  // true if function is marked as nosplit.  Used by schedule check pass.
-       dumpFileSeq       uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
-       IsPgoHot          bool
-       HasDeferRangeFunc bool // if true, needs a deferreturn so deferrangefunc can use it for recover() return PC
+       scheduled   bool  // Values in Blocks are in final order
+       laidout     bool  // Blocks are ordered
+       NoSplit     bool  // true if function is marked as nosplit.  Used by schedule check pass.
+       dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+       IsPgoHot    bool
+       DeferReturn *Block // avoid creating more than one deferreturn if there's multiple calls to deferproc-etc.
 
        // when register allocation is done, maps value ids to locations
        RegAlloc []Location
index 306244424c14f50b389407992f9b8c255543cd30..07269e65f2fdda986c07b4add752659745000bc0 100644 (file)
@@ -410,6 +410,8 @@ func buildssa(fn *ir.Func, worker int, isPgoHot bool) *ssa.Func {
                // Don't support open-coded defers for 386 ONLY when using shared
                // libraries, because there is extra code (added by rewriteToUseGot())
                // preceding the deferreturn/ret code that we don't track correctly.
+               //
+               // TODO this restriction can be removed given adjusted offset in computeDeferReturn in cmd/link/internal/ld/pcln.go
                s.hasOpenDefers = false
        }
        if s.hasOpenDefers && s.instrumentEnterExit {
@@ -2166,7 +2168,17 @@ func (s *state) exit() *ssa.Block {
                        }
                        s.openDeferExit()
                } else {
+                       // Shared deferreturn is assigned the "last" position in the function.
+                       // The linker picks the first deferreturn call it sees, so this is
+                       // the only sensible "shared" place.
+                       // To not-share deferreturn, the protocol would need to be changed
+                       // so that the call to deferproc-etc would receive the PC offset from
+                       // the return PC, and the runtime would need to use that instead of
+                       // the deferreturn retrieved from the pcln information.
+                       // opendefers would remain a problem, however.
+                       s.pushLine(s.curfn.Endlineno)
                        s.rtcall(ir.Syms.Deferreturn, true, nil)
+                       s.popLine()
                }
        }
 
@@ -4411,6 +4423,8 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExt
                s.Fatalf("go/defer call with arguments: %v", n)
        }
 
+       isCallDeferRangeFunc := false
+
        switch n.Op() {
        case ir.OCALLFUNC:
                if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
@@ -4434,7 +4448,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExt
                                }
                        }
                        if fn := n.Fun.Sym().Name; n.Fun.Sym().Pkg == ir.Pkgs.Runtime && fn == "deferrangefunc" {
-                               s.f.HasDeferRangeFunc = true
+                               isCallDeferRangeFunc = true
                        }
                        break
                }
@@ -4596,17 +4610,20 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExt
        }
 
        // Finish block for defers
-       if k == callDefer || k == callDeferStack {
+       if k == callDefer || k == callDeferStack || isCallDeferRangeFunc {
                b := s.endBlock()
                b.Kind = ssa.BlockDefer
                b.SetControl(call)
                bNext := s.f.NewBlock(ssa.BlockPlain)
                b.AddEdgeTo(bNext)
-               // Add recover edge to exit code.
-               r := s.f.NewBlock(ssa.BlockPlain)
-               s.startBlock(r)
-               s.exit()
-               b.AddEdgeTo(r)
+               r := s.f.DeferReturn // Share a single deferreturn among all defers
+               if r == nil {
+                       r = s.f.NewBlock(ssa.BlockPlain)
+                       s.startBlock(r)
+                       s.exit()
+                       s.f.DeferReturn = r
+               }
+               b.AddEdgeTo(r) // Add recover edge to exit code.  This is a fake edge to keep the block live.
                b.Likely = ssa.BranchLikely
                s.startBlock(bNext)
        }
@@ -6571,13 +6588,15 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
                // nop (which will never execute) after the call.
                Arch.Ginsnop(s.pp)
        }
-       if openDeferInfo != nil || f.HasDeferRangeFunc {
+       if openDeferInfo != nil {
                // When doing open-coded defers, generate a disconnected call to
                // deferreturn and a return. This will be used to during panic
                // recovery to unwind the stack and return back to the runtime.
-               //
-               // deferrangefunc needs to be sure that at least one of these exists;
-               // if all returns are dead-code eliminated, there might not be.
+
+               // Note that this exit code doesn't work if a return parameter
+               // is heap-allocated, but open defers aren't enabled in that case.
+
+               // TODO either make this handle heap-allocated return parameters or reuse the other-defers general-purpose code path.
                s.pp.NextLive = s.livenessMap.DeferReturn
                p := s.pp.Prog(obj.ACALL)
                p.To.Type = obj.TYPE_MEM
index 85f34a77073ed7bcc75c3a44628e308604dcbd94..daee82f1fd73664ad0e213ebab4e38ad84ae793c 100644 (file)
@@ -169,7 +169,7 @@ func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
+       case ssa.BlockPlain, ssa.BlockDefer:
                if next != b.Succs[0].Block() {
                        s.Br(obj.AJMP, b.Succs[0].Block())
                }
@@ -203,18 +203,6 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
 
        case ssa.BlockExit, ssa.BlockRetJmp:
 
-       case ssa.BlockDefer:
-               p := s.Prog(wasm.AGet)
-               p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
-               s.Prog(wasm.AI64Eqz)
-               s.Prog(wasm.AI32Eqz)
-               s.Prog(wasm.AIf)
-               s.Br(obj.AJMP, b.Succs[1].Block())
-               s.Prog(wasm.AEnd)
-               if next != b.Succs[0].Block() {
-                       s.Br(obj.AJMP, b.Succs[0].Block())
-               }
-
        default:
                panic("unexpected block")
        }
index 35ad2d90e664107da956ee267f73271b0264db88..347c5cb5602c7404151bf32591a113c067ce179f 100644 (file)
@@ -946,24 +946,7 @@ var nefJumps = [2][2]ssagen.IndexJump{
 
 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
        switch b.Kind {
-       case ssa.BlockPlain:
-               if b.Succs[0].Block() != next {
-                       p := s.Prog(obj.AJMP)
-                       p.To.Type = obj.TYPE_BRANCH
-                       s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
-               }
-       case ssa.BlockDefer:
-               // defer returns in rax:
-               // 0 if we should continue executing
-               // 1 if we should jump to deferreturn call
-               p := s.Prog(x86.ATESTL)
-               p.From.Type = obj.TYPE_REG
-               p.From.Reg = x86.REG_AX
-               p.To.Type = obj.TYPE_REG
-               p.To.Reg = x86.REG_AX
-               p = s.Prog(x86.AJNE)
-               p.To.Type = obj.TYPE_BRANCH
-               s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+       case ssa.BlockPlain, ssa.BlockDefer:
                if b.Succs[0].Block() != next {
                        p := s.Prog(obj.AJMP)
                        p.To.Type = obj.TYPE_BRANCH
index e6ea8985e4f78c326a2df0c6b015db0d963ed083..53c091825414190e32d5bdd0a235dd149185f1c9 100644 (file)
@@ -448,6 +448,9 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
                //
                // We disable open-coded defers in buildssa() on 386 ONLY with shared
                // libraries because of this extra code added before deferreturn calls.
+               //
+               // computeDeferReturn in cmd/link/internal/ld/pcln.go depends
+               // on the size of these instructions.
                if ctxt.Arch.Family == sys.AMD64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
                        return
                }
index ea08fd3d3175db23cb032d14c63149825fd75cb1..a09d3acd5e265b6780a7bed42d5ca39fa4fa971e 100644 (file)
@@ -143,8 +143,22 @@ func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 {
                                // instruction).
                                deferreturn = uint32(r.Off())
                                switch target.Arch.Family {
-                               case sys.AMD64, sys.I386:
+                               case sys.I386:
                                        deferreturn--
+                                       if ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin {
+                                               // In this mode, we need to get the address from GOT,
+                                               // with two additional instructions like
+                                               //
+                                               // CALL    __x86.get_pc_thunk.bx(SB)       // 5 bytes
+                                               // LEAL    _GLOBAL_OFFSET_TABLE_<>(BX), BX // 6 bytes
+                                               //
+                                               // We need to back off to the get_pc_thunk call.
+                                               // (See progedit in cmd/internal/obj/x86/obj6.go)
+                                               deferreturn -= 11
+                                       }
+                               case sys.AMD64:
+                                       deferreturn--
+
                                case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64:
                                        // no change
                                case sys.S390X:
index 5aafe14be9a107042c6188ecfd0e61bbe8c1fadb..b4818723e5c735f9805717a85544a82e6f90b9bd 100644 (file)
@@ -1373,10 +1373,6 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1
        SETEQ   ret+0(FP)
        RET
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVL    $0, AX
-       RET
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 TEXT _cgo_topofstack(SB),NOSPLIT,$0
index cdf9874a7f930e00f2b1592a2feb51b5a7ea2028..4b630b5eccb503903afa9373e1b682d6d713ce01 100644 (file)
@@ -1679,11 +1679,6 @@ DATA shifts<>+0xf0(SB)/8, $0x0807060504030201
 DATA shifts<>+0xf8(SB)/8, $0xff0f0e0d0c0b0a09
 GLOBL shifts<>(SB),RODATA,$256
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVL    $0, AX
-       RET
-
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 TEXT _cgo_topofstack(SB),NOSPLIT,$0
index 4d57ec6062ce14252136a986d761d10654c690c6..7c39b4a3e232feb68c20fbd2b18da83e26049b3b 100644 (file)
@@ -846,10 +846,6 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-12
 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12
        JMP     runtime·memhash64Fallback(SB)
 
-TEXT runtime·return0(SB),NOSPLIT,$0
-       MOVW    $0, R0
-       RET
-
 TEXT runtime·procyield(SB),NOSPLIT|NOFRAME,$0
        MOVW    cycles+0(FP), R1
        MOVW    $0, R0
index bf9ab6bcbced8ea1c1d2d4206a7cd13828d92f90..238eaf2789876968b4e652b8925bcf46d81c6179 100644 (file)
@@ -1263,10 +1263,6 @@ TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
        MOVD    (R0), R0
        UNDEF
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVW    $0, R0
-       RET
-
 // The top-most function running on a goroutine
 // returns to goexit+PCQuantum.
 TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
index 1c5ced4512a192753e75caf93f0ab31b7c834429..de64f8acbcdba9a8973281f0a211db256b44be22 100644 (file)
@@ -679,10 +679,6 @@ TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
 TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
        JMP     runtime·memhash64Fallback<ABIInternal>(SB)
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVW    $0, R19
-       RET
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 TEXT _cgo_topofstack(SB),NOSPLIT,$16
index 80cd87c4af335f6760165e7c9edf9a895df2bf06..cfb9950e17108a2febdbc5e11a5b07817e682823 100644 (file)
@@ -644,10 +644,6 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
        JMP     runtime·memhash64Fallback(SB)
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVW    $0, R1
-       RET
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 TEXT _cgo_topofstack(SB),NOSPLIT,$16
index ca95f22bd67f4697781f261cd202dafb2dff693b..33afa2e5c5ad3ed34ee416d1f7a8341dbdad696e 100644 (file)
@@ -634,10 +634,6 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-12
 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-12
        JMP     runtime·memhash64Fallback(SB)
 
-TEXT runtime·return0(SB),NOSPLIT,$0
-       MOVW    $0, R1
-       RET
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
index 2b8c4d42a3deaf3245e73e355bffab82c662238e..268e0c01c12b9d6377ba5156e7e1afc40b7272f1 100644 (file)
@@ -980,10 +980,6 @@ TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
 TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
        JMP     runtime·memhash64Fallback<ABIInternal>(SB)
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVW    $0, R3
-       RET
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 #ifdef GOOS_aix
index 71b32304d7b0ae38cc8072dd190819348b4a881a..20c9fdf7ff0be795b8c2c501807b424404dccac5 100644 (file)
@@ -247,11 +247,6 @@ TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
 TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
        JMP     runtime·memhash64Fallback<ABIInternal>(SB)
 
-// func return0()
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOV     $0, A0
-       RET
-
 // restore state from Gobuf; longjmp
 
 // func gogo(buf *gobuf)
index f2354a6d53614a8b0e1713923ea81079218fed66..6758175fc2091eb26d2eb5ba5e1c26900e7d0a08 100644 (file)
@@ -767,10 +767,6 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
        JMP     runtime·memhash64Fallback(SB)
 
-TEXT runtime·return0(SB), NOSPLIT, $0
-       MOVW    $0, R3
-       RET
-
 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
 // Must obey the gcc calling convention.
 TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
index 69da583a1d54e9635bd45afc4b41ea28cf441c68..247368d127d013313fef4fa2b7176111d7a12bc5 100644 (file)
@@ -195,10 +195,6 @@ TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
 TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
        JMP     runtime·memhash64Fallback(SB)
 
-TEXT runtime·return0(SB), NOSPLIT, $0-0
-       MOVD $0, RET0
-       RET
-
 TEXT runtime·asminit(SB), NOSPLIT, $0-0
        // No per-thread init.
        RET
index 1ed2503320a9126fb197c155927bf17b50ac3e7d..c31cfd6e1aeff4e2772cdff776faee39bfcc7b07 100644 (file)
@@ -285,16 +285,6 @@ func deferproc(fn func()) {
        // storing it to d.sp because GetCallerSP's result is a
        // uintptr stack pointer.
        d.sp = sys.GetCallerSP()
-
-       // deferproc returns 0 normally.
-       // a deferred func that stops a panic
-       // makes the deferproc return 1.
-       // the code the compiler generates always
-       // checks the return value and jumps to the
-       // end of the function if deferproc returns != 0.
-       return0()
-       // No code can go here - the C return register has
-       // been set and must not be clobbered.
 }
 
 var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
@@ -391,15 +381,10 @@ func deferrangefunc() any {
                throw("defer on system stack")
        }
 
-       fn := findfunc(sys.GetCallerPC())
-       if fn.deferreturn == 0 {
-               throw("no deferreturn")
-       }
-
        d := newdefer()
        d.link = gp._defer
        gp._defer = d
-       d.pc = fn.entry() + uintptr(fn.deferreturn)
+       d.pc = sys.GetCallerPC()
        // We must not be preempted between calling GetCallerSP and
        // storing it to d.sp because GetCallerSP's result is a
        // uintptr stack pointer.
@@ -434,9 +419,6 @@ func deferprocat(fn func(), frame any) {
                        break
                }
        }
-
-       // Must be last - see deferproc above.
-       return0()
 }
 
 // deferconvert converts the rangefunc defer list of d0 into an ordinary list
@@ -484,6 +466,7 @@ func deferprocStack(d *_defer) {
                // go code on the system stack can't defer
                throw("defer on system stack")
        }
+
        // fn is already set.
        // The other fields are junk on entry to deferprocStack and
        // are initialized here.
@@ -506,10 +489,6 @@ func deferprocStack(d *_defer) {
        *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
        *(*uintptr)(unsafe.Pointer(&d.head)) = 0
        *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
-
-       return0()
-       // No code can go here - the C return register has
-       // been set and must not be clobbered.
 }
 
 // Each P holds a pool for defers.
@@ -927,9 +906,6 @@ func (p *_panic) nextDefer() (func(), bool) {
 
                        fn := d.fn
 
-                       // TODO(mdempsky): Instead of having each deferproc call have
-                       // its own "deferreturn(); return" sequence, we should just make
-                       // them reuse the one we emit for open-coded defers.
                        p.retpc = d.pc
 
                        // Unlink and free.
@@ -1159,6 +1135,15 @@ func recovery(gp *g) {
        pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
        p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0
 
+       // The linker records the f-relative address of a call to deferreturn in f's funcInfo.
+       // Assuming a "normal" call to recover() inside one of f's deferred functions
+       // invoked for a panic, that is the desired PC for exiting f.
+       f := findfunc(pc)
+       if f.deferreturn == 0 {
+               throw("no deferreturn")
+       }
+       gotoPc := f.entry() + uintptr(f.deferreturn)
+
        // Unwind the panic stack.
        for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
                // Don't allow jumping past a pending Goexit.
@@ -1181,7 +1166,7 @@ func recovery(gp *g) {
                // With how subtle defer handling is, this might not actually be
                // worthwhile though.
                if p.goexit {
-                       pc, sp = p.startPC, uintptr(p.startSP)
+                       gotoPc, sp = p.startPC, uintptr(p.startSP)
                        saveOpenDeferState = false // goexit is unwinding the stack anyway
                        break
                }
@@ -1242,11 +1227,9 @@ func recovery(gp *g) {
                throw("bad recovery")
        }
 
-       // Make the deferproc for this d return again,
-       // this time returning 1. The calling function will
-       // jump to the standard return epilogue.
+       // branch directly to the deferreturn
        gp.sched.sp = sp
-       gp.sched.pc = pc
+       gp.sched.pc = gotoPc
        gp.sched.lr = 0
        // Restore the bp on platforms that support frame pointers.
        // N.B. It's fine to not set anything for platforms that don't
@@ -1263,9 +1246,6 @@ func recovery(gp *g) {
                // only gets us to the caller's fp.
                gp.sched.bp = sp - goarch.PtrSize
        }
-       // The value in ret is delivered IN A REGISTER, even if there is a
-       // stack ABI.
-       gp.sched.ret = 1
        gogo(&gp.sched)
 }
 
index ecf97666d75dec9bdca40557d3045e8fd7653dd6..20fc1c59ad933b3a30eabe2a1118ffb59a2fc9e8 100644 (file)
@@ -326,13 +326,6 @@ func morestack_noctxt()
 
 func rt0_go()
 
-// return0 is a stub used to return 0 from deferproc.
-// It is called at the very end of deferproc to signal
-// the calling Go function that it should not jump
-// to deferreturn.
-// in asm_*.s
-func return0()
-
 // in asm_*.s
 // not called directly; definitions here supply type information for traceback.
 // These must have the same signature (arg pointer map) as reflectcall.