Cypherpunks repositories - gostls13.git/commitdiff
runtime/secret: implement new secret package
author    Daniel Morsing <daniel.morsing@gmail.com>
          Thu, 25 Sep 2025 16:26:03 +0000 (17:26 +0100)
committer Gopher Robot <gobot@golang.org>
          Wed, 26 Nov 2025 23:42:52 +0000 (15:42 -0800)
Implement secret.Do.

- When secret.Do returns:
  - Clear stack that is used by the argument function.
  - Clear all the registers that might contain secrets.
- On stack growth in secret mode, clear the old stack.
- When objects are allocated in secret mode, mark them and then zero
  the marked objects immediately when they are freed.
- If the argument function panics, raise that panic as if it originated
  from secret.Do. This removes anything about the secret function
  from tracebacks.

For now, this is only implemented on linux for arm64 and amd64.
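
A minimal sketch of the panic behavior described above, assuming the entry
point is `secret.Do(f func())` as the message implies (hypothetical caller
code; the package only builds under GOEXPERIMENT=runtimesecret):

```go
package main

import (
	"fmt"
	"runtime/secret" // requires GOEXPERIMENT=runtimesecret
)

func main() {
	defer func() {
		// The panic value survives, but it is re-raised as if it
		// originated at secret.Do, so the traceback contains no
		// frames from the secret closure.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	secret.Do(func() {
		panic("key derivation failed") // this frame stays out of the traceback
	})
}
```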

This is a rebased version of Keith Randall's initial implementation at
CL 600635. I have added arm64 support, signal handling, and preemption
handling, and dealt with vDSOs spilling onto system stacks.

Fixes #21865

Change-Id: I6fbd5a233beeaceb160785e0c0199a5c94d8e520
Co-authored-by: Keith Randall <khr@golang.org>
Reviewed-on: https://go-review.googlesource.com/c/go/+/704615
Reviewed-by: Roland Shoemaker <roland@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Filippo Valsorda <filippo@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
42 files changed:
doc/next/6-stdlib/1-secret.md [new file with mode: 0644]
src/cmd/dist/test.go
src/go/build/deps_test.go
src/internal/goexperiment/exp_runtimesecret_off.go [new file with mode: 0644]
src/internal/goexperiment/exp_runtimesecret_on.go [new file with mode: 0644]
src/internal/goexperiment/flags.go
src/runtime/_mkmalloc/mkmalloc.go
src/runtime/asm_amd64.s
src/runtime/asm_arm64.s
src/runtime/malloc.go
src/runtime/malloc_generated.go
src/runtime/malloc_stubs.go
src/runtime/mgc.go
src/runtime/mheap.go
src/runtime/preempt.go
src/runtime/proc.go
src/runtime/runtime2.go
src/runtime/secret.go [new file with mode: 0644]
src/runtime/secret/asm_amd64.s [new file with mode: 0644]
src/runtime/secret/asm_arm64.s [new file with mode: 0644]
src/runtime/secret/crash_test.go [new file with mode: 0644]
src/runtime/secret/export.go [new file with mode: 0644]
src/runtime/secret/secret.go [new file with mode: 0644]
src/runtime/secret/secret_test.go [new file with mode: 0644]
src/runtime/secret/stubs.go [new file with mode: 0644]
src/runtime/secret/stubs_noasm.go [new file with mode: 0644]
src/runtime/secret/testdata/crash.go [new file with mode: 0644]
src/runtime/secret_amd64.s [new file with mode: 0644]
src/runtime/secret_arm64.s [new file with mode: 0644]
src/runtime/secret_asm.go [new file with mode: 0644]
src/runtime/secret_noasm.go [new file with mode: 0644]
src/runtime/secret_nosecret.go [new file with mode: 0644]
src/runtime/signal_linux_amd64.go
src/runtime/signal_linux_arm64.go
src/runtime/signal_unix.go
src/runtime/sizeof_test.go
src/runtime/stack.go
src/runtime/sys_linux_amd64.s
src/runtime/sys_linux_arm64.s
src/runtime/time_linux_amd64.s
src/runtime/vgetrandom_linux.go
src/syscall/asm_linux_amd64.s

diff --git a/doc/next/6-stdlib/1-secret.md b/doc/next/6-stdlib/1-secret.md
new file mode 100644 (file)
index 0000000..738d02f
--- /dev/null
@@ -0,0 +1,20 @@
+### New secret package
+
+<!-- https://go.dev/issue/21865 -->
+
+The new [secret](/pkg/runtime/secret) package is available as an experiment.
+It provides a facility for securely erasing temporaries used in
+code that manipulates secret information, typically cryptographic in nature.
+Users can enable it by setting `GOEXPERIMENT=runtimesecret` at build time.
+
+<!-- if we land any code that uses runtimesecret for forward secrecy
+like crypto/tls, mention them here too -->
+
+The `secret.Do` function runs its function argument and then erases all
+temporary storage (registers, stack, new heap allocations) that the
+argument used. Heap storage is not erased until the garbage collector
+deems it unreachable, which might be some time after `secret.Do`
+returns.
+
+This package is intended to make it easier to ensure [forward
+secrecy](https://en.wikipedia.org/wiki/Forward_secrecy).
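
A minimal usage sketch of the API described above, assuming `Do` has the
signature `func Do(f func())`; `deriveKey` is a hypothetical stand-in for
real key agreement:

```go
// Build with: GOEXPERIMENT=runtimesecret go build
package main

import (
	"crypto/sha256"
	"runtime/secret"
)

// deriveKey stands in for an ECDH or KDF step; illustrative only.
func deriveKey() []byte {
	return []byte("ephemeral key material")
}

func main() {
	var digest [32]byte
	secret.Do(func() {
		// Registers, stack frames, and new heap allocations made here
		// are erased after Do returns (heap objects once the GC frees
		// them), per the note above.
		key := deriveKey()
		digest = sha256.Sum256(key)
	})
	_ = digest // only the non-secret result escapes
}
```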
index 73ea5c4015a23eaaa85486466e826f3ac6c0da99..f8d19ac34c562f6e71a7c384d58514e3586e1ede 100644 (file)
@@ -753,6 +753,15 @@ func (t *tester) registerTests() {
                })
        }
 
+       // Test GOEXPERIMENT=runtimesecret.
+       if !strings.Contains(goexperiment, "runtimesecret") {
+               t.registerTest("GOEXPERIMENT=runtimesecret go test runtime/secret/...", &goTest{
+                       variant: "runtimesecret",
+                       env:     []string{"GOEXPERIMENT=runtimesecret"},
+                       pkg:     "runtime/secret/...",
+               })
+       }
+
        // Test ios/amd64 for the iOS simulator.
        if goos == "darwin" && goarch == "amd64" && t.cgoEnabled {
                t.registerTest("GOOS=ios on darwin/amd64",
index 5466f025e1b11d4c021b645954b6f37453dbea98..e329c8a172725c8c8f89217fb94f473e7163e7b8 100644 (file)
@@ -108,6 +108,7 @@ var depsRules = `
        < internal/runtime/cgroup
        < internal/runtime/gc/scan
        < runtime
+       < runtime/secret
        < sync/atomic
        < internal/sync
        < weak
diff --git a/src/internal/goexperiment/exp_runtimesecret_off.go b/src/internal/goexperiment/exp_runtimesecret_off.go
new file mode 100644 (file)
index 0000000..d203589
--- /dev/null
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.runtimesecret
+
+package goexperiment
+
+const RuntimeSecret = false
+const RuntimeSecretInt = 0
diff --git a/src/internal/goexperiment/exp_runtimesecret_on.go b/src/internal/goexperiment/exp_runtimesecret_on.go
new file mode 100644 (file)
index 0000000..3788953
--- /dev/null
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.runtimesecret
+
+package goexperiment
+
+const RuntimeSecret = true
+const RuntimeSecretInt = 1
index 2e14d4298a6eccf5edf4312c17152bf0b1664633..2cfb71578b421b5328289cae2078f9e7a2547427 100644 (file)
@@ -125,4 +125,7 @@ type Flags struct {
        // SIMD enables the simd package and the compiler's handling
        // of SIMD intrinsics.
        SIMD bool
+
+       // RuntimeSecret enables the runtime/secret package.
+       RuntimeSecret bool
 }
index 1f040c88610dda29a91eb36e658000fd1e2ae427..46c50d6661116958ce50257c2c8887308735b88b 100644 (file)
@@ -171,6 +171,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
                                        {subBasicLit, "elemsize_", str(elemsize)},
                                        {subBasicLit, "sizeclass_", str(sc)},
                                        {subBasicLit, "noscanint_", str(noscan)},
+                                       {subBasicLit, "isTiny_", str(0)},
                                },
                        })
                }
@@ -198,6 +199,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
                                        {subBasicLit, "sizeclass_", str(tinySizeClass)},
                                        {subBasicLit, "size_", str(s)},
                                        {subBasicLit, "noscanint_", str(noscan)},
+                                       {subBasicLit, "isTiny_", str(1)},
                                },
                        })
                }
@@ -215,6 +217,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
                                        {subBasicLit, "elemsize_", str(elemsize)},
                                        {subBasicLit, "sizeclass_", str(sc)},
                                        {subBasicLit, "noscanint_", str(noscan)},
+                                       {subBasicLit, "isTiny_", str(0)},
                                },
                        })
                }
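
The `isTiny_` placeholder above is stamped into the malloc templates as a
literal. A toy version of that substitution step, assuming (as the name
`subBasicLit` suggests) the generator rewrites a named placeholder into a
basic literal; this is a standalone sketch, not the real mkmalloc code:

```go
package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	// Template with a placeholder the generator will specialize.
	src := `package p

const isTiny = isTiny_ == 1`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "tmpl.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Replace the placeholder identifier with the literal "1", the way
	// the tiny variants are stamped with {subBasicLit, "isTiny_", str(1)}.
	ast.Inspect(f, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok && id.Name == "isTiny_" {
			id.Name = "1"
		}
		return true
	})
	printer.Fprint(os.Stdout, fset, f)
}
```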
index ed46ad4a2846b7fdedf690fad9bb3b7fe1953682..bf208a4d2914a0d67616f77aaa6b378d44bbd2d8 100644 (file)
@@ -456,6 +456,13 @@ TEXT gogo<>(SB), NOSPLIT, $0
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
 TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+       CMPL    g_secret(R14), $0
+       JEQ     nosecret
+       CALL    ·secretEraseRegistersMcall(SB)
+nosecret:
+#endif
+
        MOVQ    AX, DX  // DX = fn
 
        // Save state in g->sched. The caller's SP and PC are restored by gogo to
@@ -511,6 +518,17 @@ TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
 
 // func systemstack(fn func())
 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+       // If in secret mode, erase registers on transition
+       // from G stack to M stack.
+       get_tls(CX)
+       MOVQ    g(CX), AX
+       CMPL    g_secret(AX), $0
+       JEQ     nosecret
+       CALL    ·secretEraseRegisters(SB)
+nosecret:
+#endif
+
        MOVQ    fn+0(FP), DI    // DI = fn
        get_tls(CX)
        MOVQ    g(CX), AX       // AX = g
@@ -643,6 +661,18 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
        MOVQ    AX, (m_morebuf+gobuf_sp)(BX)
        MOVQ    DI, (m_morebuf+gobuf_g)(BX)
 
+       // If in secret mode, erase registers on transition
+       // from G stack to M stack.
+#ifdef GOEXPERIMENT_runtimesecret
+       CMPL    g_secret(DI), $0
+       JEQ     nosecret
+       CALL    ·secretEraseRegisters(SB)
+       get_tls(CX)
+       MOVQ    g(CX), DI     // DI = g
+       MOVQ    g_m(DI), BX   // BX = m
+nosecret:
+#endif
+
        // Call newstack on m->g0's stack.
        MOVQ    m_g0(BX), BX
        MOVQ    BX, g(CX)
@@ -917,11 +947,6 @@ TEXT ·asmcgocall_landingpad(SB),NOSPLIT,$0-0
 // aligned appropriately for the gcc ABI.
 // See cgocall.go for more details.
 TEXT ·asmcgocall(SB),NOSPLIT,$0-20
-       MOVQ    fn+0(FP), AX
-       MOVQ    arg+8(FP), BX
-
-       MOVQ    SP, DX
-
        // Figure out if we need to switch to m->g0 stack.
        // We get called to create new OS threads too, and those
        // come in on the m->g0 stack already. Or we might already
@@ -938,6 +963,21 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
        CMPQ    DI, SI
        JEQ     nosave
 
+       // Running on a user G
+       // Figure out if we're running secret code and clear the registers
+       // so that the C code we're about to call doesn't spill confidential
+       // information into memory.
+#ifdef GOEXPERIMENT_runtimesecret
+       CMPL    g_secret(DI), $0
+       JEQ     nosecret
+       CALL    ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
+       MOVQ    fn+0(FP), AX
+       MOVQ    arg+8(FP), BX
+       MOVQ    SP, DX
+
        // Switch to system stack.
        // The original frame pointer is stored in BP,
        // which is useful for stack unwinding.
@@ -976,6 +1016,10 @@ nosave:
        // but then the only path through this code would be a rare case on Solaris.
        // Using this code for all "already on system stack" calls exercises it more,
        // which should help keep it correct.
+       MOVQ    fn+0(FP), AX
+       MOVQ    arg+8(FP), BX
+       MOVQ    SP, DX
+
        SUBQ    $16, SP
        ANDQ    $~15, SP
        MOVQ    $0, 8(SP)               // where above code stores g, in case someone looks during debugging
index 01f2690f4e2b1710327df16d6b8f01fef51151a1..9916378a93a3d02ec522cfe548398ae3235ee2c5 100644 (file)
@@ -300,6 +300,17 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
 TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+       MOVW    g_secret(g), R26
+       CBZ     R26, nosecret
+       // Use R26 as a secondary link register
+       // We purposefully don't erase it in secretEraseRegistersMcall
+       MOVD    LR, R26
+       BL      runtime·secretEraseRegistersMcall(SB)
+       MOVD    R26, LR
+
+nosecret:
+#endif
        MOVD    R0, R26                         // context
 
        // Save caller state in g->sched
@@ -340,6 +351,13 @@ TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
 
 // func systemstack(fn func())
 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+       MOVW    g_secret(g), R3
+       CBZ             R3, nosecret
+       BL              ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
        MOVD    fn+0(FP), R3    // R3 = fn
        MOVD    R3, R26         // context
        MOVD    g_m(g), R4      // R4 = m
@@ -469,6 +487,16 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
        MOVD    R0, (m_morebuf+gobuf_sp)(R8)    // f's caller's RSP
        MOVD    g, (m_morebuf+gobuf_g)(R8)
 
+       // If in secret mode, erase registers on transition
+       // from G stack to M stack.
+#ifdef GOEXPERIMENT_runtimesecret
+       MOVW    g_secret(g), R4
+       CBZ     R4, nosecret
+       BL      ·secretEraseRegisters(SB)
+       MOVD    g_m(g), R8
+nosecret:
+#endif
+
        // Call newstack on m->g0's stack.
        MOVD    m_g0(R8), g
        BL      runtime·save_g(SB)
@@ -1143,12 +1171,7 @@ TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
 // aligned appropriately for the gcc ABI.
 // See cgocall.go for more details.
 TEXT ·asmcgocall(SB),NOSPLIT,$0-20
-       MOVD    fn+0(FP), R1
-       MOVD    arg+8(FP), R0
-
-       MOVD    RSP, R2         // save original stack pointer
        CBZ     g, nosave
-       MOVD    g, R4
 
        // Figure out if we need to switch to m->g0 stack.
        // We get called to create new OS threads too, and those
@@ -1162,6 +1185,23 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
        CMP     R3, g
        BEQ     nosave
 
+       // Running on a user stack. Figure out if we're running
+       // secret code and clear our registers if so.
+#ifdef GOEXPERIMENT_runtimesecret
+       MOVW    g_secret(g), R5
+       CBZ             R5, nosecret
+       BL      ·secretEraseRegisters(SB)
+       // restore g0 back into R3
+       MOVD    g_m(g), R3
+       MOVD    m_g0(R3), R3
+
+nosecret:
+#endif
+       MOVD    fn+0(FP), R1
+       MOVD    arg+8(FP), R0
+       MOVD    RSP, R2
+       MOVD    g, R4
+
        // Switch to system stack.
        MOVD    R0, R9  // gosave_systemstack_switch<> and save_g might clobber R0
        BL      gosave_systemstack_switch<>(SB)
@@ -1208,7 +1248,10 @@ nosave:
        // but then the only path through this code would be a rare case on Solaris.
        // Using this code for all "already on system stack" calls exercises it more,
        // which should help keep it correct.
-       MOVD    RSP, R13
+       MOVD    fn+0(FP), R1
+       MOVD    arg+8(FP), R0
+       MOVD    RSP, R2
+       MOVD    R2, R13
        SUB     $16, R13
        MOVD    R13, RSP
        MOVD    $0, R4
index 4971e16c6aabea3d20bcf10e61bc9a5446a5a0c2..fd79356abab7644a76dc2a1ef4ad7e51bf6d8573 100644 (file)
@@ -1185,7 +1185,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
        } else {
                if size <= maxSmallSize-gc.MallocHeaderSize {
                        if typ == nil || !typ.Pointers() {
-                               if size < maxTinySize {
+                               // Tiny allocations might be kept alive by other co-located values.
+                               // Make sure secret allocations get zeroed by avoiding the tiny allocator.
+                               // See go.dev/issue/76356.
+                               gp := getg()
+                               if size < maxTinySize && gp.secret == 0 {
                                        x, elemsize = mallocgcTiny(size, typ)
                                } else {
                                        x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
@@ -1205,6 +1209,13 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                }
        }
 
+       gp := getg()
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               // Mark any object allocated while in secret mode as secret.
+               // This ensures we zero it immediately when freeing it.
+               addSecret(x)
+       }
+
        // Notify sanitizers, if enabled.
        if raceenabled {
                racemalloc(x, size-asanRZ)
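
The tiny-allocator bypass above exists because tiny objects share a memory
block: one long-lived neighbor keeps the whole block alive, so secret bytes
in it could not be zeroed promptly. A toy model of that co-location hazard,
unrelated to the real runtime types:

```go
package main

import "fmt"

// block models a 16-byte tiny-allocator chunk shared by two 8-byte
// objects; refs counts the objects still alive in it.
type block struct {
	data [16]byte
	refs int
}

// alloc hands out an 8-byte slice at the given offset.
func (b *block) alloc(off int) []byte {
	b.refs++
	return b.data[off : off+8]
}

// free drops one reference. The block can only be erased once the last
// co-located object dies, which is exactly the delay that is
// unacceptable for secrets.
func (b *block) free() {
	b.refs--
	if b.refs == 0 {
		b.data = [16]byte{}
	}
}

func main() {
	var b block
	key := b.alloc(0)      // pretend this holds key material
	neighbor := b.alloc(8) // unrelated long-lived value

	copy(key, "secret!!")
	b.free() // the key's "free" cannot zero the shared block yet
	fmt.Printf("still resident: %q\n", b.data[:8])
	_ = neighbor
}
```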
index 5abb61257a415128a79ba82b8ef7216d9f4c7bd0..6864ca05d31901efa82fdb64ce4ea73c609462a0 100644 (file)
@@ -5,11 +5,19 @@ package runtime
 
 import (
        "internal/goarch"
+       "internal/goexperiment"
        "internal/runtime/sys"
        "unsafe"
 )
 
 func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -151,6 +159,11 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -168,6 +181,13 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -309,6 +329,11 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -326,6 +351,13 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -467,6 +499,11 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -484,6 +521,13 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -625,6 +669,11 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -642,6 +691,13 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -783,6 +839,11 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -800,6 +861,13 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -941,6 +1009,11 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -958,6 +1031,13 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1099,6 +1179,11 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -1116,6 +1201,13 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1257,6 +1349,11 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -1274,6 +1371,13 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1415,6 +1519,11 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -1432,6 +1541,13 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1573,6 +1689,11 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -1590,6 +1711,13 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1731,6 +1859,11 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -1748,6 +1881,13 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -1889,6 +2029,11 @@ func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -1906,6 +2051,13 @@ func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2047,6 +2199,11 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -2064,6 +2221,13 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2205,6 +2369,11 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -2222,6 +2391,13 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2363,6 +2539,11 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -2380,6 +2561,13 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2521,6 +2709,11 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -2538,6 +2731,13 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2679,6 +2879,11 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -2696,6 +2901,13 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2837,6 +3049,11 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -2854,6 +3071,13 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -2995,6 +3219,11 @@ func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3012,6 +3241,13 @@ func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -3153,6 +3389,11 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3170,6 +3411,13 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -3311,6 +3559,11 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3328,6 +3581,13 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -3469,6 +3729,11 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3486,6 +3751,13 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -3627,6 +3899,11 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3644,6 +3921,13 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -3785,6 +4069,11 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3802,6 +4091,13 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -3943,6 +4239,11 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -3960,6 +4261,13 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4101,6 +4409,11 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -4118,6 +4431,13 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa
 }
 
 func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4169,6 +4489,11 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -4251,6 +4576,11 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -4268,6 +4598,13 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4319,6 +4656,11 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -4401,6 +4743,11 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -4418,6 +4765,13 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4469,6 +4823,11 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -4551,6 +4910,11 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -4568,6 +4932,13 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4619,6 +4990,11 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -4701,6 +5077,11 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -4718,6 +5099,13 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4769,6 +5157,11 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -4851,6 +5244,11 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -4868,6 +5266,13 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -4919,6 +5324,11 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5001,6 +5411,11 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5018,6 +5433,13 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5069,6 +5491,11 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5151,6 +5578,11 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5168,6 +5600,13 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5219,6 +5658,11 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5301,6 +5745,11 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5318,6 +5767,13 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5369,6 +5825,11 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5451,6 +5912,11 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5468,6 +5934,13 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5519,6 +5992,11 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5601,6 +6079,11 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5618,6 +6101,13 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5669,6 +6159,11 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5751,6 +6246,11 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5768,6 +6268,13 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5819,6 +6326,11 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -5901,6 +6413,11 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -5918,6 +6435,13 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 ==
+               1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -5969,6 +6493,11 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6051,6 +6580,11 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6068,6 +6602,13 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6119,6 +6660,11 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6201,6 +6747,11 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6218,6 +6769,13 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 1 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6269,6 +6827,11 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
                const elemsize = 0
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6351,6 +6914,11 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
                x = add(x, elemsize-constsize)
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6368,6 +6936,13 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 }
 
 func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6409,6 +6984,11 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6474,6 +7054,11 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6491,6 +7076,13 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6532,6 +7124,11 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6597,6 +7194,11 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6614,6 +7216,13 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6655,6 +7264,11 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6720,6 +7334,11 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6737,6 +7356,13 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6778,6 +7404,11 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6843,6 +7474,11 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6860,6 +7496,13 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -6901,6 +7544,11 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -6966,6 +7614,11 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -6983,6 +7636,13 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7024,6 +7684,11 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7089,6 +7754,11 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7106,6 +7776,13 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7147,6 +7824,11 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7212,6 +7894,11 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7229,6 +7916,13 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7270,6 +7964,11 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7335,6 +8034,11 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7352,6 +8056,13 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin
 }
 
 func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7393,6 +8104,11 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7458,6 +8174,11 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7475,6 +8196,13 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7516,6 +8244,11 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7581,6 +8314,11 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7598,6 +8336,13 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7639,6 +8384,11 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7704,6 +8454,11 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7721,6 +8476,13 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7762,6 +8524,11 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7827,6 +8594,11 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7844,6 +8616,13 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -7885,6 +8664,11 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -7950,6 +8734,11 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -7967,6 +8756,13 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8008,6 +8804,11 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8073,6 +8874,11 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8090,6 +8896,13 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8131,6 +8944,11 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8196,6 +9014,11 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8213,6 +9036,13 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8254,6 +9084,11 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8319,6 +9154,11 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8336,6 +9176,13 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8377,6 +9224,11 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8442,6 +9294,11 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8459,6 +9316,13 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8500,6 +9364,11 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8565,6 +9434,11 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8582,6 +9456,13 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8623,6 +9504,11 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8688,6 +9574,11 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8705,6 +9596,13 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8746,6 +9644,11 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8811,6 +9714,11 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8828,6 +9736,13 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8869,6 +9784,11 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -8934,6 +9854,11 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -8951,6 +9876,13 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -8992,6 +9924,11 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -9057,6 +9994,11 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -9074,6 +10016,13 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -9115,6 +10064,11 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -9180,6 +10134,11 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -9197,6 +10156,13 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -9238,6 +10204,11 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -9303,6 +10274,11 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
@@ -9320,6 +10296,13 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi
 }
 
 func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       const isTiny = 0 == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -9361,6 +10344,11 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi
                x := v
                {
 
+                       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+                               addSecret(x)
+                       }
+
                        if valgrindenabled {
                                valgrindMalloc(x, size)
                        }
@@ -9426,6 +10414,11 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi
                        gcStart(t)
                }
        }
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+               addSecret(x)
+       }
+
        if valgrindenabled {
                valgrindMalloc(x, size)
        }
index e9752956b82e0d745de41f20fc1ef34d6ef7e441..58ca1d5f79f805a9296505504a49ad896a928bdb 100644 (file)
@@ -22,6 +22,7 @@ package runtime
 
 import (
        "internal/goarch"
+       "internal/goexperiment"
        "internal/runtime/sys"
        "unsafe"
 )
@@ -36,6 +37,7 @@ const elemsize_ = 8
 const sizeclass_ = 0
 const noscanint_ = 0
 const size_ = 0
+const isTiny_ = 0
 
 func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
        if doubleCheckMalloc {
@@ -55,6 +57,17 @@ func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 // WARNING: mallocStub does not do any work for sanitizers so callers need
 // to steer out of this codepath early if sanitizers are enabled.
 func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+       // Secret code needs to avoid the tiny allocator, since it might keep
+       // co-located values alive longer and prevent timely zeroing.
+       //
+       // Call directly into the NoScan allocator.
+       // See go.dev/issue/76356
+       const isTiny = isTiny_ == 1
+       gp := getg()
+       if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+               return mallocgcSmallNoScanSC2(size, typ, needzero)
+       }
        if doubleCheckMalloc {
                if gcphase == _GCmarktermination {
                        throw("mallocgc called with gcphase == _GCmarktermination")
@@ -82,6 +95,12 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
        // Actually do the allocation.
        x, elemsize := inlinedMalloc(size, typ, needzero)
 
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               // Mark any object allocated while in secret mode as secret.
+               // This ensures we zero it immediately when freeing it.
+               addSecret(x)
+       }
+
        // Notify valgrind, if enabled.
        // To allow the compiler to not know about valgrind, we do valgrind instrumentation
        // unlike the other sanitizers.
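
All of the generated fast paths above reduce to the shape of this stub: consult the goroutine's secret nesting counter, divert tiny allocations to a dedicated small-object path, and tag every other allocation for zero-on-free. A self-contained toy model of that dispatch (illustrative only; g, mallocTiny, and the string results here are stand-ins, not the runtime's real types):

package main

import "fmt"

// g is a stand-in for the runtime's g; only the nesting counter matters here.
type g struct{ secret int32 }

// In the generated tiny-allocator variants, isTiny_ is substituted as 1.
const isTiny = true

// mallocTiny models the branch at the top of mallocStub: while in secret
// mode, tiny allocations bypass the tiny allocator so that unrelated values
// are never co-located with secret data.
func mallocTiny(gp *g) string {
	if isTiny && gp.secret > 0 {
		return "mallocgcSmallNoScanSC2" // dedicated no-scan size class
	}
	return "tiny allocator"
}

func main() {
	gp := &g{}
	fmt.Println(mallocTiny(gp)) // tiny allocator
	gp.secret++                 // entered a secret.Do call
	fmt.Println(mallocTiny(gp)) // mallocgcSmallNoScanSC2
}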
index febcd9558c09763ef0be646899c091508d85464d..32cd8cb0e892647954bef0e6806c7453eb41e2a4 100644 (file)
@@ -838,6 +838,33 @@ func gcStart(trigger gcTrigger) {
        // Accumulate fine-grained stopping time.
        work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
 
+       if goexperiment.RuntimeSecret {
+               // The world is stopped. Every M is either parked, in a
+               // syscall, or running non-Go code, which can't run in secret mode.
+               // To get to a parked or syscall state, the Ms have to transition
+               // through a point where we erase any confidential information
+               // in the registers. Making them
+               // handle a signal now would clobber the signal stack
+               // with non-confidential information.
+               //
+               // TODO(dmo): this is linear with respect to the number of Ms.
+               // Investigate just how long this takes and whether we can somehow
+               // loop over just the Ms that have secret info on their signal stack,
+               // or cooperatively have the Ms send signals to themselves just
+               // after they erase their registers, but before they enter a syscall.
+               for mp := allm; mp != nil; mp = mp.alllink {
+                       // Even though the world is stopped, the kernel can still
+                       // invoke our signal handlers. No confidential information can be spilled
+                       // (because it's been erased by this time), but we can avoid
+                       // sending additional signals by atomically inspecting this variable.
+                       if atomic.Xchg(&mp.signalSecret, 0) != 0 {
+                               noopSignal(mp)
+                       }
+                       // TODO: synchronize with the signal handler to ensure that the signal
+                       // was actually delivered.
+               }
+       }
+
        // Finish sweep before we start concurrent scan.
        systemstack(func() {
                finishsweep_m()
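
The atomic.Xchg in the loop above is what keeps this stop-the-world pass from signaling an M whose signal stack is already clean: the first inspection claims the flag, and every later one reads zero. A minimal sketch of that claim-once pattern with the public sync/atomic API (signalSecret here is a local stand-in for the new m field):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var signalSecret uint32 = 1 // set while secrets were live on the signal stack

	// The first inspection claims the flag and sends the no-op signal.
	if atomic.SwapUint32(&signalSecret, 0) != 0 {
		fmt.Println("signal stack dirty: send no-op signal")
	}
	// Any later inspection reads 0 and skips the redundant signal.
	if atomic.SwapUint32(&signalSecret, 0) != 0 {
		fmt.Println("never reached: flag already claimed")
	}
}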
index 0ccaadc891ba9d1e1c24640c175b28154cff3084..61dc5457fc14ca635290070858bbc1e3613b0188 100644 (file)
@@ -225,6 +225,7 @@ type mheap struct {
        specialPinCounterAlloc     fixalloc // allocator for specialPinCounter
        specialWeakHandleAlloc     fixalloc // allocator for specialWeakHandle
        specialBubbleAlloc         fixalloc // allocator for specialBubble
+       specialSecretAlloc         fixalloc // allocator for specialSecret
        speciallock                mutex    // lock for special record allocators.
        arenaHintAlloc             fixalloc // allocator for arenaHints
 
@@ -803,6 +804,7 @@ func (h *mheap) init() {
        h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
        h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
        h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
+       h.specialSecretAlloc.init(unsafe.Sizeof(specialSecret{}), nil, nil, &memstats.other_sys)
        h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys)
        h.specialBubbleAlloc.init(unsafe.Sizeof(specialBubble{}), nil, nil, &memstats.other_sys)
        h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
@@ -1970,6 +1972,9 @@ const (
        _KindSpecialCheckFinalizer = 8
        // _KindSpecialBubble is used to associate objects with synctest bubbles.
        _KindSpecialBubble = 9
+       // _KindSpecialSecret is a special used to mark an object
+       // as needing zeroing immediately upon freeing.
+       _KindSpecialSecret = 10
 )
 
 type special struct {
@@ -2822,6 +2827,11 @@ func freeSpecial(s *special, p unsafe.Pointer, size uintptr) {
                lock(&mheap_.speciallock)
                mheap_.specialBubbleAlloc.free(unsafe.Pointer(st))
                unlock(&mheap_.speciallock)
+       case _KindSpecialSecret:
+               memclrNoHeapPointers(p, size)
+               lock(&mheap_.speciallock)
+               mheap_.specialSecretAlloc.free(unsafe.Pointer(s))
+               unlock(&mheap_.speciallock)
        default:
                throw("bad special kind")
                panic("not reached")
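
This freeSpecial case is the zero-on-free half of the scheme: objects tagged by addSecret have their memory cleared the moment the sweeper frees them, instead of whenever the span happens to be reused. A toy analog of the policy (a sketch, not runtime code; the pool type and its names are invented for illustration):

package main

import "fmt"

// pool models a free list that clears tagged objects on free, the way
// freeSpecial handles _KindSpecialSecret.
type pool struct {
	secret map[*byte]bool // objects tagged as secret (addSecret analog)
}

func (p *pool) free(b []byte) {
	if len(b) > 0 && p.secret[&b[0]] {
		clear(b) // memclrNoHeapPointers analog: zero before reuse
		delete(p.secret, &b[0])
	}
	// ... return b to the free list ...
}

func main() {
	p := &pool{secret: map[*byte]bool{}}
	key := []byte("key material")
	p.secret[&key[0]] = true // allocated while in secret mode
	p.free(key)
	fmt.Printf("after free: %q\n", key) // all zero bytes
}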
index 447c7399fcb9029a826476b3fc8fe5809cc7c184..892f9000731b13850f54bc0fa6100977d38771fa 100644 (file)
@@ -55,6 +55,7 @@ package runtime
 import (
        "internal/abi"
        "internal/goarch"
+       "internal/goexperiment"
        "internal/stringslite"
 )
 
@@ -406,6 +407,22 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
                return false, 0
        }
 
+       // If we're in the middle of a secret computation, we can't
+       // allow any conservative scanning of stacks, as that may lead
+       // to secrets leaking out from the stack into work buffers.
+       // Additionally, the preemption code will store the
+       // machine state (including registers which may contain confidential
+       // information) into the preemption buffers.
+       //
+       // TODO(dmo): there's technically nothing stopping us from doing the
+       // preemption, provided that we don't conservatively scan and we clean up
+       // after ourselves. This is made slightly harder by the xRegs cached allocations
+       // that can move between Gs and Ps. In any case, the intended users (cryptography
+       // code) are unlikely to get stuck in non-terminating loops.
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               return false, 0
+       }
+
        // Check if PC is an unsafe-point.
        f := findfunc(pc)
        if !f.valid() {
index 3b98be107483112213dfbe00b8e29ad20e55e706..16538098cf6c152627d8936cdf15a635f9de24fd 100644 (file)
@@ -8,6 +8,7 @@ import (
        "internal/abi"
        "internal/cpu"
        "internal/goarch"
+       "internal/goexperiment"
        "internal/goos"
        "internal/runtime/atomic"
        "internal/runtime/exithook"
@@ -4454,6 +4455,13 @@ func goexit1() {
 
 // goexit continuation on g0.
 func goexit0(gp *g) {
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               // Erase the whole stack. This path only occurs when
+               // runtime.Goexit is called from within a runtime/secret.Do call.
+               memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+               // Since this is running on g0, our registers are already zeroed from going through
+               // mcall in secret mode.
+       }
        gdestroy(gp)
        schedule()
 }
@@ -4482,6 +4490,7 @@ func gdestroy(gp *g) {
        gp.timer = nil
        gp.bubble = nil
        gp.fipsOnlyBypass = false
+       gp.secret = 0
 
        if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
                // Flush assist credit to the global pool. This gives
@@ -5216,6 +5225,10 @@ func malg(stacksize int32) *g {
 // The compiler turns a go statement into a call to this.
 func newproc(fn *funcval) {
        gp := getg()
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               panic("goroutine spawned while running in secret mode")
+       }
+
        pc := sys.GetCallerPC()
        systemstack(func() {
                newg := newproc1(fn, gp, pc, false, waitReasonZero)
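
With the newproc guard, a go statement inside a secret region is a hard error rather than a silent leak of secret-tainted state into a new goroutine. Assuming the experimental API from this CL (GOEXPERIMENT=runtimesecret on linux/amd64 or linux/arm64, with secret.Do taking a func()), the failure mode would look like:

package main

import "runtime/secret"

func main() {
	secret.Do(func() {
		// panics: "goroutine spawned while running in secret mode"
		go func() {}()
	})
}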
index 58eaf80237234b41e91a50cd3f956ec502ef58c8..cd75e2dd7c5a5e3ff428c6ccadcd1c9fcadb245d 100644 (file)
@@ -549,6 +549,7 @@ type g struct {
        syncSafePoint   bool // set if g is stopped at a synchronous safe point.
        runningCleanups atomic.Bool
        sig             uint32
+       secret          int32 // current nesting of runtime/secret.Do calls.
        writebuf        []byte
        sigcode0        uintptr
        sigcode1        uintptr
@@ -620,14 +621,15 @@ type m struct {
 
        // Fields whose offsets are not known to debuggers.
 
-       procid     uint64            // for debuggers, but offset not hard-coded
-       gsignal    *g                // signal-handling g
-       goSigStack gsignalStack      // Go-allocated signal handling stack
-       sigmask    sigset            // storage for saved signal mask
-       tls        [tlsSlots]uintptr // thread-local storage (for x86 extern register)
-       mstartfn   func()
-       curg       *g       // current running goroutine
-       caughtsig  guintptr // goroutine running during fatal signal
+       procid       uint64            // for debuggers, but offset not hard-coded
+       gsignal      *g                // signal-handling g
+       goSigStack   gsignalStack      // Go-allocated signal handling stack
+       sigmask      sigset            // storage for saved signal mask
+       tls          [tlsSlots]uintptr // thread-local storage (for x86 extern register)
+       mstartfn     func()
+       curg         *g       // current running goroutine
+       caughtsig    guintptr // goroutine running during fatal signal
+       signalSecret uint32   // whether we have secret information in our signal stack
 
        // p is the currently attached P for executing Go code, nil if not executing user Go code.
        //
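
g.secret is a counter rather than a bool so that secret.Do calls can nest; the goroutine only leaves secret mode when the outermost Do returns. A quick model of why the counter matters:

package main

import "fmt"

func main() {
	var secret int32
	enter := func() { secret++ } // secret_inc analog
	leave := func() { secret-- } // secret_dec analog

	enter()                 // outer secret.Do
	enter()                 // inner secret.Do, e.g. a crypto helper that also uses Do
	leave()                 // inner Do returns...
	fmt.Println(secret > 0) // true: still in secret mode
	leave()                 // ...outer Do returns
	fmt.Println(secret > 0) // false: now erasure and normal rules apply
}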
diff --git a/src/runtime/secret.go b/src/runtime/secret.go
new file mode 100644 (file)
index 0000000..4c199d3
--- /dev/null
@@ -0,0 +1,118 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || arm64) && linux
+
+package runtime
+
+import (
+       "internal/goarch"
+       "unsafe"
+)
+
+//go:linkname secret_count runtime/secret.count
+func secret_count() int32 {
+       return getg().secret
+}
+
+//go:linkname secret_inc runtime/secret.inc
+func secret_inc() {
+       gp := getg()
+       gp.secret++
+}
+
+//go:linkname secret_dec runtime/secret.dec
+func secret_dec() {
+       gp := getg()
+       gp.secret--
+}
+
+//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets
+func secret_eraseSecrets() {
+       // zero all the stack memory that might be dirtied with
+       // secrets. We do this from the systemstack so that we
+       // don't have to figure out which holes we have to keep
+       // to ensure that we can return from memclr. gp.sched will
+       // act as a pigeonhole for our actual return.
+       lo := getg().stack.lo
+       systemstack(func() {
+               // Note: this systemstack call happens within secret mode,
+               // so we don't have to call out to erase our registers; the
+               // systemstack code will do that.
+               mp := acquirem()
+               sp := mp.curg.sched.sp
+               // We need to keep the systemstack return PC on top of the stack
+               // being cleared, for traceback.
+               sp -= goarch.PtrSize
+               // TODO: keep some sort of low water mark so that we don't have
+               // to zero a potentially large stack if we used just a little
+               // bit of it. That will allow us to use a higher value for
+               // lo than gp.stack.lo.
+               memclrNoHeapPointers(unsafe.Pointer(lo), sp-lo)
+               releasem(mp)
+       })
+       // Don't put any code here: the stack frame's contents are gone!
+}
+
+// specialSecret tracks whether we need to zero an object immediately
+// upon freeing.
+type specialSecret struct {
+       special special
+}
+
+// addSecret records the fact that we need to zero p immediately
+// when it is freed.
+func addSecret(p unsafe.Pointer) {
+       // TODO(dmo): figure out the cost of these. These are mostly
+       // intended to catch allocations that happen via the runtime,
+       // which the user has no control over, rather than big buffers
+       // that user code is allocating. The cost should be relatively low,
+       // but we have run into a wall with other special allocations before.
+       lock(&mheap_.speciallock)
+       s := (*specialSecret)(mheap_.specialSecretAlloc.alloc())
+       s.special.kind = _KindSpecialSecret
+       unlock(&mheap_.speciallock)
+       addspecial(p, &s.special, false)
+}
+
+// noopSignal sends a no-op signal to an M for the purpose of
+// clobbering the signal stack.
+//
+// We use sigPreempt. If we don't have a preemption queued, this just
+// turns into a no-op.
+func noopSignal(mp *m) {
+       signalM(mp, sigPreempt)
+}
+
+// secret_getStack returns the memory range of the
+// current goroutine's stack.
+// For testing only.
+// Note that this is kind of tricky, as the goroutine can
+// be copied and/or exit before the result is used, at which
+// point it may no longer be valid.
+//
+//go:linkname secret_getStack runtime/secret.getStack
+func secret_getStack() (uintptr, uintptr) {
+       gp := getg()
+       return gp.stack.lo, gp.stack.hi
+}
+
+// secret_appendSignalStacks returns a slice of all Ms' signal stacks.
+// For testing only.
+//
+//go:linkname secret_appendSignalStacks runtime/secret.appendSignalStacks
+func secret_appendSignalStacks(sigstacks []stack) []stack {
+       // This is probably overkill, but it's what
+       // doAllThreadsSyscall does
+       stw := stopTheWorld(stwAllThreadsSyscall)
+       allocmLock.lock()
+       acquirem()
+       for mp := allm; mp != nil; mp = mp.alllink {
+               sigstacks = append(sigstacks, mp.gsignal.stack)
+       }
+       releasem(getg().m)
+       allocmLock.unlock()
+       startTheWorld(stw)
+       return sigstacks
+}
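
Taken together, these hooks are the runtime half of the package; the package side presumably layers secret.Do on top of inc/dec and triggers eraseSecrets on the way out. A minimal sketch of that layering, with the ordering and the register-erasing assembly glue simplified away (per the commit description, the real Do also clears registers and re-raises panics as if they originated from Do itself):

// Do runs f in secret mode (illustrative sketch; inc, dec, and
// eraseSecrets are the linknamed runtime hooks defined above).
func Do(f func()) {
	inc() // getg().secret++: allocations get tagged, async preemption is restricted
	defer func() {
		eraseSecrets() // zero the goroutine stack that f dirtied
		dec()          // getg().secret--: leave secret mode
	}()
	f()
}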
diff --git a/src/runtime/secret/asm_amd64.s b/src/runtime/secret/asm_amd64.s
new file mode 100644 (file)
index 0000000..7011afc
--- /dev/null
@@ -0,0 +1,213 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Note: this assembly file is used for testing only.
+// We need to access registers directly to properly test
+// that secrets are erased and go test doesn't like to conditionally
+// include assembly files.
+// These functions are declared in the package proper, and we
+// rely on the linker to prune them away in regular builds.
+
+#include "go_asm.h"
+#include "funcdata.h"
+
+TEXT ·loadRegisters(SB),0,$0-8
+       MOVQ    p+0(FP), AX
+
+       MOVQ    (AX), R10
+       MOVQ    (AX), R11
+       MOVQ    (AX), R12
+       MOVQ    (AX), R13
+
+       MOVOU   (AX), X1
+       MOVOU   (AX), X2
+       MOVOU   (AX), X3
+       MOVOU   (AX), X4
+
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX(SB), $1
+       JNE     return
+
+       VMOVDQU (AX), Y5
+       VMOVDQU (AX), Y6
+       VMOVDQU (AX), Y7
+       VMOVDQU (AX), Y8
+
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+       JNE     return
+
+       VMOVUPD (AX), Z14
+       VMOVUPD (AX), Z15
+       VMOVUPD (AX), Z16
+       VMOVUPD (AX), Z17
+
+       KMOVQ   (AX), K2
+       KMOVQ   (AX), K3
+       KMOVQ   (AX), K4
+       KMOVQ   (AX), K5
+
+return:
+       RET
+
+TEXT ·spillRegisters(SB),0,$0-16
+       MOVQ    p+0(FP), AX
+       MOVQ    AX, BX
+
+       MOVQ    R10, (AX)
+       MOVQ    R11, 8(AX)
+       MOVQ    R12, 16(AX)
+       MOVQ    R13, 24(AX)
+       ADDQ    $32, AX
+
+       MOVOU   X1, (AX)
+       MOVOU   X2, 16(AX)
+       MOVOU   X3, 32(AX)
+       MOVOU   X4, 48(AX)
+       ADDQ    $64, AX
+
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX(SB), $1
+       JNE     return
+
+       VMOVDQU Y5, (AX)
+       VMOVDQU Y6, 32(AX)
+       VMOVDQU Y7, 64(AX)
+       VMOVDQU Y8, 96(AX)
+       ADDQ    $128, AX
+
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+       JNE     return
+
+       VMOVUPD Z14, (AX)
+       ADDQ    $64, AX
+       VMOVUPD Z15, (AX)
+       ADDQ    $64, AX
+       VMOVUPD Z16, (AX)
+       ADDQ    $64, AX
+       VMOVUPD Z17, (AX)
+       ADDQ    $64, AX
+
+       KMOVQ   K2, (AX)
+       ADDQ    $8, AX
+       KMOVQ   K3, (AX)
+       ADDQ    $8, AX
+       KMOVQ   K4, (AX)
+       ADDQ    $8, AX
+       KMOVQ   K5, (AX)
+       ADDQ    $8, AX
+
+return:
+       SUBQ    BX, AX
+       MOVQ    AX, ret+8(FP)
+       RET
+
+TEXT ·useSecret(SB),0,$64-24
+       NO_LOCAL_POINTERS
+
+       // Load secret into AX
+       MOVQ    secret_base+0(FP), AX
+       MOVQ    (AX), AX
+
+       // Scatter secret all across registers.
+       // Increment low byte so we can tell which register
+       // a leaking secret came from.
+       ADDQ    $2, AX // add 2 so Rn has secret #n.
+       MOVQ    AX, BX
+       INCQ    AX
+       MOVQ    AX, CX
+       INCQ    AX
+       MOVQ    AX, DX
+       INCQ    AX
+       MOVQ    AX, SI
+       INCQ    AX
+       MOVQ    AX, DI
+       INCQ    AX
+       MOVQ    AX, BP
+       INCQ    AX
+       MOVQ    AX, R8
+       INCQ    AX
+       MOVQ    AX, R9
+       INCQ    AX
+       MOVQ    AX, R10
+       INCQ    AX
+       MOVQ    AX, R11
+       INCQ    AX
+       MOVQ    AX, R12
+       INCQ    AX
+       MOVQ    AX, R13
+       INCQ    AX
+       MOVQ    AX, R14
+       INCQ    AX
+       MOVQ    AX, R15
+
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+       JNE     noavx512
+       VMOVUPD (SP), Z0
+       VMOVUPD (SP), Z1
+       VMOVUPD (SP), Z2
+       VMOVUPD (SP), Z3
+       VMOVUPD (SP), Z4
+       VMOVUPD (SP), Z5
+       VMOVUPD (SP), Z6
+       VMOVUPD (SP), Z7
+       VMOVUPD (SP), Z8
+       VMOVUPD (SP), Z9
+       VMOVUPD (SP), Z10
+       VMOVUPD (SP), Z11
+       VMOVUPD (SP), Z12
+       VMOVUPD (SP), Z13
+       VMOVUPD (SP), Z14
+       VMOVUPD (SP), Z15
+       VMOVUPD (SP), Z16
+       VMOVUPD (SP), Z17
+       VMOVUPD (SP), Z18
+       VMOVUPD (SP), Z19
+       VMOVUPD (SP), Z20
+       VMOVUPD (SP), Z21
+       VMOVUPD (SP), Z22
+       VMOVUPD (SP), Z23
+       VMOVUPD (SP), Z24
+       VMOVUPD (SP), Z25
+       VMOVUPD (SP), Z26
+       VMOVUPD (SP), Z27
+       VMOVUPD (SP), Z28
+       VMOVUPD (SP), Z29
+       VMOVUPD (SP), Z30
+       VMOVUPD (SP), Z31
+
+noavx512:
+       MOVOU   (SP), X0
+       MOVOU   (SP), X1
+       MOVOU   (SP), X2
+       MOVOU   (SP), X3
+       MOVOU   (SP), X4
+       MOVOU   (SP), X5
+       MOVOU   (SP), X6
+       MOVOU   (SP), X7
+       MOVOU   (SP), X8
+       MOVOU   (SP), X9
+       MOVOU   (SP), X10
+       MOVOU   (SP), X11
+       MOVOU   (SP), X12
+       MOVOU   (SP), X13
+       MOVOU   (SP), X14
+       MOVOU   (SP), X15
+
+       // Put secret on the stack.
+       INCQ    AX
+       MOVQ    AX, (SP)
+       MOVQ    AX, 8(SP)
+       MOVQ    AX, 16(SP)
+       MOVQ    AX, 24(SP)
+       MOVQ    AX, 32(SP)
+       MOVQ    AX, 40(SP)
+       MOVQ    AX, 48(SP)
+       MOVQ    AX, 56(SP)
+
+       // Delay a bit.  This makes it more likely that
+       // we will be the target of a signal while
+       // registers contain secrets.
+       // It also tests the path from G stack to M stack
+       // to scheduler and back.
+       CALL    ·delay(SB)
+
+       RET
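The low-byte ID scheme above makes any leak attributable to a specific register. A hedged sketch of decoding an ID byte found in a core dump (amd64RegisterForID is a hypothetical helper for illustration; the real scan lives in crash_test.go):

	// amd64RegisterForID maps the low ID byte of a leaked secret back to the
	// register useSecret placed it in: BX holds secret+2, CX secret+3, and so
	// on up to R15; one extra increment precedes the stack stores.
	func amd64RegisterForID(id byte) string {
		regs := []string{
			"BX", "CX", "DX", "SI", "DI", "BP",
			"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
		}
		i := int(id) - 2
		switch {
		case i >= 0 && i < len(regs):
			return regs[i]
		case i == len(regs): // the INCQ before the stack stores
			return "stack"
		}
		return "unknown (vector register or a copy)"
	}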
diff --git a/src/runtime/secret/asm_arm64.s b/src/runtime/secret/asm_arm64.s
new file mode 100644 (file)
index 0000000..1d7f7c1
--- /dev/null
@@ -0,0 +1,167 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Note: this assembly file is used for testing only.
+// We need to access registers directly to properly test
+// that secrets are erased, and go test doesn't like to conditionally
+// include assembly files.
+// These functions are declared in the package proper, and we
+// rely on the linker to prune them away in regular builds.
+
+#include "go_asm.h"
+#include "funcdata.h"
+
+TEXT ·loadRegisters(SB),0,$0-8
+       MOVD    p+0(FP), R0
+
+       MOVD    (R0), R10
+       MOVD    (R0), R11
+       MOVD    (R0), R12
+       MOVD    (R0), R13
+
+       FMOVD   (R0), F15
+       FMOVD   (R0), F16
+       FMOVD   (R0), F17
+       FMOVD   (R0), F18
+
+       VLD1    (R0), [V20.B16]
+       VLD1    (R0), [V21.H8]
+       VLD1    (R0), [V22.S4]
+       VLD1    (R0), [V23.D2]
+
+       RET
+
+TEXT ·spillRegisters(SB),0,$0-16
+       MOVD    p+0(FP), R0
+       MOVD    R0, R1
+
+       MOVD    R10, (R0)
+       MOVD    R11, 8(R0)
+       MOVD    R12, 16(R0)
+       MOVD    R13, 24(R0)
+       ADD     $32, R0
+
+       // FMOVD stores 8 bytes; pack the four doubles contiguously.
+       FMOVD   F15, (R0)
+       FMOVD   F16, 8(R0)
+       FMOVD   F17, 16(R0)
+       FMOVD   F18, 24(R0)
+       ADD     $32, R0
+
+       VST1.P  [V20.B16], (R0)
+       VST1.P  [V21.H8], (R0)
+       VST1.P  [V22.S4], (R0)
+       VST1.P  [V23.D2], (R0)
+
+       SUB     R1, R0, R0
+       MOVD    R0, ret+8(FP)
+       RET
+
+TEXT ·useSecret(SB),0,$0-24
+       NO_LOCAL_POINTERS
+
+       // Load secret into R0
+       MOVD    secret_base+0(FP), R0
+       MOVD    (R0), R0
+       // Scatter secret across registers.
+       // Increment low byte so we can tell which register
+       // a leaking secret came from.
+
+       // TODO(dmo): more substantial dirtying here
+       ADD     $1, R0
+       MOVD    R0, R1
+       ADD     $1, R0
+       MOVD    R0, R2
+       ADD     $1, R0
+       MOVD    R0, R3
+       ADD     $1, R0
+       MOVD    R0, R4
+       ADD     $1, R0
+       MOVD    R0, R5
+       ADD     $1, R0
+       MOVD    R0, R6
+       ADD     $1, R0
+       MOVD    R0, R7
+       ADD     $1, R0
+       MOVD    R0, R8
+       ADD     $1, R0
+       MOVD    R0, R9
+       ADD     $1, R0
+       MOVD    R0, R10
+       ADD     $1, R0
+       MOVD    R0, R11
+       ADD     $1, R0
+       MOVD    R0, R12
+       ADD     $1, R0
+       MOVD    R0, R13
+       ADD     $1, R0
+       MOVD    R0, R14
+       ADD     $1, R0
+       MOVD    R0, R15
+
+       // Dirty the floating point registers
+       ADD     $1, R0
+       FMOVD   R0, F0
+       ADD     $1, R0
+       FMOVD   R0, F1
+       ADD     $1, R0
+       FMOVD   R0, F2
+       ADD     $1, R0
+       FMOVD   R0, F3
+       ADD     $1, R0
+       FMOVD   R0, F4
+       ADD     $1, R0
+       FMOVD   R0, F5
+       ADD     $1, R0
+       FMOVD   R0, F6
+       ADD     $1, R0
+       FMOVD   R0, F7
+       ADD     $1, R0
+       FMOVD   R0, F8
+       ADD     $1, R0
+       FMOVD   R0, F9
+       ADD     $1, R0
+       FMOVD   R0, F10
+       ADD     $1, R0
+       FMOVD   R0, F11
+       ADD     $1, R0
+       FMOVD   R0, F12
+       ADD     $1, R0
+       FMOVD   R0, F13
+       ADD     $1, R0
+       FMOVD   R0, F14
+       ADD     $1, R0
+       FMOVD   R0, F15
+       ADD     $1, R0
+       FMOVD   R0, F16
+       ADD     $1, R0
+       FMOVD   R0, F17
+       ADD     $1, R0
+       FMOVD   R0, F18
+       ADD     $1, R0
+       FMOVD   R0, F19
+       ADD     $1, R0
+       FMOVD   R0, F20
+       ADD     $1, R0
+       FMOVD   R0, F21
+       ADD     $1, R0
+       FMOVD   R0, F22
+       ADD     $1, R0
+       FMOVD   R0, F23
+       ADD     $1, R0
+       FMOVD   R0, F24
+       ADD     $1, R0
+       FMOVD   R0, F25
+       ADD     $1, R0
+       FMOVD   R0, F26
+       ADD     $1, R0
+       FMOVD   R0, F27
+       ADD     $1, R0
+       FMOVD   R0, F28
+       ADD     $1, R0
+       FMOVD   R0, F29
+       ADD     $1, R0
+       FMOVD   R0, F30
+       ADD     $1, R0
+       FMOVD   R0, F31
+       RET
diff --git a/src/runtime/secret/crash_test.go b/src/runtime/secret/crash_test.go
new file mode 100644 (file)
index 0000000..1bd099a
--- /dev/null
@@ -0,0 +1,427 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.runtimesecret && linux
+
+package secret
+
+import (
+       "bytes"
+       "debug/elf"
+       "fmt"
+       "internal/testenv"
+       "io"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "runtime"
+       "strings"
+       "syscall"
+       "testing"
+)
+
+// Copied from runtime/runtime-gdb_unix_test.go
+func canGenerateCore(t *testing.T) bool {
+       // Ensure there is enough RLIMIT_CORE available to generate a full core.
+       var lim syscall.Rlimit
+       err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim)
+       if err != nil {
+               t.Fatalf("error getting rlimit: %v", err)
+       }
+       // Minimum RLIMIT_CORE max to allow. This is a conservative estimate.
+       // Most systems allow infinity.
+       const minRlimitCore = 100 << 20 // 100 MB
+       if lim.Max < minRlimitCore {
+               t.Skipf("RLIMIT_CORE max too low: %#+v", lim)
+       }
+
+       // Make sure core pattern will send core to the current directory.
+       b, err := os.ReadFile("/proc/sys/kernel/core_pattern")
+       if err != nil {
+               t.Fatalf("error reading core_pattern: %v", err)
+       }
+       if string(b) != "core\n" {
+               t.Skipf("Unexpected core pattern %q", string(b))
+       }
+
+       coreUsesPID := false
+       b, err = os.ReadFile("/proc/sys/kernel/core_uses_pid")
+       if err == nil {
+               switch string(bytes.TrimSpace(b)) {
+               case "0":
+               case "1":
+                       coreUsesPID = true
+               default:
+                       t.Skipf("unexpected core_uses_pid value %q", string(b))
+               }
+       }
+       return coreUsesPID
+}
+
+func TestCore(t *testing.T) {
+       // use secret, grab a coredump, rummage through
+       // it, trying to find our secret.
+
+       switch runtime.GOARCH {
+       case "amd64", "arm64":
+       default:
+               t.Skip("unsupported arch")
+       }
+       coreUsesPid := canGenerateCore(t)
+
+       // Build our crashing program
+       // Because we need assembly files to properly dirty our state
+       // we need to construct a package in our temporary directory.
+       tmpDir := t.TempDir()
+       // copy our base source
+       err := copyToDir("./testdata/crash.go", tmpDir, nil)
+       if err != nil {
+               t.Fatalf("error copying directory %v", err)
+       }
+       // Copy our testing assembly files. Use the ones from the package
+       // to ensure that they are always in sync.
+       err = copyToDir("./asm_amd64.s", tmpDir, nil)
+       if err != nil {
+               t.Fatalf("error copying file %v", err)
+       }
+       err = copyToDir("./asm_arm64.s", tmpDir, nil)
+       if err != nil {
+               t.Fatalf("error copying file %v", err)
+       }
+       err = copyToDir("./stubs.go", tmpDir, func(s string) string {
+               return strings.Replace(s, "package secret", "package main", 1)
+       })
+       if err != nil {
+               t.Fatalf("error copying file %v", err)
+       }
+
+       // The crashing package will live out of tree, so its source files
+       // cannot refer to our internal packages. However, the assembly files
+       // can refer to internal names, and we can pass the missing offsets in
+       // a small generated file.
+       offsets := `
+       package main
+       const (
+               offsetX86HasAVX    = %v
+               offsetX86HasAVX512 = %v
+       )
+       `
+       err = os.WriteFile(filepath.Join(tmpDir, "offsets.go"), []byte(fmt.Sprintf(offsets, offsetX86HasAVX, offsetX86HasAVX512)), 0666)
+       if err != nil {
+               t.Fatalf("error writing offset file %v", err)
+       }
+
+       // generate go.mod file
+       cmd := exec.Command(testenv.GoToolPath(t), "mod", "init", "crashtest")
+       cmd.Dir = tmpDir
+       out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
+       if err != nil {
+               t.Fatalf("error initing module %v\n%s", err, out)
+       }
+
+       cmd = exec.Command(testenv.GoToolPath(t), "build", "-o", filepath.Join(tmpDir, "a.exe"))
+       cmd.Dir = tmpDir
+       out, err = testenv.CleanCmdEnv(cmd).CombinedOutput()
+       if err != nil {
+               t.Fatalf("error building source %v\n%s", err, out)
+       }
+
+       // Start the test binary.
+       cmd = testenv.CommandContext(t, t.Context(), "./a.exe")
+       cmd.Dir = tmpDir
+       var stdout strings.Builder
+       cmd.Stdout = &stdout
+       cmd.Stderr = &stdout
+
+       err = cmd.Run()
+       // For debugging.
+       t.Logf("\n\n\n--- START SUBPROCESS ---\n\n\n%s\n\n--- END SUBPROCESS ---\n\n\n", stdout.String())
+       if err == nil {
+               t.Fatalf("test binary did not crash")
+       }
+       eErr, ok := err.(*exec.ExitError)
+       if !ok {
+               t.Fatalf("error is not exit error: %v", err)
+       }
+       if eErr.Exited() {
+               t.Fatalf("process exited instead of being terminated: %v", eErr)
+       }
+
+       rummage(t, tmpDir, eErr.Pid(), coreUsesPid)
+}
+
+func copyToDir(name string, dir string, replace func(string) string) error {
+       f, err := os.ReadFile(name)
+       if err != nil {
+               return err
+       }
+       if replace != nil {
+               f = []byte(replace(string(f)))
+       }
+       return os.WriteFile(filepath.Join(dir, filepath.Base(name)), f, 0666)
+}
+
+type violation struct {
+       id  byte   // secret ID
+       off uint64 // offset in core dump
+}
+
+// A secret value that should never appear in a core dump,
+// except for this global variable itself.
+// The first byte of the secret is variable, to track
+// different instances of it.
+//
+// If this value is changed, update ./testdata/crash.go.
+// TODO: this is little-endian specific.
+var secretStore = [8]byte{
+       0x00,
+       0x81,
+       0xa0,
+       0xc6,
+       0xb3,
+       0x01,
+       0x66,
+       0x53,
+}
+
+func rummage(t *testing.T, tmpDir string, pid int, coreUsesPid bool) {
+       coreFileName := "core"
+       if coreUsesPid {
+               coreFileName += fmt.Sprintf(".%d", pid)
+       }
+       core, err := os.Open(filepath.Join(tmpDir, coreFileName))
+       if err != nil {
+               t.Fatalf("core file not found: %v", err)
+       }
+       b, err := io.ReadAll(core)
+       if err != nil {
+               t.Fatalf("can't read core file: %v", err)
+       }
+
+       // Open elf view onto core file.
+       coreElf, err := elf.NewFile(core)
+       if err != nil {
+               t.Fatalf("can't parse core file: %v", err)
+       }
+
+       // Look for any places that have the secret.
+       var violations []violation // core file offsets where we found a secret
+       i := 0
+       for {
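+               // The first byte of the secret is the variable ID byte, so
+               // search for the seven fixed trailing bytes and back up one
+               // to read the ID.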
+               j := bytes.Index(b[i:], secretStore[1:])
+               if j < 0 {
+                       break
+               }
+               j--
+               i += j
+
+               t.Errorf("secret %d found at offset %x in core file", b[i], i)
+               violations = append(violations, violation{
+                       id:  b[i],
+                       off: uint64(i),
+               })
+
+               i += len(secretStore)
+       }
+
+       // Get more specific data about where in the core we found the secrets.
+       regions := elfRegions(t, core, coreElf)
+       for _, r := range regions {
+               for _, v := range violations {
+                       if v.off >= r.min && v.off < r.max {
+                               var addr string
+                               if r.addrMin != 0 {
+                                       addr = fmt.Sprintf(" addr=%x", r.addrMin+(v.off-r.min))
+                               }
+                               t.Logf("additional info: secret %d at offset %x in %s%s", v.id, v.off-r.min, r.name, addr)
+                       }
+               }
+       }
+}
+
+type elfRegion struct {
+       name             string
+       min, max         uint64 // core file offset range
+       addrMin, addrMax uint64 // inferior address range (or 0,0 if no address, like registers)
+}
+
+func elfRegions(t *testing.T, core *os.File, coreElf *elf.File) []elfRegion {
+       var regions []elfRegion
+       for _, p := range coreElf.Progs {
+               regions = append(regions, elfRegion{
+                       name:    fmt.Sprintf("%s[%s]", p.Type, p.Flags),
+                       min:     p.Off,
+                       max:     p.Off + min(p.Filesz, p.Memsz),
+                       addrMin: p.Vaddr,
+                       addrMax: p.Vaddr + min(p.Filesz, p.Memsz),
+               })
+       }
+
+       // TODO(dmo): parse thread regions for arm64.
+       // This doesn't invalidate the test, it just makes it harder to figure
+       // out where we're leaking stuff.
+       if runtime.GOARCH == "amd64" {
+               regions = append(regions, threadRegions(t, core, coreElf)...)
+       }
+
+       for i, r1 := range regions {
+               for j, r2 := range regions {
+                       if i == j {
+                               continue
+                       }
+                       if r1.max <= r2.min || r2.max <= r1.min {
+                               continue
+                       }
+                       t.Fatalf("overlapping regions %v %v", r1, r2)
+               }
+       }
+
+       return regions
+}
+
+func threadRegions(t *testing.T, core *os.File, coreElf *elf.File) []elfRegion {
+       var regions []elfRegion
+
+       for _, prog := range coreElf.Progs {
+               if prog.Type != elf.PT_NOTE {
+                       continue
+               }
+
+               b := make([]byte, prog.Filesz)
+               _, err := core.ReadAt(b, int64(prog.Off))
+               if err != nil {
+                       t.Fatalf("can't read core file %v", err)
+               }
+               prefix := "unk"
+               b0 := b
+               for len(b) > 0 {
+                       namesz := coreElf.ByteOrder.Uint32(b)
+                       b = b[4:]
+                       descsz := coreElf.ByteOrder.Uint32(b)
+                       b = b[4:]
+                       typ := elf.NType(coreElf.ByteOrder.Uint32(b))
+                       b = b[4:]
+                       name := string(b[:namesz-1])
+                       b = b[(namesz+3)/4*4:]
+                       off := prog.Off + uint64(len(b0)-len(b))
+                       desc := b[:descsz]
+                       b = b[(descsz+3)/4*4:]
+
+                       if name != "CORE" && name != "LINUX" {
+                               continue
+                       }
+                       end := off + uint64(len(desc))
+                       // Note: amd64 specific
+                       // See /usr/include/x86_64-linux-gnu/bits/sigcontext.h
+                       //
+                       //   struct _fpstate
+                       switch typ {
+                       case elf.NT_PRSTATUS:
+                               pid := coreElf.ByteOrder.Uint32(desc[32:36])
+                               prefix = fmt.Sprintf("thread%d: ", pid)
+                               regions = append(regions, elfRegion{
+                                       name: prefix + "prstatus header",
+                                       min:  off,
+                                       max:  off + 112,
+                               })
+                               off += 112
+                               greg := []string{
+                                       "r15",
+                                       "r14",
+                                       "r13",
+                                       "r12",
+                                       "rbp",
+                                       "rbx",
+                                       "r11",
+                                       "r10",
+                                       "r9",
+                                       "r8",
+                                       "rax",
+                                       "rcx",
+                                       "rdx",
+                                       "rsi",
+                                       "rdi",
+                                       "orig_rax",
+                                       "rip",
+                                       "cs",
+                                       "eflags",
+                                       "rsp",
+                                       "ss",
+                                       "fs_base",
+                                       "gs_base",
+                                       "ds",
+                                       "es",
+                                       "fs",
+                                       "gs",
+                               }
+                               for _, r := range greg {
+                                       regions = append(regions, elfRegion{
+                                               name: prefix + r,
+                                               min:  off,
+                                               max:  off + 8,
+                                       })
+                                       off += 8
+                               }
+                               regions = append(regions, elfRegion{
+                                       name: prefix + "prstatus footer",
+                                       min:  off,
+                                       max:  off + 8,
+                               })
+                               off += 8
+                       case elf.NT_FPREGSET:
+                               regions = append(regions, elfRegion{
+                                       name: prefix + "fpregset header",
+                                       min:  off,
+                                       max:  off + 32,
+                               })
+                               off += 32
+                               for i := 0; i < 8; i++ {
+                                       regions = append(regions, elfRegion{
+                                               name: prefix + fmt.Sprintf("mmx%d", i),
+                                               min:  off,
+                                               max:  off + 16,
+                                       })
+                                       off += 16
+                                       // They are long double (10 bytes), but
+                                       // stored in 16-byte slots.
+                               }
+                               for i := 0; i < 16; i++ {
+                                       regions = append(regions, elfRegion{
+                                               name: prefix + fmt.Sprintf("xmm%d", i),
+                                               min:  off,
+                                               max:  off + 16,
+                                       })
+                                       off += 16
+                               }
+                               regions = append(regions, elfRegion{
+                                       name: prefix + "fpregset footer",
+                                       min:  off,
+                                       max:  off + 96,
+                               })
+                               off += 96
+                               /*
+                                       case NT_X86_XSTATE: // aka NT_PRPSINFO+511
+                                               // legacy: 512 bytes
+                                               // xsave header: 64 bytes
+                                               fmt.Printf("hdr %v\n", desc[512:][:64])
+                                               // ymm high128: 256 bytes
+
+                                               println(len(desc))
+                                               fallthrough
+                               */
+                       default:
+                               regions = append(regions, elfRegion{
+                                       name: fmt.Sprintf("%s/%s", name, typ),
+                                       min:  off,
+                                       max:  off + uint64(len(desc)),
+                               })
+                               off += uint64(len(desc))
+                       }
+                       if off != end {
+                               t.Fatalf("note section incomplete")
+                       }
+               }
+       }
+       return regions
+}
diff --git a/src/runtime/secret/export.go b/src/runtime/secret/export.go
new file mode 100644 (file)
index 0000000..34f3c37
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package secret
+
+import (
+       "internal/cpu"
+       "unsafe"
+)
+
+// exports for assembly testing functions
+const (
+       offsetX86HasAVX    = unsafe.Offsetof(cpu.X86.HasAVX)
+       offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512)
+)
diff --git a/src/runtime/secret/secret.go b/src/runtime/secret/secret.go
new file mode 100644 (file)
index 0000000..f669b98
--- /dev/null
@@ -0,0 +1,128 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.runtimesecret
+
+package secret
+
+import (
+       "runtime"
+       _ "unsafe"
+)
+
+// Do invokes f.
+//
+// Do ensures that any temporary storage used by f is erased in a
+// timely manner. (In this context, "f" is shorthand for the
+// entire call tree initiated by f.)
+//   - Any registers used by f are erased before Do returns.
+//   - Any stack used by f is erased before Do returns.
+//   - Any heap allocation done by f is erased as soon as the garbage
+//     collector realizes that it is no longer reachable.
+//   - Do works even if f panics or calls runtime.Goexit.  As part of
+//     that, any panic raised by f will appear as if it originates from
+//     Do itself.
+//
+// Limitations:
+//   - Currently only supported on linux/amd64 and linux/arm64.  On unsupported
+//     platforms, Do will invoke f directly.
+//   - Protection does not extend to any global variables written by f.
+//   - Any attempt by f to launch a goroutine will result in a panic.
+//   - If f calls runtime.Goexit, erasure can be delayed by defers
+//     higher up on the call stack.
+//   - Heap allocations will only be erased if the program drops all
+//     references to those allocations, and then the garbage collector
+//     notices that those references are gone. The former is under
+//     control of the program, but the latter is at the whim of the
+//     runtime.
+//   - Any value panicked by f may point to allocations from within
+//     f. Those allocations will not be erased until (at least) the
+//     panicked value is dead.
+//   - Pointer addresses may leak into data buffers used by the runtime
+//     to perform garbage collection. Users should not encode confidential
+//     information into pointers. For example, if an offset into an array or
+//     struct is confidential, then users should not create a pointer into
+//     the object. Since this function is intended to be used with constant-time
+//     cryptographic code, this requirement is usually fulfilled implicitly.
+func Do(f func()) {
+       const osArch = runtime.GOOS + "/" + runtime.GOARCH
+       switch osArch {
+       default:
+               // unsupported, just invoke f directly.
+               f()
+               return
+       case "linux/amd64", "linux/arm64":
+       }
+
+       // Place to store any panic value.
+       var p any
+
+       // Step 1: increment the nesting count.
+       inc()
+
+       // Step 2: call helper. The helper just calls f
+       // and captures (recovers) any panic result.
+       p = doHelper(f)
+
+       // Step 3: erase everything used by f (stack, registers).
+       eraseSecrets()
+
+       // Step 4: decrement the nesting count.
+       dec()
+
+       // Step 5: re-raise any caught panic.
+       // This will make the panic appear to come
+       // from a stack whose bottom frame is
+       // runtime/secret.Do.
+       // Anything below that to do with f will be gone.
+       //
+       // Note that the panic value is not erased. It behaves
+       // like any other value that escapes from f. If it is
+       // heap allocated, it will be erased when the garbage
+       // collector notices it is no longer referenced.
+       if p != nil {
+               panic(p)
+       }
+
+       // Note: if f calls runtime.Goexit, steps 3 through 5 will not
+       // happen, as Goexit is unrecoverable. We handle that case in
+       // runtime/proc.go:goexit0.
+}
+
+func doHelper(f func()) (p any) {
+       // Step 2b: Pop the stack up to the secret.doHelper frame
+       // if we are in the process of panicking.
+       // (It is a no-op if we are not panicking.)
+       // We return any panicked value to secret.Do, who will
+       // re-panic it.
+       defer func() {
+               // Note: we rely on the go1.21+ behavior that
+               // if we are panicking, recover returns non-nil.
+               p = recover()
+       }()
+
+       // Step 2a: call the secret function.
+       f()
+
+       return
+}
+
+// Enabled reports whether [Do] appears anywhere on the call stack.
+func Enabled() bool {
+       return count() > 0
+}
+
+// implemented in runtime
+
+//go:linkname count
+func count() int32
+
+//go:linkname inc
+func inc()
+
+//go:linkname dec
+func dec()
+
+//go:linkname eraseSecrets
+func eraseSecrets()
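A minimal usage sketch of the API above, assuming a toolchain with GOEXPERIMENT=runtimesecret on linux/amd64 or linux/arm64 (the key below is placeholder material, not part of this CL):

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"fmt"
		"runtime/secret"
	)

	func main() {
		var tag []byte
		secret.Do(func() {
			// Placeholder key. Stack and register temporaries from the
			// HMAC computation are erased when Do returns; the escaping
			// tag is erased only once it is unreachable and collected.
			key := []byte("example key, not a real secret")
			mac := hmac.New(sha256.New, key)
			mac.Write([]byte("message"))
			tag = mac.Sum(nil)
		})
		fmt.Printf("%x\n", tag)
	}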
diff --git a/src/runtime/secret/secret_test.go b/src/runtime/secret/secret_test.go
new file mode 100644 (file)
index 0000000..7651a93
--- /dev/null
@@ -0,0 +1,293 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The race detector does not like our pointer shenanigans
+// while checking the stack.
+
+//go:build goexperiment.runtimesecret && (arm64 || amd64) && linux && !race
+
+package secret
+
+import (
+       "runtime"
+       "strings"
+       "testing"
+       "time"
+       "unsafe"
+)
+
+type secretType int64
+
+const secretValue = 0x53c237_53c237
+
+// S is a type that might have some secrets in it.
+type S [100]secretType
+
+// makeS makes an S with secrets in it.
+//
+//go:noinline
+func makeS() S {
+       // Note: noinline ensures this doesn't get inlined and
+       // completely optimized away.
+       var s S
+       for i := range s {
+               s[i] = secretValue
+       }
+       return s
+}
+
+// heapS allocates an S on the heap with secrets in it.
+//
+//go:noinline
+func heapS() *S {
+       // Note: noinline forces heap allocation
+       s := makeS()
+       return &s
+}
+
+// heapSTiny allocates a secret small enough to use the tiny allocator.
+//
+//go:noinline
+func heapSTiny() *secretType {
+       s := new(secretType(secretValue))
+       return s
+}
+
+// Test that when we allocate inside secret.Do, the resulting
+// allocations are zeroed by the garbage collector when they
+// are freed.
+// See runtime/mheap.go:freeSpecial.
+func TestHeap(t *testing.T) {
+       var u uintptr
+       Do(func() {
+               u = uintptr(unsafe.Pointer(heapS()))
+       })
+
+       runtime.GC()
+
+       // Check that object got zeroed.
+       checkRangeForSecret(t, u, u+unsafe.Sizeof(S{}))
+       // Also check our stack, just because we can.
+       checkStackForSecret(t)
+}
+
+func TestHeapTiny(t *testing.T) {
+       var u uintptr
+       Do(func() {
+               u = uintptr(unsafe.Pointer(heapSTiny()))
+       })
+       runtime.GC()
+
+       // Check that object got zeroed.
+       checkRangeForSecret(t, u, u+unsafe.Sizeof(secretType(0)))
+       // Also check our stack, just because we can.
+       checkStackForSecret(t)
+}
+
+// Test that when we return from secret.Do, we zero the stack used
+// by the argument to secret.Do.
+// See runtime/secret.go:secret_dec.
+func TestStack(t *testing.T) {
+       checkStackForSecret(t) // if this fails, something is wrong with the test
+
+       Do(func() {
+               s := makeS()
+               use(&s)
+       })
+
+       checkStackForSecret(t)
+}
+
+//go:noinline
+func use(s *S) {
+       // Note: noinline prevents dead variable elimination.
+}
+
+// Test that when we copy a stack, we zero the old one.
+// See runtime/stack.go:copystack.
+func TestStackCopy(t *testing.T) {
+       checkStackForSecret(t) // if this fails, something is wrong with the test
+
+       var lo, hi uintptr
+       Do(func() {
+               // Put some secrets on the current stack frame.
+               s := makeS()
+               use(&s)
+               // Remember the current stack.
+               lo, hi = getStack()
+               // Use a lot more stack to force a stack copy.
+               growStack()
+       })
+       checkRangeForSecret(t, lo, hi) // pre-grow stack
+       checkStackForSecret(t)         // post-grow stack (just because we can)
+}
+
+func growStack() {
+       growStack1(1000)
+}
+func growStack1(n int) {
+       if n == 0 {
+               return
+       }
+       growStack1(n - 1)
+}
+
+func TestPanic(t *testing.T) {
+       checkStackForSecret(t) // if this fails, something is wrong with the test
+
+       defer func() {
+               checkStackForSecret(t)
+
+               p := recover()
+               if p == nil {
+                       t.Errorf("panic squashed")
+                       return
+               }
+               var e error
+               var ok bool
+               if e, ok = p.(error); !ok {
+                       t.Errorf("panic not an error")
+               }
+               if !strings.Contains(e.Error(), "divide by zero") {
+                       t.Errorf("panic not a divide by zero error: %s", e.Error())
+               }
+               var pcs [10]uintptr
+               n := runtime.Callers(0, pcs[:])
+               frames := runtime.CallersFrames(pcs[:n])
+               for {
+                       frame, more := frames.Next()
+                       if strings.Contains(frame.Function, "dividePanic") {
+                               t.Errorf("secret function in traceback")
+                       }
+                       if !more {
+                               break
+                       }
+               }
+       }()
+       Do(dividePanic)
+}
+
+func dividePanic() {
+       s := makeS()
+       use(&s)
+       _ = 8 / zero
+}
+
+var zero int
+
+func TestGoExit(t *testing.T) {
+       checkStackForSecret(t) // if this fails, something is wrong with the test
+
+       c := make(chan uintptr, 2)
+
+       go func() {
+               // Run the test in a separate goroutine
+               defer func() {
+                       // Tell original goroutine what our stack is
+                       // so it can check it for secrets.
+                       lo, hi := getStack()
+                       c <- lo
+                       c <- hi
+               }()
+               Do(func() {
+                       s := makeS()
+                       use(&s)
+                       // There's an entire round-trip through the scheduler between here
+                       // and when we are able to check if the registers are still dirtied, and we're
+                       // not guaranteed to run on the same M. Make a best-effort attempt anyway.
+                       loadRegisters(unsafe.Pointer(&s))
+                       runtime.Goexit()
+               })
+               t.Errorf("goexit didn't happen")
+       }()
+       lo := <-c
+       hi := <-c
+       // We want to wait until the other goroutine has finished Goexiting and
+       // cleared its stack. There's no signal for that, so just wait a bit.
+       time.Sleep(1 * time.Millisecond)
+
+       checkRangeForSecret(t, lo, hi)
+
+       var spillArea [64]secretType
+       n := spillRegisters(unsafe.Pointer(&spillArea))
+       if n > unsafe.Sizeof(spillArea) {
+               t.Fatalf("spill area overrun %d\n", n)
+       }
+       for i, v := range spillArea {
+               if v == secretValue {
+                       t.Errorf("secret found in spill slot %d", i)
+               }
+       }
+}
+
+func checkStackForSecret(t *testing.T) {
+       t.Helper()
+       lo, hi := getStack()
+       checkRangeForSecret(t, lo, hi)
+}
+func checkRangeForSecret(t *testing.T, lo, hi uintptr) {
+       t.Helper()
+       for p := lo; p < hi; p += unsafe.Sizeof(secretType(0)) {
+               v := *(*secretType)(unsafe.Pointer(p))
+               if v == secretValue {
+                       t.Errorf("secret found in [%x,%x] at %x", lo, hi, p)
+               }
+       }
+}
+
+func TestRegisters(t *testing.T) {
+       Do(func() {
+               s := makeS()
+               loadRegisters(unsafe.Pointer(&s))
+       })
+       var spillArea [64]secretType
+       n := spillRegisters(unsafe.Pointer(&spillArea))
+       if n > unsafe.Sizeof(spillArea) {
+               t.Fatalf("spill area overrun %d\n", n)
+       }
+       for i, v := range spillArea {
+               if v == secretValue {
+                       t.Errorf("secret found in spill slot %d", i)
+               }
+       }
+}
+
+func TestSignalStacks(t *testing.T) {
+       Do(func() {
+               s := makeS()
+               loadRegisters(unsafe.Pointer(&s))
+               // Cause a signal with our secret state to dirty
+               // at least one of the signal stacks.
+               func() {
+                       defer func() {
+                               x := recover()
+                               if x == nil {
+                                       panic("did not get panic")
+                               }
+                       }()
+                       var p *int
+                       *p = 20
+               }()
+       })
+       // Signal stacks aren't cleared until
+       // the next GC after secret.Do returns.
+       runtime.GC()
+       stk := make([]stack, 0, 100)
+       stk = appendSignalStacks(stk)
+       for _, s := range stk {
+               checkRangeForSecret(t, s.lo, s.hi)
+       }
+}
+
+// hooks into the runtime
+func getStack() (uintptr, uintptr)
+
+// Stack is a copy of runtime.stack for testing export.
+// Fields must match.
+type stack struct {
+       lo uintptr
+       hi uintptr
+}
+
+func appendSignalStacks([]stack) []stack
diff --git a/src/runtime/secret/stubs.go b/src/runtime/secret/stubs.go
new file mode 100644 (file)
index 0000000..ec66ef2
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 || amd64
+
+// Testing stubs; these are implemented in assembly in
+// asm_$GOARCH.s.
+//
+// Note that this file is also used as a template to build a
+// crashing binary that tries to leave secrets in places where
+// they are supposed to be erased. See crash_test.go for more info.
+
+package secret
+
+import "unsafe"
+
+// Load data from p into test registers.
+//
+//go:noescape
+func loadRegisters(p unsafe.Pointer)
+
+// Spill data from test registers into p.
+// Returns the amount of space filled in.
+//
+//go:noescape
+func spillRegisters(p unsafe.Pointer) uintptr
+
+// Load secret into all registers.
+//
+//go:noescape
+func useSecret(secret []byte)
diff --git a/src/runtime/secret/stubs_noasm.go b/src/runtime/secret/stubs_noasm.go
new file mode 100644 (file)
index 0000000..f8091ff
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 && !amd64
+
+package secret
+
+import "unsafe"
+
+func loadRegisters(p unsafe.Pointer)          {}
+func spillRegisters(p unsafe.Pointer) uintptr { return 0 }
+func useSecret(secret []byte)                 {}
diff --git a/src/runtime/secret/testdata/crash.go b/src/runtime/secret/testdata/crash.go
new file mode 100644 (file)
index 0000000..cf48fb7
--- /dev/null
@@ -0,0 +1,142 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "os"
+       "runtime"
+       "runtime/debug"
+       "runtime/secret"
+       "sync"
+       "syscall"
+       "time"
+       _ "unsafe"
+       "weak"
+)
+
+// callback from assembly
+//
+//go:linkname delay main.delay
+func delay() {
+       time.Sleep(1 * time.Millisecond)
+}
+
+// Same secret as in ../crash_test.go.
+var secretStore = [8]byte{
+       0x00,
+       0x81,
+       0xa0,
+       0xc6,
+       0xb3,
+       0x01,
+       0x66,
+       0x53,
+}
+
+func main() {
+       enableCore()
+       useSecretProc()
+       // Clear out the secret. That way we don't have
+       // to figure out which secret is the allowed
+       // source.
+       clear(secretStore[:])
+       panic("terminate")
+}
+
+// Copied from runtime/runtime-gdb_unix_test.go
+func enableCore() {
+       debug.SetTraceback("crash")
+
+       var lim syscall.Rlimit
+       err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim)
+       if err != nil {
+               panic(fmt.Sprintf("error getting rlimit: %v", err))
+       }
+       lim.Cur = lim.Max
+       fmt.Fprintf(os.Stderr, "Setting RLIMIT_CORE = %+#v\n", lim)
+       err = syscall.Setrlimit(syscall.RLIMIT_CORE, &lim)
+       if err != nil {
+               panic(fmt.Sprintf("error setting rlimit: %v", err))
+       }
+}
+
+// useSecretProc does 5 seconds of work, using the secret value
+// inside secret.Do in a bunch of ways.
+func useSecretProc() {
+       stop := make(chan bool)
+       var wg sync.WaitGroup
+
+       for i := 0; i < 4; i++ {
+               wg.Add(1)
+               go func() {
+                       time.Sleep(1 * time.Second)
+                       for {
+                               select {
+                               case <-stop:
+                                       wg.Done()
+                                       return
+                               default:
+                                       secret.Do(func() {
+                                               // Copy key into a variable-sized heap allocation.
+                                               // This both puts secrets in heap objects,
+                                               // and more generally just causes allocation,
+                                               // which forces garbage collection, which
+                                               // requires interrupts and the like.
+                                               s := bytes.Repeat(secretStore[:], 1+i*2)
+                                               // Also spam the secret across all registers.
+                                               useSecret(s)
+                                       })
+                               }
+                       }
+               }()
+       }
+
+       // Send some allocations over a channel. This does 2 things:
+       // 1) forces some GCs to happen
+       // 2) causes more scheduling noise (Gs moving between Ms, etc.)
+       c := make(chan []byte)
+       wg.Add(2)
+       go func() {
+               for {
+                       select {
+                       case <-stop:
+                               wg.Done()
+                               return
+                       case c <- make([]byte, 256):
+                       }
+               }
+       }()
+       go func() {
+               for {
+                       select {
+                       case <-stop:
+                               wg.Done()
+                               return
+                       case <-c:
+                       }
+               }
+       }()
+
+       time.Sleep(5 * time.Second)
+       close(stop)
+       wg.Wait()
+       // Use a weak reference to ensure that the GC has cleared everything.
+       // Use a large value to avoid the tiny allocator.
+       w := weak.Make(new([2048]byte))
+       // 20 seems like a decent amount?
+       for i := 0; i < 20; i++ {
+               runtime.GC() // GC should clear any secret heap objects and clear out scheduling buffers.
+               if w.Value() == nil {
+                       fmt.Fprintf(os.Stderr, "number of GCs %v\n", i+1)
+                       return
+               }
+       }
+       fmt.Fprintf(os.Stderr, "GC didn't clear out in time\n")
+       // This will cause the core dump to happen with the sentinel value still in memory
+       // so we will detect the fault.
+       panic("fault")
+}
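The weak-reference idiom above is a compact way to detect that the collector has completed a cycle. A self-contained sketch of the same technique (waitForGC is a hypothetical name):

	package main

	import (
		"fmt"
		"runtime"
		"weak"
	)

	// waitForGC allocates a sentinel, keeps only a weak pointer to it, and
	// forces GC cycles until the sentinel is observed to be reclaimed.
	func waitForGC(maxCycles int) bool {
		w := weak.Make(new([2048]byte)) // large enough to avoid the tiny allocator
		for i := 0; i < maxCycles; i++ {
			runtime.GC()
			if w.Value() == nil {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(waitForGC(20))
	}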
diff --git a/src/runtime/secret_amd64.s b/src/runtime/secret_amd64.s
new file mode 100644 (file)
index 0000000..06103d1
--- /dev/null
@@ -0,0 +1,107 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+#include "funcdata.h"
+
+// TODO(dmo): generate these with mkpreempt.go; the register sets
+// are tightly coupled, and generating them will ensure that we keep
+// them all synchronized.
+
+// secretEraseRegisters erases any register that may
+// have been used by user code within a secret.Do function.
+// This is roughly the general-purpose and floating-point
+// registers, barring any reserved registers and registers generally
+// considered architectural (amd64 segment registers, arm64 exception registers).
+TEXT ·secretEraseRegisters(SB),NOFRAME|NOSPLIT,$0-0
+       XORL    AX, AX
+       JMP ·secretEraseRegistersMcall(SB)
+
+// Mcall requires an argument in AX. This function
+// excludes that register from being cleared
+TEXT ·secretEraseRegistersMcall(SB),NOSPLIT|NOFRAME,$0-0
+       // integer registers
+       XORL    BX, BX
+       XORL    CX, CX
+       XORL    DX, DX
+       XORL    DI, DI
+       XORL    SI, SI
+       // BP = frame pointer
+       // SP = stack pointer
+       XORL    R8, R8
+       XORL    R9, R9
+       XORL    R10, R10
+       XORL    R11, R11
+       XORL    R12, R12
+       XORL    R13, R13
+       // R14 = G register
+       XORL    R15, R15
+
+       // floating-point registers
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX(SB), $1
+       JEQ     avx
+
+       PXOR    X0, X0
+       PXOR    X1, X1
+       PXOR    X2, X2
+       PXOR    X3, X3
+       PXOR    X4, X4
+       PXOR    X5, X5
+       PXOR    X6, X6
+       PXOR    X7, X7
+       PXOR    X8, X8
+       PXOR    X9, X9
+       PXOR    X10, X10
+       PXOR    X11, X11
+       PXOR    X12, X12
+       PXOR    X13, X13
+       PXOR    X14, X14
+       PXOR    X15, X15
+       JMP     noavx512
+
+avx:
+       // VZEROALL zeroes all of the X0-X15 registers, no matter how wide.
+       // That includes Y0-Y15 (256-bit avx) and Z0-Z15 (512-bit avx512).
+       VZEROALL
+
+       // Clear all the avx512 state.
+       CMPB    internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+       JNE     noavx512
+
+       // Zero X16-X31
+       // Note that VZEROALL above already cleared Z0-Z15.
+       VMOVAPD Z0, Z16
+       VMOVAPD Z0, Z17
+       VMOVAPD Z0, Z18
+       VMOVAPD Z0, Z19
+       VMOVAPD Z0, Z20
+       VMOVAPD Z0, Z21
+       VMOVAPD Z0, Z22
+       VMOVAPD Z0, Z23
+       VMOVAPD Z0, Z24
+       VMOVAPD Z0, Z25
+       VMOVAPD Z0, Z26
+       VMOVAPD Z0, Z27
+       VMOVAPD Z0, Z28
+       VMOVAPD Z0, Z29
+       VMOVAPD Z0, Z30
+       VMOVAPD Z0, Z31
+
+       // Zero k0-k7
+       KXORQ   K0, K0, K0
+       KXORQ   K0, K0, K1
+       KXORQ   K0, K0, K2
+       KXORQ   K0, K0, K3
+       KXORQ   K0, K0, K4
+       KXORQ   K0, K0, K5
+       KXORQ   K0, K0, K6
+       KXORQ   K0, K0, K7
+
+noavx512:
+       // misc registers
+       CMPL    BX, BX  // eflags
+       // segment registers? Direction flag? Both seem overkill.
+
+       RET
diff --git a/src/runtime/secret_arm64.s b/src/runtime/secret_arm64.s
new file mode 100644 (file)
index 0000000..d21b139
--- /dev/null
@@ -0,0 +1,90 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+#include "funcdata.h"
+
+TEXT ·secretEraseRegisters(SB),NOFRAME|NOSPLIT,$0-0
+       MOVD    ZR, R0
+       MOVD    ZR, R26
+       JMP ·secretEraseRegistersMcall(SB)
+
+// Mcall requires an argument in R0 and does not have a
+// stack frame to spill into. Additionally, there is no stack
+// to spill the link register into. This function deliberately
+// doesn't clear R0 and R26, and Mcall uses R26 as a link register.
+TEXT ·secretEraseRegistersMcall(SB),NOFRAME|NOSPLIT,$0-0
+       // integer registers
+       MOVD    ZR, R1
+       MOVD    ZR, R2
+       MOVD    ZR, R3
+       MOVD    ZR, R4
+       MOVD    ZR, R5
+       MOVD    ZR, R6
+       MOVD    ZR, R7
+       MOVD    ZR, R8
+       MOVD    ZR, R9
+       MOVD    ZR, R10
+       MOVD    ZR, R11
+       MOVD    ZR, R12
+       MOVD    ZR, R13
+       MOVD    ZR, R14
+       MOVD    ZR, R15
+       MOVD    ZR, R16
+       MOVD    ZR, R17
+       // R18 = platform register
+       MOVD    ZR, R19
+       MOVD    ZR, R20
+       MOVD    ZR, R21
+       MOVD    ZR, R22
+       MOVD    ZR, R23
+       MOVD    ZR, R24
+       MOVD    ZR, R25
+       // R26 is used as an extra link register in mcall, where we can't spill.
+       MOVD    ZR, R27
+       // R28 = g
+       // R29 = frame pointer
+       // R30 = link register (return address)
+       // R31 = stack pointer
+
+       // floating point registers
+       // (also clears simd registers)
+       FMOVD   ZR, F0
+       FMOVD   ZR, F1
+       FMOVD   ZR, F2
+       FMOVD   ZR, F3
+       FMOVD   ZR, F4
+       FMOVD   ZR, F5
+       FMOVD   ZR, F6
+       FMOVD   ZR, F7
+       FMOVD   ZR, F8
+       FMOVD   ZR, F9
+       FMOVD   ZR, F10
+       FMOVD   ZR, F11
+       FMOVD   ZR, F12
+       FMOVD   ZR, F13
+       FMOVD   ZR, F14
+       FMOVD   ZR, F15
+       FMOVD   ZR, F16
+       FMOVD   ZR, F17
+       FMOVD   ZR, F18
+       FMOVD   ZR, F19
+       FMOVD   ZR, F20
+       FMOVD   ZR, F21
+       FMOVD   ZR, F22
+       FMOVD   ZR, F23
+       FMOVD   ZR, F24
+       FMOVD   ZR, F25
+       FMOVD   ZR, F26
+       FMOVD   ZR, F27
+       FMOVD   ZR, F28
+       FMOVD   ZR, F29
+       FMOVD   ZR, F30
+       FMOVD   ZR, F31
+
+       // misc registers
+       CMP     ZR, ZR // N,Z,C,V flags
+
+       RET
diff --git a/src/runtime/secret_asm.go b/src/runtime/secret_asm.go
new file mode 100644 (file)
index 0000000..08223a6
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 || amd64
+
+package runtime
+
+func secretEraseRegisters()
diff --git a/src/runtime/secret_noasm.go b/src/runtime/secret_noasm.go
new file mode 100644 (file)
index 0000000..3f7e49a
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 && !amd64
+
+package runtime
+
+func secretEraseRegisters() {
+       throw("runtime/secret.Do not supported yet")
+}
diff --git a/src/runtime/secret_nosecret.go b/src/runtime/secret_nosecret.go
new file mode 100644 (file)
index 0000000..bf50fb5
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(amd64 || arm64) || !linux
+
+package runtime
+
+import "unsafe"
+
+// Stubs for platforms that do not implement runtime/secret
+
+//go:linkname secret_count runtime/secret.count
+func secret_count() int32 { return 0 }
+
+//go:linkname secret_inc runtime/secret.inc
+func secret_inc() {}
+
+//go:linkname secret_dec runtime/secret.dec
+func secret_dec() {}
+
+//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets
+func secret_eraseSecrets() {}
+
+func addSecret(p unsafe.Pointer) {}
+
+type specialSecret struct{}
+
+//go:linkname secret_getStack runtime/secret.getStack
+func secret_getStack() (uintptr, uintptr) { return 0, 0 }
+
+func noopSignal(mp *m) {}
index 573b11839740de5c436482202fc89a07141ad3fe..f4559f570e8987c23317a50a5cf69b6c8879005e 100644 (file)
@@ -54,3 +54,31 @@ func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
 func (c *sigctxt) set_sigaddr(x uint64) {
        *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
 }
+
+// dumpSigStack prints a signal stack, annotating the context, the fpstate
+// pointer field within that context, and the beginning of the fpstate with
+// C/F/S respectively.
+func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
+       println(s)
+       println("SP:\t", hex(sp))
+       println("ctx:\t", hex(ctx))
+       fpfield := ctx + unsafe.Offsetof(ucontext{}.uc_mcontext) + unsafe.Offsetof(mcontext{}.fpregs)
+       println("fpfield:\t", hex(fpfield))
+       fpbegin := uintptr(unsafe.Pointer((&sigctxt{nil, unsafe.Pointer(ctx)}).regs().fpstate))
+       println("fpstate:\t", hex(fpbegin))
+       hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
+               switch p {
+               case ctx:
+                       hm.start()
+                       print("C")
+                       println()
+               case fpfield:
+                       hm.start()
+                       print("F")
+                       println()
+               case fpbegin:
+                       hm.start()
+                       print("S")
+                       println()
+               }
+       })
+}
index 4ccc03079235465e3301365c7e6cf7f156549c21..2d31051fd04c94444be849f9d24de785b37dcce1 100644 (file)
@@ -69,3 +69,22 @@ func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
 func (c *sigctxt) set_sigaddr(x uint64) {
        *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
 }
+
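+// dumpSigStack prints a signal stack, annotating the context and the start
+// of the __reserved register entries with C/E respectively.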
+func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
+       println(s)
+       println("SP:\t", hex(sp))
+       println("ctx:\t", hex(ctx))
+       entriesStart := uintptr(unsafe.Pointer(&(*ucontext)(unsafe.Pointer(ctx)).uc_mcontext.__reserved))
+       hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
+               switch p {
+               case ctx:
+                       hm.start()
+                       print("C")
+                       println()
+               case entriesStart:
+                       hm.start()
+                       print("E")
+                       println()
+               }
+       })
+}
index 96628d6baae910984cb1cccc883d95944673efe3..f352cb3c0244710edda64ebf7f7f2717f6fa65b7 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "internal/abi"
+       "internal/goexperiment"
        "internal/runtime/atomic"
        "internal/runtime/sys"
        "unsafe"
@@ -488,6 +489,11 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
 
        c.fixsigcode(sig)
        sighandler(sig, info, ctx, gp)
+
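+       // The signal handler may have spilled secret-bearing registers onto
+       // this M's signal stack; flag the M so the stack can be erased later.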
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               atomic.Store(&gp.m.signalSecret, 1)
+       }
+
        setg(gp)
        if setStack {
                restoreGsignalStack(&gsignalStack)
index 5888177f0ea7a1cfebddb7eddc7bfeae641c805f..9dde0da9636a48eab297bf3b5b9c20818e69eba4 100644 (file)
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
                _32bit uintptr // size on 32bit platforms
                _64bit uintptr // size on 64bit platforms
        }{
-               {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
+               {runtime.G{}, 284 + xreg, 448 + xreg}, // g, but exported for testing
                {runtime.Sudog{}, 64, 104},            // sudog, but exported for testing
        }
 
index c92accf18826e058218b03f0f2f82f985fd49671..d1c80276a5c46947bf81793ba3f6fce1c31ef262 100644 (file)
@@ -8,6 +8,7 @@ import (
        "internal/abi"
        "internal/cpu"
        "internal/goarch"
+       "internal/goexperiment"
        "internal/goos"
        "internal/runtime/atomic"
        "internal/runtime/gc"
@@ -985,6 +986,16 @@ func copystack(gp *g, newsize uintptr) {
        }
 
        // free old stack
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               // Some portion of the old stack has secret stuff on it.
+               // We don't really know where we entered secret mode,
+               // so just clear the whole thing.
+               // TODO(dmo): traceback until we hit secret.Do? clearing
+               // is fast and optimized, might not be worth it.
+               memclrNoHeapPointers(unsafe.Pointer(old.lo), old.hi-old.lo)
+               // The memmove call above might put secrets from the stack into registers.
+               secretEraseRegisters()
+       }
        if stackPoisonCopy != 0 {
                fillstack(old, 0xfc)
        }
@@ -1026,6 +1037,14 @@ func newstack() {
        }
 
        gp := thisg.m.curg
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               // If we're entering here from a secret context, clear
+               // all the registers. This is important because we
+               // might context switch to a different goroutine which
+               // is not in secret mode, and it will not be careful
+               // about clearing its registers.
+               secretEraseRegisters()
+       }
 
        if thisg.m.curg.throwsplit {
                // Update syscallsp, syscallpc in case traceback uses them.
index e252a4b9147ffb99d2f101528c26ba7328522457..618553b1969fadde7a62f9a7f799aad746fe1b43 100644 (file)
@@ -228,6 +228,18 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
        // due to stack probes inserted to avoid stack/heap collisions.
        // See issue #20427.
 
+#ifdef GOEXPERIMENT_runtimesecret
+       // The kernel might spill our secrets onto g0,
+       // so erase our registers here.
+       // TODO(dmo): what is the ABI guarantee here? We use
+       // R14 later, but the function is ABI0.
+       CMPL    g_secret(R14), $0
+       JEQ     nosecret
+       CALL    ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
+
        MOVQ    SP, R12 // Save old SP; R12 unchanged by C code.
 
        MOVQ    g_m(R14), BX // BX unchanged by C code.
index 7a81d5479e3e282dfe07ed31ae45b2a6ef9f8b02..88f7213525ff745299f1aa0e94768aa1639ac850 100644 (file)
@@ -225,6 +225,13 @@ TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
 
 // func walltime() (sec int64, nsec int32)
 TEXT runtime·walltime(SB),NOSPLIT,$24-12
+#ifdef GOEXPERIMENT_runtimesecret
+       MOVW    g_secret(g), R20
+       CBZ     R20, nosecret
+       BL      ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
        MOVD    RSP, R20        // R20 is unchanged by C code
        MOVD    RSP, R1
 
@@ -309,6 +316,13 @@ finish:
        RET
 
 TEXT runtime·nanotime1(SB),NOSPLIT,$24-8
+#ifdef GOEXPERIMENT_runtimesecret
+       MOVW    g_secret(g), R20
+       CBZ     R20, nosecret
+       BL      ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
        MOVD    RSP, R20        // R20 is unchanged by C code
        MOVD    RSP, R1
 
index fa9561b25b56966da15592c4ca879e17c0199c89..4935c6dec3ddd49a8ae9e495986e3cbd22b71752 100644 (file)
 
 // func now() (sec int64, nsec int32, mono int64)
 TEXT time·now<ABIInternal>(SB),NOSPLIT,$16-24
+#ifdef GOEXPERIMENT_runtimesecret
+       // The kernel might spill our secrets onto g0,
+       // so erase our registers here.
+       CMPL    g_secret(R14), $0
+       JEQ     nosecret
+       CALL    ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
+
        MOVQ    SP, R12 // Save old SP; R12 unchanged by C code.
 
        MOVQ    g_m(R14), BX // BX unchanged by C code.
index 225f7029be1b977174991e65aa060e81ab3598aa..5e755dcc3d58d51fcc11900bca5845111e8c4da7 100644 (file)
@@ -8,6 +8,7 @@ package runtime
 
 import (
        "internal/cpu"
+       "internal/goexperiment"
        "unsafe"
 )
 
@@ -95,6 +96,13 @@ func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
                return -1, false
        }
 
+       // vDSO code may spill registers to the stack.
+       // Make sure they're zeroed if we're running in secret mode.
+       gp := getg()
+       if goexperiment.RuntimeSecret && gp.secret > 0 {
+               secretEraseRegisters()
+       }
+
        // We use getg().m instead of acquirem() here, because always taking
        // the lock is slightly more expensive than not always taking the lock.
        // However, we *do* require that m doesn't migrate elsewhere during the
index da170c52ed92ec88ebed1ec13ea0a897d44dccac..cf2f823855e690e685869809687b8f102db54622 100644 (file)
@@ -47,6 +47,10 @@ TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
 
 // func gettimeofday(tv *Timeval) (err uintptr)
 TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+       // Usually, we'd check if we're running
+       // secret code here, but because we execute
+       // gettimeofday on the G stack, it's fine to leave
+       // the registers uncleared.
        MOVQ    tv+0(FP), DI
        MOVQ    $0, SI
        MOVQ    runtime·vdsoGettimeofdaySym(SB), AX