From a3fb92a7100f3f2824d483ee0cbcf1264584b3e4 Mon Sep 17 00:00:00 2001
From: Daniel Morsing
Date: Thu, 25 Sep 2025 17:26:03 +0100
Subject: [PATCH] runtime/secret: implement new secret package

Implement secret.Do.

- When secret.Do returns:
  - Clear the stack used by the argument function.
  - Clear all the registers that might contain secrets.
- On stack growth in secret mode, clear the old stack.
- When objects are allocated in secret mode, mark them and then zero
  the marked objects immediately when they are freed.
- If the argument function panics, raise that panic as if it
  originated from secret.Do. This removes anything about the secret
  function from tracebacks.

For now, this is only implemented on linux for arm64 and amd64.

This is a rebased version of Keith Randall's initial implementation
at CL 600635. I have added arm64 support, signal handling, preemption
handling, and dealt with vDSOs spilling into system stacks.

Fixes #21865

Change-Id: I6fbd5a233beeaceb160785e0c0199a5c94d8e520
Co-authored-by: Keith Randall
Reviewed-on: https://go-review.googlesource.com/c/go/+/704615
Reviewed-by: Roland Shoemaker
LUCI-TryBot-Result: Go LUCI
Auto-Submit: Filippo Valsorda
Reviewed-by: Cherry Mui
---
 doc/next/6-stdlib/1-secret.md                 |  20 +
 src/cmd/dist/test.go                          |   9 +
 src/go/build/deps_test.go                     |   1 +
 .../goexperiment/exp_runtimesecret_off.go     |   8 +
 .../goexperiment/exp_runtimesecret_on.go      |   8 +
 src/internal/goexperiment/flags.go            |   3 +
 src/runtime/_mkmalloc/mkmalloc.go             |   3 +
 src/runtime/asm_amd64.s                       |  54 +-
 src/runtime/asm_arm64.s                       |  55 +-
 src/runtime/malloc.go                         |  13 +-
 src/runtime/malloc_generated.go               | 993 ++++++++++++++++++
 src/runtime/malloc_stubs.go                   |  19 +
 src/runtime/mgc.go                            |  27 +
 src/runtime/mheap.go                          |  10 +
 src/runtime/preempt.go                        |  17 +
 src/runtime/proc.go                           |  13 +
 src/runtime/runtime2.go                       |  18 +-
 src/runtime/secret.go                         | 118 +++
 src/runtime/secret/asm_amd64.s                | 213 ++++
 src/runtime/secret/asm_arm64.s                | 167 +++
 src/runtime/secret/crash_test.go              | 427 ++++++++
 src/runtime/secret/export.go                  |  16 +
 src/runtime/secret/secret.go                  | 128 +++
 src/runtime/secret/secret_test.go             | 293 ++++++
 src/runtime/secret/stubs.go                   |  32 +
 src/runtime/secret/stubs_noasm.go             |  13 +
 src/runtime/secret/testdata/crash.go          | 142 +++
 src/runtime/secret_amd64.s                    | 107 ++
 src/runtime/secret_arm64.s                    |  90 ++
 src/runtime/secret_asm.go                     |   9 +
 src/runtime/secret_noasm.go                   |  11 +
 src/runtime/secret_nosecret.go                |  32 +
 src/runtime/signal_linux_amd64.go             |  28 +
 src/runtime/signal_linux_arm64.go             |  19 +
 src/runtime/signal_unix.go                    |   6 +
 src/runtime/sizeof_test.go                    |   2 +-
 src/runtime/stack.go                          |  19 +
 src/runtime/sys_linux_amd64.s                 |  12 +
 src/runtime/sys_linux_arm64.s                 |  14 +
 src/runtime/time_linux_amd64.s                |  10 +
 src/runtime/vgetrandom_linux.go               |   8 +
 src/syscall/asm_linux_amd64.s                 |   4 +
 42 files changed, 3170 insertions(+), 21 deletions(-)
 create mode 100644 doc/next/6-stdlib/1-secret.md
 create mode 100644 src/internal/goexperiment/exp_runtimesecret_off.go
 create mode 100644 src/internal/goexperiment/exp_runtimesecret_on.go
 create mode 100644 src/runtime/secret.go
 create mode 100644 src/runtime/secret/asm_amd64.s
 create mode 100644 src/runtime/secret/asm_arm64.s
 create mode 100644 src/runtime/secret/crash_test.go
 create mode 100644 src/runtime/secret/export.go
 create mode 100644 src/runtime/secret/secret.go
 create mode 100644 src/runtime/secret/secret_test.go
 create mode 100644 src/runtime/secret/stubs.go
 create mode 100644 src/runtime/secret/stubs_noasm.go
 create mode 100644 src/runtime/secret/testdata/crash.go
 create mode 100644 src/runtime/secret_amd64.s
 create mode 100644 src/runtime/secret_arm64.s
 create mode 100644 src/runtime/secret_asm.go
 create mode 100644 src/runtime/secret_noasm.go
 create mode 100644 src/runtime/secret_nosecret.go

diff --git a/doc/next/6-stdlib/1-secret.md b/doc/next/6-stdlib/1-secret.md
new file mode 100644
index 0000000000..738d02f54a
--- /dev/null
+++ b/doc/next/6-stdlib/1-secret.md
@@ -0,0 +1,20 @@
+### New secret package
+
+
+The new [secret](/pkg/runtime/secret) package is available as an experiment.
+It provides a facility for securely erasing temporaries used in
+code that manipulates secret information, typically cryptographic in nature.
+Users can access it by passing `GOEXPERIMENT=runtimesecret` at build time.
+
+
+The secret.Do function runs its function argument and then erases all
+temporary storage (registers, stack, new heap allocations) used by
+that function argument. Heap storage is not erased until that storage
+is deemed unreachable by the garbage collector, which might take some
+time after secret.Do completes.
+
+This package is intended to make it easier to ensure [forward
+secrecy](https://en.wikipedia.org/wiki/Forward_secrecy).
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 73ea5c4015..f8d19ac34c 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -753,6 +753,15 @@ func (t *tester) registerTests() {
 		})
 	}
 
+	// Test GOEXPERIMENT=runtimesecret.
+	if !strings.Contains(goexperiment, "runtimesecret") {
+		t.registerTest("GOEXPERIMENT=runtimesecret go test runtime/secret/...", &goTest{
+			variant: "runtimesecret",
+			env:     []string{"GOEXPERIMENT=runtimesecret"},
+			pkg:     "runtime/secret/...",
+		})
+	}
+
 	// Test ios/amd64 for the iOS simulator.
 	if goos == "darwin" && goarch == "amd64" && t.cgoEnabled {
 		t.registerTest("GOOS=ios on darwin/amd64",
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 5466f025e1..e329c8a172 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -108,6 +108,7 @@ var depsRules = `
 	< internal/runtime/cgroup
 	< internal/runtime/gc/scan
 	< runtime
+	< runtime/secret
 	< sync/atomic
 	< internal/sync
 	< weak
diff --git a/src/internal/goexperiment/exp_runtimesecret_off.go b/src/internal/goexperiment/exp_runtimesecret_off.go
new file mode 100644
index 0000000000..d203589249
--- /dev/null
+++ b/src/internal/goexperiment/exp_runtimesecret_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.runtimesecret
+
+package goexperiment
+
+const RuntimeSecret = false
+const RuntimeSecretInt = 0
diff --git a/src/internal/goexperiment/exp_runtimesecret_on.go b/src/internal/goexperiment/exp_runtimesecret_on.go
new file mode 100644
index 0000000000..3788953db8
--- /dev/null
+++ b/src/internal/goexperiment/exp_runtimesecret_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.runtimesecret
+
+package goexperiment
+
+const RuntimeSecret = true
+const RuntimeSecretInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index 2e14d4298a..2cfb71578b 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -125,4 +125,7 @@ type Flags struct {
 	// SIMD enables the simd package and the compiler's handling
 	// of SIMD intrinsics.
 	SIMD bool
+
+	// RuntimeSecret enables the runtime/secret package.
+	RuntimeSecret bool
 }
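A minimal sketch of the intended use, to make the release note above concrete. It assumes the package's sole entry point is secret.Do with signature func Do(f func()), consistent with the note (the actual signature lives in src/runtime/secret/secret.go, not shown in full here); the key-handling body of the closure is purely hypothetical:

	package main

	import (
		"crypto/sha256"
		"fmt"

		"runtime/secret" // build with GOEXPERIMENT=runtimesecret
	)

	func main() {
		var digest [sha256.Size]byte
		secret.Do(func() {
			// Hypothetical secret-handling work. Temporaries created
			// inside this closure are erased: registers and stack when
			// Do returns, heap allocations when the GC frees them.
			key := []byte("ephemeral key material")
			digest = sha256.Sum256(key)
		})
		// Only data deliberately written to storage that existed before
		// the call, like digest here, survives.
		fmt.Printf("%x\n", digest[:4])
	}
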
diff --git a/src/runtime/_mkmalloc/mkmalloc.go b/src/runtime/_mkmalloc/mkmalloc.go
index 1f040c8861..46c50d6661 100644
--- a/src/runtime/_mkmalloc/mkmalloc.go
+++ b/src/runtime/_mkmalloc/mkmalloc.go
@@ -171,6 +171,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
 			{subBasicLit, "elemsize_", str(elemsize)},
 			{subBasicLit, "sizeclass_", str(sc)},
 			{subBasicLit, "noscanint_", str(noscan)},
+			{subBasicLit, "isTiny_", str(0)},
 		},
 	})
 }
@@ -198,6 +199,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
 			{subBasicLit, "sizeclass_", str(tinySizeClass)},
 			{subBasicLit, "size_", str(s)},
 			{subBasicLit, "noscanint_", str(noscan)},
+			{subBasicLit, "isTiny_", str(1)},
 		},
 	})
 }
@@ -215,6 +217,7 @@ func specializedMallocConfig(classes []class, sizeToSizeClass []uint8) generator
 			{subBasicLit, "elemsize_", str(elemsize)},
 			{subBasicLit, "sizeclass_", str(sc)},
 			{subBasicLit, "noscanint_", str(noscan)},
+			{subBasicLit, "isTiny_", str(0)},
 		},
 	})
 }
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index ed46ad4a28..bf208a4d29 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -456,6 +456,13 @@ TEXT gogo<>(SB), NOSPLIT, $0
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
 TEXT runtime·mcall(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+	CMPL	g_secret(R14), $0
+	JEQ	nosecret
+	CALL	·secretEraseRegistersMcall(SB)
+nosecret:
+#endif
+
 	MOVQ	AX, DX	// DX = fn
 
 	// Save state in g->sched. The caller's SP and PC are restored by gogo to
@@ -511,6 +518,17 @@ TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
 
 // func systemstack(fn func())
 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+	// If in secret mode, erase registers on transition
+	// from G stack to M stack.
+	get_tls(CX)
+	MOVQ	g(CX), AX
+	CMPL	g_secret(AX), $0
+	JEQ	nosecret
+	CALL	·secretEraseRegisters(SB)
+nosecret:
+#endif
+
 	MOVQ	fn+0(FP), DI	// DI = fn
 	get_tls(CX)
 	MOVQ	g(CX), AX	// AX = g
@@ -643,6 +661,18 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
 	MOVQ	AX, (m_morebuf+gobuf_sp)(BX)
 	MOVQ	DI, (m_morebuf+gobuf_g)(BX)
 
+	// If in secret mode, erase registers on transition
+	// from G stack to M stack.
+#ifdef GOEXPERIMENT_runtimesecret
+	CMPL	g_secret(DI), $0
+	JEQ	nosecret
+	CALL	·secretEraseRegisters(SB)
+	get_tls(CX)
+	MOVQ	g(CX), DI	// DI = g
+	MOVQ	g_m(DI), BX	// BX = m
+nosecret:
+#endif
+
 	// Call newstack on m->g0's stack.
 	MOVQ	m_g0(BX), BX
 	MOVQ	BX, g(CX)
@@ -917,11 +947,6 @@ TEXT ·asmcgocall_landingpad(SB),NOSPLIT,$0-0
 // aligned appropriately for the gcc ABI.
 // See cgocall.go for more details.
 TEXT ·asmcgocall(SB),NOSPLIT,$0-20
-	MOVQ	fn+0(FP), AX
-	MOVQ	arg+8(FP), BX
-
-	MOVQ	SP, DX
-
 	// Figure out if we need to switch to m->g0 stack.
 	// We get called to create new OS threads too, and those
 	// come in on the m->g0 stack already. Or we might already
@@ -938,6 +963,21 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
 	CMPQ	DI, SI
 	JEQ	nosave
 
+	// Running on a user G.
+	// Figure out if we're running secret code and clear the registers
+	// so that the C code we're about to call doesn't spill confidential
+	// information into memory.
+#ifdef GOEXPERIMENT_runtimesecret
+	CMPL	g_secret(DI), $0
+	JEQ	nosecret
+	CALL	·secretEraseRegisters(SB)
+
+nosecret:
+#endif
+	MOVQ	fn+0(FP), AX
+	MOVQ	arg+8(FP), BX
+	MOVQ	SP, DX
+
 	// Switch to system stack.
 	// The original frame pointer is stored in BP,
 	// which is useful for stack unwinding.
@@ -976,6 +1016,10 @@ nosave:
 	// but then the only path through this code would be a rare case on Solaris.
 	// Using this code for all "already on system stack" calls exercises it more,
 	// which should help keep it correct.
+	MOVQ	fn+0(FP), AX
+	MOVQ	arg+8(FP), BX
+	MOVQ	SP, DX
+
 	SUBQ	$16, SP
 	ANDQ	$~15, SP
 	MOVQ	$0, 8(SP)	// where above code stores g, in case someone looks during debugging
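The amd64 hooks above cover every way control can leave a secret goroutine's stack: mcall, systemstack, morestack, and asmcgocall. The morestack path can be exercised from pure Go by forcing stack growth inside secret.Do. A test-style sketch, illustrative only (it is not one of the CL's tests, and grow's shape is arbitrary):

	package secret_test

	import (
		"runtime/secret"
		"testing"
	)

	// grow burns stack so the runtime must grow the goroutine's stack
	// while secret mode is active, exercising the morestack path that
	// now erases registers and clears the old, smaller stack.
	func grow(n int) byte {
		var buf [256]byte
		buf[0] = byte(n)
		if n == 0 {
			return buf[0]
		}
		return grow(n-1) ^ buf[0]
	}

	func TestStackGrowthUnderDo(t *testing.T) {
		secret.Do(func() {
			_ = grow(1 << 10)
		})
	}
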
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index 01f2690f4e..9916378a93 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -300,6 +300,17 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
 // Fn must never return. It should gogo(&g->sched)
 // to keep running g.
 TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+	MOVW	g_secret(g), R26
+	CBZ	R26, nosecret
+	// Use R26 as a secondary link register.
+	// We purposefully don't erase it in secretEraseRegistersMcall.
+	MOVD	LR, R26
+	BL	runtime·secretEraseRegistersMcall(SB)
+	MOVD	R26, LR
+
+nosecret:
+#endif
 	MOVD	R0, R26	// context
 
 	// Save caller state in g->sched
@@ -340,6 +351,13 @@ TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
 
 // func systemstack(fn func())
 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_runtimesecret
+	MOVW	g_secret(g), R3
+	CBZ	R3, nosecret
+	BL	·secretEraseRegisters(SB)
+
+nosecret:
+#endif
 	MOVD	fn+0(FP), R3	// R3 = fn
 	MOVD	R3, R26	// context
 	MOVD	g_m(g), R4	// R4 = m
@@ -469,6 +487,16 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
 	MOVD	R0, (m_morebuf+gobuf_sp)(R8)	// f's caller's RSP
 	MOVD	g, (m_morebuf+gobuf_g)(R8)
 
+	// If in secret mode, erase registers on transition
+	// from G stack to M stack.
+#ifdef GOEXPERIMENT_runtimesecret
+	MOVW	g_secret(g), R4
+	CBZ	R4, nosecret
+	BL	·secretEraseRegisters(SB)
+	MOVD	g_m(g), R8
+nosecret:
+#endif
+
 	// Call newstack on m->g0's stack.
 	MOVD	m_g0(R8), g
 	BL	runtime·save_g(SB)
@@ -1143,12 +1171,7 @@ TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
 // aligned appropriately for the gcc ABI.
 // See cgocall.go for more details.
 TEXT ·asmcgocall(SB),NOSPLIT,$0-20
-	MOVD	fn+0(FP), R1
-	MOVD	arg+8(FP), R0
-
-	MOVD	RSP, R2	// save original stack pointer
 	CBZ	g, nosave
-	MOVD	g, R4
 
 	// Figure out if we need to switch to m->g0 stack.
 	// We get called to create new OS threads too, and those
@@ -1162,6 +1185,23 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
 	CMP	R3, g
 	BEQ	nosave
 
+	// Running on a user stack. Figure out if we're running
+	// secret code and clear our registers if so.
+#ifdef GOEXPERIMENT_runtimesecret
+	MOVW	g_secret(g), R5
+	CBZ	R5, nosecret
+	BL	·secretEraseRegisters(SB)
+	// restore g0 back into R3
+	MOVD	g_m(g), R3
+	MOVD	m_g0(R3), R3
+
+nosecret:
+#endif
+	MOVD	fn+0(FP), R1
+	MOVD	arg+8(FP), R0
+	MOVD	RSP, R2
+	MOVD	g, R4
+
 	// Switch to system stack.
 	MOVD	R0, R9	// gosave_systemstack_switch<> and save_g might clobber R0
 	BL	gosave_systemstack_switch<>(SB)
@@ -1208,7 +1248,10 @@ nosave:
 	// but then the only path through this code would be a rare case on Solaris.
 	// Using this code for all "already on system stack" calls exercises it more,
 	// which should help keep it correct.
-	MOVD	RSP, R13
+	MOVD	fn+0(FP), R1
+	MOVD	arg+8(FP), R0
+	MOVD	RSP, R2
+	MOVD	R2, R13
 	SUB	$16, R13
 	MOVD	R13, RSP
 	MOVD	$0, R4
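The arm64 changes mirror the amd64 ones, with R26 doubling as a scratch link register around the mcall erase. One more user-visible guarantee from the commit message, independent of architecture, is panic transparency: a panic from the argument function surfaces as if secret.Do itself had panicked. A sketch of what that promises callers (again, not taken from the CL's own test files):

	package secret_test

	import (
		"runtime/secret"
		"testing"
	)

	// A panic inside the argument function still reaches the caller,
	// but the traceback is rewritten to originate from secret.Do,
	// revealing nothing about the secret function's frames.
	func TestPanicFromDo(t *testing.T) {
		defer func() {
			if r := recover(); r != "boom" {
				t.Fatalf("recovered %v, want %q", r, "boom")
			}
		}()
		secret.Do(func() {
			panic("boom")
		})
	}
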
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 4971e16c6a..fd79356aba 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1185,7 +1185,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	} else {
 		if size <= maxSmallSize-gc.MallocHeaderSize {
 			if typ == nil || !typ.Pointers() {
-				if size < maxTinySize {
+				// Tiny allocations might be kept alive by other co-located values.
+				// Make sure secret allocations get zeroed by avoiding the tiny allocator.
+				// See go.dev/issue/76356.
+				gp := getg()
+				if size < maxTinySize && gp.secret == 0 {
 					x, elemsize = mallocgcTiny(size, typ)
 				} else {
 					x, elemsize = mallocgcSmallNoscan(size, typ, needzero)
@@ -1205,6 +1209,13 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		}
 	}
 
+	gp := getg()
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+		// Mark any object allocated while in secret mode as secret.
+		// This ensures we zero it immediately when freeing it.
+		addSecret(x)
+	}
+
 	// Notify sanitizers, if enabled.
 	if raceenabled {
 		racemalloc(x, size-asanRZ)
diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go
index 5abb61257a..6864ca05d3 100644
--- a/src/runtime/malloc_generated.go
+++ b/src/runtime/malloc_generated.go
@@ -5,11 +5,19 @@ package runtime
 
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/runtime/sys"
 	"unsafe"
 )
 
 func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+	const isTiny = 0 ==
+		1
+	gp := getg()
+	if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+		return mallocgcSmallNoScanSC2(size, typ, needzero)
+	}
 	if doubleCheckMalloc {
 		if gcphase == _GCmarktermination {
 			throw("mallocgc called with gcphase == _GCmarktermination")
@@ -151,6 +159,11 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf
 			gcStart(t)
 		}
 	}
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+		addSecret(x)
+	}
+
 	if valgrindenabled {
 		valgrindMalloc(x, size)
 	}
@@ -168,6 +181,13 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+	const isTiny = 0 ==
+		1
+	gp := getg()
+	if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+		return mallocgcSmallNoScanSC2(size, typ, needzero)
+	}
 	if doubleCheckMalloc {
 		if gcphase == _GCmarktermination {
 			throw("mallocgc called with gcphase == _GCmarktermination")
@@ -309,6 +329,11 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf
 			gcStart(t)
 		}
 	}
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+		addSecret(x)
+	}
+
 	if valgrindenabled {
 		valgrindMalloc(x, size)
 	}
@@ -326,6 +351,13 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf
 }
 
 func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+
+	const isTiny = 0 ==
+		1
+	gp := getg()
+	if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 {
+		return mallocgcSmallNoScanSC2(size, typ, needzero)
+	}
 	if doubleCheckMalloc {
 		if gcphase == _GCmarktermination {
 			throw("mallocgc called with gcphase == _GCmarktermination")
@@ -467,6 +499,11 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf
 			gcStart(t)
 		}
 	}
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+
+		addSecret(x)
+	}
+
 	if valgrindenabled {
 		valgrindMalloc(x, size)
 	}
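The allocator side in malloc.go has two halves: route secret tiny allocations away from the tiny allocator (go.dev/issue/76356), and mark everything else with addSecret so the runtime zeroes it the moment the GC frees it. From the user's perspective, a sketch (the HMAC body is hypothetical):

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"fmt"
		"runtime"
		"runtime/secret"
	)

	func main() {
		var tag []byte
		secret.Do(func() {
			// Allocations made here are marked secret, so the runtime
			// zeroes them as soon as they are freed instead of waiting
			// for the memory to be reused.
			key := make([]byte, 32) // stand-in for real key material
			mac := hmac.New(sha256.New, key)
			mac.Write([]byte("message"))
			tag = mac.Sum(nil) // secret-marked too; valid while reachable
		})
		// Heap erasure waits until the GC proves the temporaries dead,
		// which may be some time after Do returns.
		runtime.GC()
		fmt.Printf("%x\n", tag[:4])
	}
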
@@ -484,6 +521,13 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -625,6 +669,11 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -642,6 +691,13 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -783,6 +839,11 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -800,6 +861,13 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -941,6 +1009,11 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -958,6 +1031,13 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -1099,6 +1179,11 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -1116,6 +1201,13 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -1257,6 +1349,11 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { 
valgrindMalloc(x, size) } @@ -1274,6 +1371,13 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -1415,6 +1519,11 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -1432,6 +1541,13 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf } func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -1573,6 +1689,11 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -1590,6 +1711,13 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -1731,6 +1859,11 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -1748,6 +1881,13 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -1889,6 +2029,11 @@ func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -1906,6 +2051,13 @@ func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2047,6 +2199,11 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + 
addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -2064,6 +2221,13 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2205,6 +2369,11 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -2222,6 +2391,13 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2363,6 +2539,11 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -2380,6 +2561,13 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2521,6 +2709,11 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -2538,6 +2731,13 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2679,6 +2879,11 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -2696,6 +2901,13 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2837,6 +3049,11 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if 
goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -2854,6 +3071,13 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -2995,6 +3219,11 @@ func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3012,6 +3241,13 @@ func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -3153,6 +3389,11 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3170,6 +3411,13 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -3311,6 +3559,11 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3328,6 +3581,13 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -3469,6 +3729,11 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3486,6 +3751,13 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -3627,6 +3899,11 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, 
needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3644,6 +3921,13 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -3785,6 +4069,11 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3802,6 +4091,13 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -3943,6 +4239,11 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -3960,6 +4261,13 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa } func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4101,6 +4409,11 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4118,6 +4431,13 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa } func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4169,6 +4489,11 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4251,6 +4576,11 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4268,6 +4598,13 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return 
mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4319,6 +4656,11 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4401,6 +4743,11 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4418,6 +4765,13 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4469,6 +4823,11 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4551,6 +4910,11 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4568,6 +4932,13 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4619,6 +4990,11 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4701,6 +5077,11 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4718,6 +5099,13 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4769,6 +5157,11 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4851,6 +5244,11 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -4868,6 +5266,13 @@ func mallocTiny5(size uintptr, typ 
*_type, needzero bool) unsafe.Pointer { } func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -4919,6 +5324,11 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5001,6 +5411,11 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5018,6 +5433,13 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5069,6 +5491,11 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5151,6 +5578,11 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5168,6 +5600,13 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5219,6 +5658,11 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5301,6 +5745,11 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5318,6 +5767,13 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5369,6 +5825,11 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5451,6 +5912,11 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) 
unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5468,6 +5934,13 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5519,6 +5992,11 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5601,6 +6079,11 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5618,6 +6101,13 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5669,6 +6159,11 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5751,6 +6246,11 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5768,6 +6268,13 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5819,6 +6326,11 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5901,6 +6413,11 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -5918,6 +6435,13 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -5969,6 +6493,11 @@ func mallocTiny13(size uintptr, typ *_type, needzero 
bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6051,6 +6580,11 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6068,6 +6602,13 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6119,6 +6660,11 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6201,6 +6747,11 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6218,6 +6769,13 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 1 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6269,6 +6827,11 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6351,6 +6914,11 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6368,6 +6936,13 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6409,6 +6984,11 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6474,6 +7054,11 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6491,6 +7076,13 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { 
+ return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6532,6 +7124,11 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6597,6 +7194,11 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6614,6 +7216,13 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6655,6 +7264,11 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6720,6 +7334,11 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6737,6 +7356,13 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6778,6 +7404,11 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6843,6 +7474,11 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6860,6 +7496,13 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -6901,6 +7544,11 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6966,6 +7614,11 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -6983,6 +7636,13 @@ func mallocgcSmallNoScanSC6(size uintptr, typ 
*_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7024,6 +7684,11 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7089,6 +7754,11 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7106,6 +7776,13 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7147,6 +7824,11 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7212,6 +7894,11 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7229,6 +7916,13 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7270,6 +7964,11 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7335,6 +8034,11 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7352,6 +8056,13 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin } func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7393,6 +8104,11 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7458,6 +8174,11 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, 
needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7475,6 +8196,13 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7516,6 +8244,11 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7581,6 +8314,11 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7598,6 +8336,13 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7639,6 +8384,11 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7704,6 +8454,11 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7721,6 +8476,13 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7762,6 +8524,11 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7827,6 +8594,11 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7844,6 +8616,13 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -7885,6 +8664,11 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, 
needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7950,6 +8734,11 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -7967,6 +8756,13 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8008,6 +8804,11 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8073,6 +8874,11 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8090,6 +8896,13 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8131,6 +8944,11 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8196,6 +9014,11 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8213,6 +9036,13 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8254,6 +9084,11 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8319,6 +9154,11 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8336,6 +9176,13 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return 
mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8377,6 +9224,11 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8442,6 +9294,11 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8459,6 +9316,13 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8500,6 +9364,11 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8565,6 +9434,11 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8582,6 +9456,13 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8623,6 +9504,11 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8688,6 +9574,11 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8705,6 +9596,13 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8746,6 +9644,11 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8811,6 +9714,11 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8828,6 +9736,13 @@ func mallocgcSmallNoScanSC21(size uintptr, typ 
*_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8869,6 +9784,11 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8934,6 +9854,11 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -8951,6 +9876,13 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -8992,6 +9924,11 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9057,6 +9994,11 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9074,6 +10016,13 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -9115,6 +10064,11 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9180,6 +10134,11 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9197,6 +10156,13 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -9238,6 +10204,11 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9303,6 +10274,11 @@ func mallocgcSmallNoScanSC25(size uintptr, typ 
*_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9320,6 +10296,13 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi } func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + const isTiny = 0 == + 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -9361,6 +10344,11 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi x := v { + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } @@ -9426,6 +10414,11 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if goexperiment.RuntimeSecret && gp.secret > 0 { + + addSecret(x) + } + if valgrindenabled { valgrindMalloc(x, size) } diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go index e9752956b8..58ca1d5f79 100644 --- a/src/runtime/malloc_stubs.go +++ b/src/runtime/malloc_stubs.go @@ -22,6 +22,7 @@ package runtime import ( "internal/goarch" + "internal/goexperiment" "internal/runtime/sys" "unsafe" ) @@ -36,6 +37,7 @@ const elemsize_ = 8 const sizeclass_ = 0 const noscanint_ = 0 const size_ = 0 +const isTiny_ = 0 func malloc0(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if doubleCheckMalloc { @@ -55,6 +57,17 @@ func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // WARNING: mallocStub does not do any work for sanitizers so callers need // to steer out of this codepath early if sanitizers are enabled. func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { + + // secret code, need to avoid the tiny allocator since it might keep + // co-located values alive longer and prevent timely zero-ing + // + // Call directly into the NoScan allocator. + // See go.dev/issue/76356 + const isTiny = isTiny_ == 1 + gp := getg() + if goexperiment.RuntimeSecret && isTiny && gp.secret > 0 { + return mallocgcSmallNoScanSC2(size, typ, needzero) + } if doubleCheckMalloc { if gcphase == _GCmarktermination { throw("mallocgc called with gcphase == _GCmarktermination") @@ -82,6 +95,12 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Actually do the allocation. x, elemsize := inlinedMalloc(size, typ, needzero) + if goexperiment.RuntimeSecret && gp.secret > 0 { + // Mark any object allocated while in secret mode as secret. + // This ensures we zero it immediately when freeing it. + addSecret(x) + } + // Notify valgrind, if enabled. // To allow the compiler to not know about valgrind, we do valgrind instrumentation // unlike the other sanitizers. diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index febcd9558c..32cd8cb0e8 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -838,6 +838,33 @@ func gcStart(trigger gcTrigger) { // Accumulate fine-grained stopping time. work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1) + if goexperiment.RuntimeSecret { + // The world is stopped. Every M is either parked + // or in a syscall, or running some non-go code which can't run in secret mode. 
+ // To get to a parked or a syscall state, + // they have to transition through a point where we erase any + // confidential information in the registers. Making them + // handle a signal now would clobber the signal stack + // with non-confidential information. + // + // TODO(dmo): this is linear with respect to the number of Ms. + // Investigate just how long this takes and whether we can somehow + // loop over just the Ms that have secret info on their signal stack, + // or cooperatively have the Ms send signals to themselves just + // after they erase their registers, but before they enter a syscall. + for mp := allm; mp != nil; mp = mp.alllink { + // even though the world is stopped, the kernel can still + // invoke our signal handlers. No confidential information can be spilled + // (because it's been erased by this time), but we can avoid + // sending additional signals by atomically inspecting this variable. + if atomic.Xchg(&mp.signalSecret, 0) != 0 { + noopSignal(mp) + } + // TODO: synchronize with the signal handler to ensure that the signal + // was actually delivered. + } + } + // Finish sweep before we start concurrent scan. systemstack(func() { finishsweep_m() diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 0ccaadc891..61dc5457fc 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -225,6 +225,7 @@ type mheap struct { specialPinCounterAlloc fixalloc // allocator for specialPinCounter specialWeakHandleAlloc fixalloc // allocator for specialWeakHandle specialBubbleAlloc fixalloc // allocator for specialBubble + specialSecretAlloc fixalloc // allocator for specialSecret speciallock mutex // lock for special record allocators. arenaHintAlloc fixalloc // allocator for arenaHints @@ -803,6 +804,7 @@ func (h *mheap) init() { h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys) h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys) h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys) + h.specialSecretAlloc.init(unsafe.Sizeof(specialSecret{}), nil, nil, &memstats.other_sys) h.specialWeakHandleAlloc.init(unsafe.Sizeof(specialWeakHandle{}), nil, nil, &memstats.gcMiscSys) h.specialBubbleAlloc.init(unsafe.Sizeof(specialBubble{}), nil, nil, &memstats.other_sys) h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys) @@ -1970,6 +1972,9 @@ const ( _KindSpecialCheckFinalizer = 8 // _KindSpecialBubble is used to associate objects with synctest bubbles. _KindSpecialBubble = 9 + // _KindSpecialSecret is a special used to mark an object + // as needing zeroing immediately upon freeing.
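+ // The special is attached by addSecret to anything allocated while
+ // the goroutine is inside runtime/secret.Do (gp.secret > 0), and it is
+ // consumed in freeSpecial below, which zeroes the object's memory at
+ // the moment the object is freed.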
+ _KindSpecialSecret = 10 ) type special struct { @@ -2822,6 +2827,11 @@ func freeSpecial(s *special, p unsafe.Pointer, size uintptr) { lock(&mheap_.speciallock) mheap_.specialBubbleAlloc.free(unsafe.Pointer(st)) unlock(&mheap_.speciallock) + case _KindSpecialSecret: + memclrNoHeapPointers(p, size) + lock(&mheap_.speciallock) + mheap_.specialSecretAlloc.free(unsafe.Pointer(s)) + unlock(&mheap_.speciallock) default: throw("bad special kind") panic("not reached") diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index 447c7399fc..892f900073 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -55,6 +55,7 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/goexperiment" "internal/stringslite" ) @@ -406,6 +407,22 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { return false, 0 } + // If we're in the middle of a secret computation, we can't + // allow any conservative scanning of stacks, as that may lead + // to secrets leaking out from the stack into work buffers. + // Additionally, the preemption code will store the + // machine state (including registers which may contain confidential + // information) into the preemption buffers. + // + // TODO(dmo): there's technically nothing stopping us from doing the + // preemption, granted that we don't conservatively scan and we clean up after + // ourselves. This is made slightly harder by the xRegs cached allocations + // that can move between Gs and Ps. In any case, the intended users (cryptography code) + // are unlikely to get stuck in non-terminating loops. + if goexperiment.RuntimeSecret && gp.secret > 0 { + return false, 0 + } + // Check if PC is an unsafe-point. f := findfunc(pc) if !f.valid() { diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 3b98be1074..16538098cf 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -8,6 +8,7 @@ import ( "internal/abi" "internal/cpu" "internal/goarch" + "internal/goexperiment" "internal/goos" "internal/runtime/atomic" "internal/runtime/exithook" @@ -4454,6 +4455,13 @@ func goexit1() { // goexit continuation on g0. func goexit0(gp *g) { + if goexperiment.RuntimeSecret && gp.secret > 0 { + // Erase the whole stack. This path only occurs when + // runtime.Goexit is called from within a runtime/secret.Do call. + memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) + // Since this is running on g0, our registers are already zeroed from going through + // mcall in secret mode. + } gdestroy(gp) schedule() } @@ -4482,6 +4490,7 @@ func gdestroy(gp *g) { gp.timer = nil gp.bubble = nil gp.fipsOnlyBypass = false + gp.secret = 0 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { // Flush assist credit to the global pool. This gives @@ -5216,6 +5225,10 @@ func malg(stacksize int32) *g { // The compiler turns a go statement into a call to this. func newproc(fn *funcval) { gp := getg() + if goexperiment.RuntimeSecret && gp.secret > 0 { + panic("goroutine spawned while running in secret mode") + } + pc := sys.GetCallerPC() systemstack(func() { newg := newproc1(fn, gp, pc, false, waitReasonZero) diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 58eaf80237..cd75e2dd7c 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -549,6 +549,7 @@ type g struct { syncSafePoint bool // set if g is stopped at a synchronous safe point. runningCleanups atomic.Bool sig uint32 + secret int32 // current nesting of runtime/secret.Do calls.
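+ // Because it is a nesting count rather than a flag, runtime/secret.Do
+ // calls compose; a sketch of the observable behavior via the exported API:
+ //
+ //	secret.Do(func() {
+ //		// secret.Enabled() == true; the count is 1.
+ //		secret.Do(func() { /* count is 2 */ })
+ //		// Back to 1; the inner Do has already erased its stack and registers.
+ //	})
+ //	// Count is 0 again; secret.Enabled() == false.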
writebuf []byte sigcode0 uintptr sigcode1 uintptr @@ -620,14 +621,15 @@ type m struct { // Fields whose offsets are not known to debuggers. - procid uint64 // for debuggers, but offset not hard-coded - gsignal *g // signal-handling g - goSigStack gsignalStack // Go-allocated signal handling stack - sigmask sigset // storage for saved signal mask - tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) - mstartfn func() - curg *g // current running goroutine - caughtsig guintptr // goroutine running during fatal signal + procid uint64 // for debuggers, but offset not hard-coded + gsignal *g // signal-handling g + goSigStack gsignalStack // Go-allocated signal handling stack + sigmask sigset // storage for saved signal mask + tls [tlsSlots]uintptr // thread-local storage (for x86 extern register) + mstartfn func() + curg *g // current running goroutine + caughtsig guintptr // goroutine running during fatal signal + signalSecret uint32 // whether we have secret information in our signal stack // p is the currently attached P for executing Go code, nil if not executing user Go code. // diff --git a/src/runtime/secret.go b/src/runtime/secret.go new file mode 100644 index 0000000000..4c199d31d0 --- /dev/null +++ b/src/runtime/secret.go @@ -0,0 +1,118 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64) && linux + +package runtime + +import ( + "internal/goarch" + "unsafe" ) + +//go:linkname secret_count runtime/secret.count +func secret_count() int32 { + return getg().secret +} + +//go:linkname secret_inc runtime/secret.inc +func secret_inc() { + gp := getg() + gp.secret++ +} + +//go:linkname secret_dec runtime/secret.dec +func secret_dec() { + gp := getg() + gp.secret-- +} + +//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets +func secret_eraseSecrets() { + // zero all the stack memory that might be dirtied with + // secrets. We do this from the systemstack so that we + // don't have to figure out which holes we have to keep + // to ensure that we can return from memclr. gp.sched will + // act as a pigeonhole for our actual return. + lo := getg().stack.lo + systemstack(func() { + // Note, this systemstack call happens within secret mode, + // so we don't have to call out to erase our registers; the systemstack + // code will do that. + mp := acquirem() + sp := mp.curg.sched.sp + // we need to keep the systemstack return on top of the stack being cleared + // for traceback + sp -= goarch.PtrSize + // TODO: keep some sort of low water mark so that we don't have + // to zero a potentially large stack if we used just a little + // bit of it. That will allow us to use a higher value for + // lo than gp.stack.lo. + memclrNoHeapPointers(unsafe.Pointer(lo), sp-lo) + releasem(mp) + }) + // Don't put any code here: the stack frame's contents are gone! +} + +// specialSecret tracks whether we need to zero an object immediately +// upon freeing. +type specialSecret struct { + special special +} + +// addSecret records the fact that we need to zero p immediately +// when it is freed. +func addSecret(p unsafe.Pointer) { + // TODO(dmo): figure out the cost of these. These are mostly + // intended to catch allocations that happen via the runtime + // that the user has no control over and not big buffers that user + // code is allocating.
The cost should be relatively low, + // but we have run into a wall with other special allocations before. + lock(&mheap_.speciallock) + s := (*specialSecret)(mheap_.specialSecretAlloc.alloc()) + s.special.kind = _KindSpecialSecret + unlock(&mheap_.speciallock) + addspecial(p, &s.special, false) +} + +// send a no-op signal to an M for the purposes of +// clobbering the signal stack +// +// Use sigpreempt. If we don't have a preemption queued, this just +// turns into a no-op +func noopSignal(mp *m) { + signalM(mp, sigPreempt) +} + +// secret_getStack returns the memory range of the +// current goroutine's stack. +// For testing only. +// Note that this is kind of tricky, as the goroutine can +// be copied and/or exit before the result is used, at which +// point it may no longer be valid. +// +//go:linkname secret_getStack runtime/secret.getStack +func secret_getStack() (uintptr, uintptr) { + gp := getg() + return gp.stack.lo, gp.stack.hi +} + +// return a slice of all Ms' signal stacks. +// For testing only. +// +//go:linkname secret_appendSignalStacks runtime/secret.appendSignalStacks +func secret_appendSignalStacks(sigstacks []stack) []stack { + // This is probably overkill, but it's what + // doAllThreadsSyscall does. + stw := stopTheWorld(stwAllThreadsSyscall) + allocmLock.lock() + acquirem() + for mp := allm; mp != nil; mp = mp.alllink { + sigstacks = append(sigstacks, mp.gsignal.stack) + } + releasem(getg().m) + allocmLock.unlock() + startTheWorld(stw) + return sigstacks +} diff --git a/src/runtime/secret/asm_amd64.s b/src/runtime/secret/asm_amd64.s new file mode 100644 index 0000000000..7011afc5eb --- /dev/null +++ b/src/runtime/secret/asm_amd64.s @@ -0,0 +1,213 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Note: this assembly file is used for testing only. +// We need to access registers directly to properly test +// that secrets are erased, and go test doesn't like to conditionally +// include assembly files.
+// These functions defined in the package proper and we +// rely on the linker to prune these away in regular builds + +#include "go_asm.h" +#include "funcdata.h" + +TEXT ·loadRegisters(SB),0,$0-8 + MOVQ p+0(FP), AX + + MOVQ (AX), R10 + MOVQ (AX), R11 + MOVQ (AX), R12 + MOVQ (AX), R13 + + MOVOU (AX), X1 + MOVOU (AX), X2 + MOVOU (AX), X3 + MOVOU (AX), X4 + + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE return + + VMOVDQU (AX), Y5 + VMOVDQU (AX), Y6 + VMOVDQU (AX), Y7 + VMOVDQU (AX), Y8 + + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JNE return + + VMOVUPD (AX), Z14 + VMOVUPD (AX), Z15 + VMOVUPD (AX), Z16 + VMOVUPD (AX), Z17 + + KMOVQ (AX), K2 + KMOVQ (AX), K3 + KMOVQ (AX), K4 + KMOVQ (AX), K5 + +return: + RET + +TEXT ·spillRegisters(SB),0,$0-16 + MOVQ p+0(FP), AX + MOVQ AX, BX + + MOVQ R10, (AX) + MOVQ R11, 8(AX) + MOVQ R12, 16(AX) + MOVQ R13, 24(AX) + ADDQ $32, AX + + MOVOU X1, (AX) + MOVOU X2, 16(AX) + MOVOU X3, 32(AX) + MOVOU X4, 48(AX) + ADDQ $64, AX + + CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1 + JNE return + + VMOVDQU Y5, (AX) + VMOVDQU Y6, 32(AX) + VMOVDQU Y7, 64(AX) + VMOVDQU Y8, 96(AX) + ADDQ $128, AX + + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JNE return + + VMOVUPD Z14, (AX) + ADDQ $64, AX + VMOVUPD Z15, (AX) + ADDQ $64, AX + VMOVUPD Z16, (AX) + ADDQ $64, AX + VMOVUPD Z17, (AX) + ADDQ $64, AX + + KMOVQ K2, (AX) + ADDQ $8, AX + KMOVQ K3, (AX) + ADDQ $8, AX + KMOVQ K4, (AX) + ADDQ $8, AX + KMOVQ K5, (AX) + ADDQ $8, AX + +return: + SUBQ BX, AX + MOVQ AX, ret+8(FP) + RET + +TEXT ·useSecret(SB),0,$64-24 + NO_LOCAL_POINTERS + + // Load secret into AX + MOVQ secret_base+0(FP), AX + MOVQ (AX), AX + + // Scatter secret all across registers. + // Increment low byte so we can tell which register + // a leaking secret came from. + ADDQ $2, AX // add 2 so Rn has secret #n. + MOVQ AX, BX + INCQ AX + MOVQ AX, CX + INCQ AX + MOVQ AX, DX + INCQ AX + MOVQ AX, SI + INCQ AX + MOVQ AX, DI + INCQ AX + MOVQ AX, BP + INCQ AX + MOVQ AX, R8 + INCQ AX + MOVQ AX, R9 + INCQ AX + MOVQ AX, R10 + INCQ AX + MOVQ AX, R11 + INCQ AX + MOVQ AX, R12 + INCQ AX + MOVQ AX, R13 + INCQ AX + MOVQ AX, R14 + INCQ AX + MOVQ AX, R15 + + CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1 + JNE noavx512 + VMOVUPD (SP), Z0 + VMOVUPD (SP), Z1 + VMOVUPD (SP), Z2 + VMOVUPD (SP), Z3 + VMOVUPD (SP), Z4 + VMOVUPD (SP), Z5 + VMOVUPD (SP), Z6 + VMOVUPD (SP), Z7 + VMOVUPD (SP), Z8 + VMOVUPD (SP), Z9 + VMOVUPD (SP), Z10 + VMOVUPD (SP), Z11 + VMOVUPD (SP), Z12 + VMOVUPD (SP), Z13 + VMOVUPD (SP), Z14 + VMOVUPD (SP), Z15 + VMOVUPD (SP), Z16 + VMOVUPD (SP), Z17 + VMOVUPD (SP), Z18 + VMOVUPD (SP), Z19 + VMOVUPD (SP), Z20 + VMOVUPD (SP), Z21 + VMOVUPD (SP), Z22 + VMOVUPD (SP), Z23 + VMOVUPD (SP), Z24 + VMOVUPD (SP), Z25 + VMOVUPD (SP), Z26 + VMOVUPD (SP), Z27 + VMOVUPD (SP), Z28 + VMOVUPD (SP), Z29 + VMOVUPD (SP), Z30 + VMOVUPD (SP), Z31 + +noavx512: + MOVOU (SP), X0 + MOVOU (SP), X1 + MOVOU (SP), X2 + MOVOU (SP), X3 + MOVOU (SP), X4 + MOVOU (SP), X5 + MOVOU (SP), X6 + MOVOU (SP), X7 + MOVOU (SP), X8 + MOVOU (SP), X9 + MOVOU (SP), X10 + MOVOU (SP), X11 + MOVOU (SP), X12 + MOVOU (SP), X13 + MOVOU (SP), X14 + MOVOU (SP), X15 + + // Put secret on the stack. + INCQ AX + MOVQ AX, (SP) + MOVQ AX, 8(SP) + MOVQ AX, 16(SP) + MOVQ AX, 24(SP) + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) + MOVQ AX, 48(SP) + MOVQ AX, 56(SP) + + // Delay a bit. This makes it more likely that + // we will be the target of a signal while + // registers contain secrets. 
+ // It also tests the path from G stack to M stack + // to scheduler and back. + CALL ·delay(SB) + + RET diff --git a/src/runtime/secret/asm_arm64.s b/src/runtime/secret/asm_arm64.s new file mode 100644 index 0000000000..1d7f7c1c92 --- /dev/null +++ b/src/runtime/secret/asm_arm64.s @@ -0,0 +1,167 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Note: this assembly file is used for testing only. +// We need to access registers directly to properly test +// that secrets are erased and go test doesn't like to conditionally +// include assembly files. +// These functions defined in the package proper and we +// rely on the linker to prune these away in regular builds + +#include "go_asm.h" +#include "funcdata.h" + +TEXT ·loadRegisters(SB),0,$0-8 + MOVD p+0(FP), R0 + + MOVD (R0), R10 + MOVD (R0), R11 + MOVD (R0), R12 + MOVD (R0), R13 + + FMOVD (R0), F15 + FMOVD (R0), F16 + FMOVD (R0), F17 + FMOVD (R0), F18 + + VLD1 (R0), [V20.B16] + VLD1 (R0), [V21.H8] + VLD1 (R0), [V22.S4] + VLD1 (R0), [V23.D2] + + RET + +TEXT ·spillRegisters(SB),0,$0-16 + MOVD p+0(FP), R0 + MOVD R0, R1 + + MOVD R10, (R0) + MOVD R11, 8(R0) + MOVD R12, 16(R0) + MOVD R13, 24(R0) + ADD $32, R0 + + FMOVD F15, (R0) + FMOVD F16, 16(R0) + FMOVD F17, 32(R0) + FMOVD F18, 64(R0) + ADD $64, R0 + + VST1.P [V20.B16], (R0) + VST1.P [V21.H8], (R0) + VST1.P [V22.S4], (R0) + VST1.P [V23.D2], (R0) + + SUB R1, R0, R0 + MOVD R0, ret+8(FP) + RET + +TEXT ·useSecret(SB),0,$0-24 + NO_LOCAL_POINTERS + + // Load secret into R0 + MOVD secret_base+0(FP), R0 + MOVD (R0), R0 + // Scatter secret across registers. + // Increment low byte so we can tell which register + // a leaking secret came from. + + // TODO(dmo): more substantial dirtying here + ADD $1, R0 + MOVD R0, R1 + ADD $1, R0 + MOVD R0, R2 + ADD $1, R0 + MOVD R0, R3 + ADD $1, R0 + MOVD R0, R4 + ADD $1, R0 + MOVD R0, R5 + ADD $1, R0 + MOVD R0, R6 + ADD $1, R0 + MOVD R0, R7 + ADD $1, R0 + MOVD R0, R8 + ADD $1, R0 + MOVD R0, R9 + ADD $1, R0 + MOVD R0, R10 + ADD $1, R0 + MOVD R0, R11 + ADD $1, R0 + MOVD R0, R12 + ADD $1, R0 + MOVD R0, R13 + ADD $1, R0 + MOVD R0, R14 + ADD $1, R0 + MOVD R0, R15 + + // Dirty the floating point registers + ADD $1, R0 + FMOVD R0, F0 + ADD $1, R0 + FMOVD R0, F1 + ADD $1, R0 + FMOVD R0, F2 + ADD $1, R0 + FMOVD R0, F3 + ADD $1, R0 + FMOVD R0, F4 + ADD $1, R0 + FMOVD R0, F5 + ADD $1, R0 + FMOVD R0, F6 + ADD $1, R0 + FMOVD R0, F7 + ADD $1, R0 + FMOVD R0, F8 + ADD $1, R0 + FMOVD R0, F9 + ADD $1, R0 + FMOVD R0, F10 + ADD $1, R0 + FMOVD R0, F11 + ADD $1, R0 + FMOVD R0, F12 + ADD $1, R0 + FMOVD R0, F13 + ADD $1, R0 + FMOVD R0, F14 + ADD $1, R0 + FMOVD R0, F15 + ADD $1, R0 + FMOVD R0, F16 + ADD $1, R0 + FMOVD R0, F17 + ADD $1, R0 + FMOVD R0, F18 + ADD $1, R0 + FMOVD R0, F19 + ADD $1, R0 + FMOVD R0, F20 + ADD $1, R0 + FMOVD R0, F21 + ADD $1, R0 + FMOVD R0, F22 + ADD $1, R0 + FMOVD R0, F23 + ADD $1, R0 + FMOVD R0, F24 + ADD $1, R0 + FMOVD R0, F25 + ADD $1, R0 + FMOVD R0, F26 + ADD $1, R0 + FMOVD R0, F27 + ADD $1, R0 + FMOVD R0, F28 + ADD $1, R0 + FMOVD R0, F29 + ADD $1, R0 + FMOVD R0, F30 + ADD $1, R0 + FMOVD R0, F31 + RET diff --git a/src/runtime/secret/crash_test.go b/src/runtime/secret/crash_test.go new file mode 100644 index 0000000000..1bd099aa93 --- /dev/null +++ b/src/runtime/secret/crash_test.go @@ -0,0 +1,427 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.runtimesecret && linux + +package secret + +import ( + "bytes" + "debug/elf" + "fmt" + "internal/testenv" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" +) + +// Copied from runtime/runtime-gdb_unix_test.go +func canGenerateCore(t *testing.T) bool { + // Ensure there is enough RLIMIT_CORE available to generate a full core. + var lim syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim) + if err != nil { + t.Fatalf("error getting rlimit: %v", err) + } + // Minimum RLIMIT_CORE max to allow. This is a conservative estimate. + // Most systems allow infinity. + const minRlimitCore = 100 << 20 // 100 MB + if lim.Max < minRlimitCore { + t.Skipf("RLIMIT_CORE max too low: %#+v", lim) + } + + // Make sure core pattern will send core to the current directory. + b, err := os.ReadFile("/proc/sys/kernel/core_pattern") + if err != nil { + t.Fatalf("error reading core_pattern: %v", err) + } + if string(b) != "core\n" { + t.Skipf("Unexpected core pattern %q", string(b)) + } + + coreUsesPID := false + b, err = os.ReadFile("/proc/sys/kernel/core_uses_pid") + if err == nil { + switch string(bytes.TrimSpace(b)) { + case "0": + case "1": + coreUsesPID = true + default: + t.Skipf("unexpected core_uses_pid value %q", string(b)) + } + } + return coreUsesPID +} + +func TestCore(t *testing.T) { + // use secret, grab a coredump, rummage through + // it, trying to find our secret. + + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skip("unsupported arch") + } + coreUsesPid := canGenerateCore(t) + + // Build our crashing program + // Because we need assembly files to properly dirty our state + // we need to construct a package in our temporary directory. + tmpDir := t.TempDir() + // copy our base source + err := copyToDir("./testdata/crash.go", tmpDir, nil) + if err != nil { + t.Fatalf("error copying directory %v", err) + } + // Copy our testing assembly files. Use the ones from the package + // to assure that they are always in sync + err = copyToDir("./asm_amd64.s", tmpDir, nil) + if err != nil { + t.Fatalf("error copying file %v", err) + } + err = copyToDir("./asm_arm64.s", tmpDir, nil) + if err != nil { + t.Fatalf("error copying file %v", err) + } + err = copyToDir("./stubs.go", tmpDir, func(s string) string { + return strings.Replace(s, "package secret", "package main", 1) + }) + if err != nil { + t.Fatalf("error copying file %v", err) + } + + // the crashing package will live out of tree, so its source files + // cannot refer to our internal packages. 
However, the assembly files + // can refer to internal names and we can pass the missing offsets as + // a small generated file + offsets := ` + package main + const ( + offsetX86HasAVX = %v + offsetX86HasAVX512 = %v + ) + ` + err = os.WriteFile(filepath.Join(tmpDir, "offsets.go"), []byte(fmt.Sprintf(offsets, offsetX86HasAVX, offsetX86HasAVX512)), 0666) + if err != nil { + t.Fatalf("error writing offset file %v", err) + } + + // generate go.mod file + cmd := exec.Command(testenv.GoToolPath(t), "mod", "init", "crashtest") + cmd.Dir = tmpDir + out, err := testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("error initing module %v\n%s", err, out) + } + + cmd = exec.Command(testenv.GoToolPath(t), "build", "-o", filepath.Join(tmpDir, "a.exe")) + cmd.Dir = tmpDir + out, err = testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("error building source %v\n%s", err, out) + } + + // Start the test binary. + cmd = testenv.CommandContext(t, t.Context(), "./a.exe") + cmd.Dir = tmpDir + var stdout strings.Builder + cmd.Stdout = &stdout + cmd.Stderr = &stdout + + err = cmd.Run() + // For debugging. + t.Logf("\n\n\n--- START SUBPROCESS ---\n\n\n%s\n\n--- END SUBPROCESS ---\n\n\n", stdout.String()) + if err == nil { + t.Fatalf("test binary did not crash") + } + eErr, ok := err.(*exec.ExitError) + if !ok { + t.Fatalf("error is not exit error: %v", err) + } + if eErr.Exited() { + t.Fatalf("process exited instead of being terminated: %v", eErr) + } + + rummage(t, tmpDir, eErr.Pid(), coreUsesPid) +} + +func copyToDir(name string, dir string, replace func(string) string) error { + f, err := os.ReadFile(name) + if err != nil { + return err + } + if replace != nil { + f = []byte(replace(string(f))) + } + return os.WriteFile(filepath.Join(dir, filepath.Base(name)), f, 0666) +} + +type violation struct { + id byte // secret ID + off uint64 // offset in core dump +} + +// A secret value that should never appear in a core dump, +// except for this global variable itself. +// The first byte of the secret is variable, to track +// different instances of it. +// +// If this value is changed, update ./internal/crashsecret/main.go +// TODO: this is little-endian specific. +var secretStore = [8]byte{ + 0x00, + 0x81, + 0xa0, + 0xc6, + 0xb3, + 0x01, + 0x66, + 0x53, +} + +func rummage(t *testing.T, tmpDir string, pid int, coreUsesPid bool) { + coreFileName := "core" + if coreUsesPid { + coreFileName += fmt.Sprintf(".%d", pid) + } + core, err := os.Open(filepath.Join(tmpDir, coreFileName)) + if err != nil { + t.Fatalf("core file not found: %v", err) + } + b, err := io.ReadAll(core) + if err != nil { + t.Fatalf("can't read core file: %v", err) + } + + // Open elf view onto core file. + coreElf, err := elf.NewFile(core) + if err != nil { + t.Fatalf("can't parse core file: %v", err) + } + + // Look for any places that have the secret. + var violations []violation // core file offsets where we found a secret + i := 0 + for { + j := bytes.Index(b[i:], secretStore[1:]) + if j < 0 { + break + } + j-- + i += j + + t.Errorf("secret %d found at offset %x in core file", b[i], i) + violations = append(violations, violation{ + id: b[i], + off: uint64(i), + }) + + i += len(secretStore) + } + + // Get more specific data about where in the core we found the secrets. 
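+ // (Each violating offset is mapped back to the ELF program header or
+ // PT_NOTE register region that contains it, so the log can say which
+ // thread's register or which memory range the secret leaked into.)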
+ regions := elfRegions(t, core, coreElf) + for _, r := range regions { + for _, v := range violations { + if v.off >= r.min && v.off < r.max { + var addr string + if r.addrMin != 0 { + addr = fmt.Sprintf(" addr=%x", r.addrMin+(v.off-r.min)) + } + t.Logf("additional info: secret %d at offset %x in %s%s", v.id, v.off-r.min, r.name, addr) + } + } + } +} + +type elfRegion struct { + name string + min, max uint64 // core file offset range + addrMin, addrMax uint64 // inferior address range (or 0,0 if no address, like registers) +} + +func elfRegions(t *testing.T, core *os.File, coreElf *elf.File) []elfRegion { + var regions []elfRegion + for _, p := range coreElf.Progs { + regions = append(regions, elfRegion{ + name: fmt.Sprintf("%s[%s]", p.Type, p.Flags), + min: p.Off, + max: p.Off + min(p.Filesz, p.Memsz), + addrMin: p.Vaddr, + addrMax: p.Vaddr + min(p.Filesz, p.Memsz), + }) + } + + // TODO(dmo): parse thread regions for arm64. + // This doesn't invalidate the test, it just makes it harder to figure + // out where we're leaking stuff. + if runtime.GOARCH == "amd64" { + regions = append(regions, threadRegions(t, core, coreElf)...) + } + + for i, r1 := range regions { + for j, r2 := range regions { + if i == j { + continue + } + if r1.max <= r2.min || r2.max <= r1.min { + continue + } + t.Fatalf("overlapping regions %v %v", r1, r2) + } + } + + return regions +} + +func threadRegions(t *testing.T, core *os.File, coreElf *elf.File) []elfRegion { + var regions []elfRegion + + for _, prog := range coreElf.Progs { + if prog.Type != elf.PT_NOTE { + continue + } + + b := make([]byte, prog.Filesz) + _, err := core.ReadAt(b, int64(prog.Off)) + if err != nil { + t.Fatalf("can't read core file %v", err) + } + prefix := "unk" + b0 := b + for len(b) > 0 { + namesz := coreElf.ByteOrder.Uint32(b) + b = b[4:] + descsz := coreElf.ByteOrder.Uint32(b) + b = b[4:] + typ := elf.NType(coreElf.ByteOrder.Uint32(b)) + b = b[4:] + name := string(b[:namesz-1]) + b = b[(namesz+3)/4*4:] + off := prog.Off + uint64(len(b0)-len(b)) + desc := b[:descsz] + b = b[(descsz+3)/4*4:] + + if name != "CORE" && name != "LINUX" { + continue + } + end := off + uint64(len(desc)) + // Note: amd64 specific + // See /usr/include/x86_64-linux-gnu/bits/sigcontext.h + // + // struct _fpstate + switch typ { + case elf.NT_PRSTATUS: + pid := coreElf.ByteOrder.Uint32(desc[32:36]) + prefix = fmt.Sprintf("thread%d: ", pid) + regions = append(regions, elfRegion{ + name: prefix + "prstatus header", + min: off, + max: off + 112, + }) + off += 112 + greg := []string{ + "r15", + "r14", + "r13", + "r12", + "rbp", + "rbx", + "r11", + "r10", + "r9", + "r8", + "rax", + "rcx", + "rdx", + "rsi", + "rdi", + "orig_rax", + "rip", + "cs", + "eflags", + "rsp", + "ss", + "fs_base", + "gs_base", + "ds", + "es", + "fs", + "gs", + } + for _, r := range greg { + regions = append(regions, elfRegion{ + name: prefix + r, + min: off, + max: off + 8, + }) + off += 8 + } + regions = append(regions, elfRegion{ + name: prefix + "prstatus footer", + min: off, + max: off + 8, + }) + off += 8 + case elf.NT_FPREGSET: + regions = append(regions, elfRegion{ + name: prefix + "fpregset header", + min: off, + max: off + 32, + }) + off += 32 + for i := 0; i < 8; i++ { + regions = append(regions, elfRegion{ + name: prefix + fmt.Sprintf("mmx%d", i), + min: off, + max: off + 16, + }) + off += 16 + // They are long double (10 bytes), but + // stored in 16-byte slots. 
+ } + for i := 0; i < 16; i++ { + regions = append(regions, elfRegion{ + name: prefix + fmt.Sprintf("xmm%d", i), + min: off, + max: off + 16, + }) + off += 16 + } + regions = append(regions, elfRegion{ + name: prefix + "fpregset footer", + min: off, + max: off + 96, + }) + off += 96 + /* + case NT_X86_XSTATE: // aka NT_PRPSINFO+511 + // legacy: 512 bytes + // xsave header: 64 bytes + fmt.Printf("hdr %v\n", desc[512:][:64]) + // ymm high128: 256 bytes + + println(len(desc)) + fallthrough + */ + default: + regions = append(regions, elfRegion{ + name: fmt.Sprintf("%s/%s", name, typ), + min: off, + max: off + uint64(len(desc)), + }) + off += uint64(len(desc)) + } + if off != end { + t.Fatalf("note section incomplete") + } + } + } + return regions +} diff --git a/src/runtime/secret/export.go b/src/runtime/secret/export.go new file mode 100644 index 0000000000..34f3c378f3 --- /dev/null +++ b/src/runtime/secret/export.go @@ -0,0 +1,16 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package secret + +import ( + "internal/cpu" + "unsafe" +) + +// exports for assembly testing functions +const ( + offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX) + offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512) +) diff --git a/src/runtime/secret/secret.go b/src/runtime/secret/secret.go new file mode 100644 index 0000000000..f669b98828 --- /dev/null +++ b/src/runtime/secret/secret.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.runtimesecret + +package secret + +import ( + "runtime" + _ "unsafe" +) + +// Do invokes f. +// +// Do ensures that any temporary storage used by f is erased in a +// timely manner. (In this context, "f" is shorthand for the +// entire call tree initiated by f.) +// - Any registers used by f are erased before Do returns. +// - Any stack used by f is erased before Do returns. +// - Any heap allocation done by f is erased as soon as the garbage +// collector realizes that it is no longer reachable. +// - Do works even if f panics or calls runtime.Goexit. As part of +// that, any panic raised by f will appear as if it originates from +// Do itself. +// +// Limitations: +// - Currently only supported on linux/amd64 and linux/arm64. On unsupported +// platforms, Do will invoke f directly. +// - Protection does not extend to any global variables written by f. +// - Any attempt to launch a goroutine by f will result in a panic. +// - If f calls runtime.Goexit, erasure can be delayed by defers +// higher up on the call stack. +// - Heap allocations will only be erased if the program drops all +// references to those allocations, and then the garbage collector +// notices that those references are gone. The former is under +// control of the program, but the latter is at the whim of the +// runtime. +// - Any value panicked by f may point to allocations from within +// f. Those allocations will not be erased until (at least) the +// panicked value is dead. +// - Pointer addresses may leak into data buffers used by the runtime +// to perform garbage collection. Users should not encode confidential +// information into pointers. For example, if an offset into an array or +// struct is confidential, then users should not create a pointer into +// the object. 
Since this function is intended to be used with constant-time +// cryptographic code, this requirement is usually fulfilled implicitly. +func Do(f func()) { + const osArch = runtime.GOOS + "/" + runtime.GOARCH + switch osArch { + default: + // unsupported, just invoke f directly. + f() + return + case "linux/amd64", "linux/arm64": + } + + // Place to store any panic value. + var p any + + // Step 1: increment the nesting count. + inc() + + // Step 2: call helper. The helper just calls f + // and captures (recovers) any panic result. + p = doHelper(f) + + // Step 3: erase everything used by f (stack, registers). + eraseSecrets() + + // Step 4: decrement the nesting count. + dec() + + // Step 5: re-raise any caught panic. + // This will make the panic appear to come + // from a stack whose bottom frame is + // runtime/secret.Do. + // Anything below that to do with f will be gone. + // + // Note that the panic value is not erased. It behaves + // like any other value that escapes from f. If it is + // heap allocated, it will be erased when the garbage + // collector notices it is no longer referenced. + if p != nil { + panic(p) + } + + // Note: if f calls runtime.Goexit, step 3 and above will not + // happen, as Goexit is unrecoverable. We handle that case in + // runtime/proc.go:goexit0. +} + +func doHelper(f func()) (p any) { + // Step 2b: Pop the stack up to the secret.doHelper frame + // if we are in the process of panicking. + // (It is a no-op if we are not panicking.) + // We return any panicked value to secret.Do, who will + // re-panic it. + defer func() { + // Note: we rely on the go1.21+ behavior that + // if we are panicking, recover returns non-nil. + p = recover() + }() + + // Step 2a: call the secret function. + f() + + return +} + +// Enabled reports whether [Do] appears anywhere on the call stack. +func Enabled() bool { + return count() > 0 +} + +// implemented in runtime + +//go:linkname count +func count() int32 + +//go:linkname inc +func inc() + +//go:linkname dec +func dec() + +//go:linkname eraseSecrets +func eraseSecrets() diff --git a/src/runtime/secret/secret_test.go b/src/runtime/secret/secret_test.go new file mode 100644 index 0000000000..7651a93ca5 --- /dev/null +++ b/src/runtime/secret/secret_test.go @@ -0,0 +1,293 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// the race detector does not like our pointer shenanigans +// while checking the stack. + +//go:build goexperiment.runtimesecret && (arm64 || amd64) && linux && !race + +package secret + +import ( + "runtime" + "strings" + "testing" + "time" + "unsafe" +) + +type secretType int64 + +const secretValue = 0x53c237_53c237 + +// S is a type that might have some secrets in it. +type S [100]secretType + +// makeS makes an S with secrets in it. +// +//go:noinline +func makeS() S { + // Note: noinline ensures this doesn't get inlined and + // completely optimized away. + var s S + for i := range s { + s[i] = secretValue + } + return s +} + +// heapS allocates an S on the heap with secrets in it. +// +//go:noinline +func heapS() *S { + // Note: noinline forces heap allocation + s := makeS() + return &s +} + +// for the tiny allocator +// +//go:noinline +func heapSTiny() *secretType { + s := new(secretType(secretValue)) + return s +} + +// Test that when we allocate inside secret.Do, the resulting +// allocations are zeroed by the garbage collector when they +// are freed. 
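+// The property under test, as a sketch in terms of this file's helpers:
+//
+//	var p *S
+//	Do(func() { p = heapS() }) // the allocation is marked with a secret special
+//	p = nil                    // drop the only reference
+//	runtime.GC()               // sweep zeroes the object via freeSpecial
+//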
+// See runtime/mheap.go:freeSpecial. +func TestHeap(t *testing.T) { + var u uintptr + Do(func() { + u = uintptr(unsafe.Pointer(heapS())) + }) + + runtime.GC() + + // Check that object got zeroed. + checkRangeForSecret(t, u, u+unsafe.Sizeof(S{})) + // Also check our stack, just because we can. + checkStackForSecret(t) +} + +func TestHeapTiny(t *testing.T) { + var u uintptr + Do(func() { + u = uintptr(unsafe.Pointer(heapSTiny())) + }) + runtime.GC() + + // Check that object got zeroed. + checkRangeForSecret(t, u, u+unsafe.Sizeof(secretType(0))) + // Also check our stack, just because we can. + checkStackForSecret(t) +} + +// Test that when we return from secret.Do, we zero the stack used +// by the argument to secret.Do. +// See runtime/secret.go:secret_dec. +func TestStack(t *testing.T) { + checkStackForSecret(t) // if this fails, something is wrong with the test + + Do(func() { + s := makeS() + use(&s) + }) + + checkStackForSecret(t) +} + +//go:noinline +func use(s *S) { + // Note: noinline prevents dead variable elimination. +} + +// Test that when we copy a stack, we zero the old one. +// See runtime/stack.go:copystack. +func TestStackCopy(t *testing.T) { + checkStackForSecret(t) // if this fails, something is wrong with the test + + var lo, hi uintptr + Do(func() { + // Put some secrets on the current stack frame. + s := makeS() + use(&s) + // Remember the current stack. + lo, hi = getStack() + // Use a lot more stack to force a stack copy. + growStack() + }) + checkRangeForSecret(t, lo, hi) // pre-grow stack + checkStackForSecret(t) // post-grow stack (just because we can) +} + +func growStack() { + growStack1(1000) +} +func growStack1(n int) { + if n == 0 { + return + } + growStack1(n - 1) +} + +func TestPanic(t *testing.T) { + checkStackForSecret(t) // if this fails, something is wrong with the test + + defer func() { + checkStackForSecret(t) + + p := recover() + if p == nil { + t.Errorf("panic squashed") + return + } + var e error + var ok bool + if e, ok = p.(error); !ok { + t.Errorf("panic not an error") + } + if !strings.Contains(e.Error(), "divide by zero") { + t.Errorf("panic not a divide by zero error: %s", e.Error()) + } + var pcs [10]uintptr + n := runtime.Callers(0, pcs[:]) + frames := runtime.CallersFrames(pcs[:n]) + for { + frame, more := frames.Next() + if strings.Contains(frame.Function, "dividePanic") { + t.Errorf("secret function in traceback") + } + if !more { + break + } + } + }() + Do(dividePanic) +} + +func dividePanic() { + s := makeS() + use(&s) + _ = 8 / zero +} + +var zero int + +func TestGoExit(t *testing.T) { + checkStackForSecret(t) // if this fails, something is wrong with the test + + c := make(chan uintptr, 2) + + go func() { + // Run the test in a separate goroutine + defer func() { + // Tell original goroutine what our stack is + // so it can check it for secrets. + lo, hi := getStack() + c <- lo + c <- hi + }() + Do(func() { + s := makeS() + use(&s) + // there's an entire round-trip through the scheduler between here + // and when we are able to check if the registers are still dirtied, and we're + // not guaranteed to run on the same M. Make a best effort attempt anyway + loadRegisters(unsafe.Pointer(&s)) + runtime.Goexit() + }) + t.Errorf("goexit didn't happen") + }() + lo := <-c + hi := <-c + // We want to wait until the other goroutine has finished Goexiting and + // cleared its stack. There's no signal for that, so just wait a bit. 
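+	// (What this test exercises, per the Do documentation: if f calls
+	// runtime.Goexit, Do never reaches its own erasure steps, so the
+	// runtime erases the whole goroutine stack in goexit0 instead.)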
+ time.Sleep(1 * time.Millisecond) + + checkRangeForSecret(t, lo, hi) + + var spillArea [64]secretType + n := spillRegisters(unsafe.Pointer(&spillArea)) + if n > unsafe.Sizeof(spillArea) { + t.Fatalf("spill area overrun %d\n", n) + } + for i, v := range spillArea { + if v == secretValue { + t.Errorf("secret found in spill slot %d", i) + } + } +} + +func checkStackForSecret(t *testing.T) { + t.Helper() + lo, hi := getStack() + checkRangeForSecret(t, lo, hi) +} +func checkRangeForSecret(t *testing.T, lo, hi uintptr) { + t.Helper() + for p := lo; p < hi; p += unsafe.Sizeof(secretType(0)) { + v := *(*secretType)(unsafe.Pointer(p)) + if v == secretValue { + t.Errorf("secret found in [%x,%x] at %x", lo, hi, p) + } + } +} + +func TestRegisters(t *testing.T) { + Do(func() { + s := makeS() + loadRegisters(unsafe.Pointer(&s)) + }) + var spillArea [64]secretType + n := spillRegisters(unsafe.Pointer(&spillArea)) + if n > unsafe.Sizeof(spillArea) { + t.Fatalf("spill area overrun %d\n", n) + } + for i, v := range spillArea { + if v == secretValue { + t.Errorf("secret found in spill slot %d", i) + } + } +} + +func TestSignalStacks(t *testing.T) { + Do(func() { + s := makeS() + loadRegisters(unsafe.Pointer(&s)) + // cause a signal with our secret state to dirty + // at least one of the signal stacks + func() { + defer func() { + x := recover() + if x == nil { + panic("did not get panic") + } + }() + var p *int + *p = 20 + }() + }) + // signal stacks aren't cleared until after + // the next GC after secret.Do returns + runtime.GC() + stk := make([]stack, 0, 100) + stk = appendSignalStacks(stk) + for _, s := range stk { + checkRangeForSecret(t, s.lo, s.hi) + } +} + +// hooks into the runtime +func getStack() (uintptr, uintptr) + +// Stack is a copy of runtime.stack for testing export. +// Fields must match. +type stack struct { + lo uintptr + hi uintptr +} + +func appendSignalStacks([]stack) []stack diff --git a/src/runtime/secret/stubs.go b/src/runtime/secret/stubs.go new file mode 100644 index 0000000000..ec66ef2729 --- /dev/null +++ b/src/runtime/secret/stubs.go @@ -0,0 +1,32 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 || amd64 + +// testing stubs, these are implemented in assembly in +// asm_$GOARCH.s +// +// Note that this file is also used as a template to build a +// crashing binary that tries to leave secrets in places where +// they are supposed to be erased. see crash_test.go for more info + +package secret + +import "unsafe" + +// Load data from p into test registers. +// +//go:noescape +func loadRegisters(p unsafe.Pointer) + +// Spill data from test registers into p. +// Returns the amount of space filled in. +// +//go:noescape +func spillRegisters(p unsafe.Pointer) uintptr + +// Load secret into all registers. +// +//go:noescape +func useSecret(secret []byte) diff --git a/src/runtime/secret/stubs_noasm.go b/src/runtime/secret/stubs_noasm.go new file mode 100644 index 0000000000..f8091ff393 --- /dev/null +++ b/src/runtime/secret/stubs_noasm.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !arm64 && !amd64 + +package secret + +import "unsafe" + +func loadRegisters(p unsafe.Pointer) {} +func spillRegisters(p unsafe.Pointer) uintptr { return 0 } +func useSecret(secret []byte) {} diff --git a/src/runtime/secret/testdata/crash.go b/src/runtime/secret/testdata/crash.go new file mode 100644 index 0000000000..cf48fb7d44 --- /dev/null +++ b/src/runtime/secret/testdata/crash.go @@ -0,0 +1,142 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "os" + "runtime" + "runtime/debug" + "runtime/secret" + "sync" + "syscall" + "time" + _ "unsafe" + "weak" +) + +// callback from assembly +// +//go:linkname delay main.delay +func delay() { + time.Sleep(1 * time.Millisecond) +} + +// Same secret as in ../../crash_test.go +var secretStore = [8]byte{ + 0x00, + 0x81, + 0xa0, + 0xc6, + 0xb3, + 0x01, + 0x66, + 0x53, +} + +func main() { + enableCore() + useSecretProc() + // clear out secret. That way we don't have + // to figure out which secret is the allowed + // source + clear(secretStore[:]) + panic("terminate") +} + +// Copied from runtime/runtime-gdb_unix_test.go +func enableCore() { + debug.SetTraceback("crash") + + var lim syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim) + if err != nil { + panic(fmt.Sprintf("error getting rlimit: %v", err)) + } + lim.Cur = lim.Max + fmt.Fprintf(os.Stderr, "Setting RLIMIT_CORE = %+#v\n", lim) + err = syscall.Setrlimit(syscall.RLIMIT_CORE, &lim) + if err != nil { + panic(fmt.Sprintf("error setting rlimit: %v", err)) + } +} + +// useSecretProc does 5 seconds of work, using the secret value +// inside secret.Do in a bunch of ways. +func useSecretProc() { + stop := make(chan bool) + var wg sync.WaitGroup + + for i := 0; i < 4; i++ { + wg.Add(1) + go func() { + time.Sleep(1 * time.Second) + for { + select { + case <-stop: + wg.Done() + return + default: + secret.Do(func() { + // Copy key into a variable-sized heap allocation. + // This both puts secrets in heap objects, + // and more generally just causes allocation, + // which forces garbage collection, which + // requires interrupts and the like. + s := bytes.Repeat(secretStore[:], 1+i*2) + // Also spam the secret across all registers. + useSecret(s) + }) + } + } + }() + } + + // Send some allocations over a channel. This does 2 things: + // 1) forces some GCs to happen + // 2) causes more scheduling noise (Gs moving between Ms, etc.) + c := make(chan []byte) + wg.Add(2) + go func() { + for { + select { + case <-stop: + wg.Done() + return + case c <- make([]byte, 256): + } + } + }() + go func() { + for { + select { + case <-stop: + wg.Done() + return + case <-c: + } + } + }() + + time.Sleep(5 * time.Second) + close(stop) + wg.Wait() + // use a weak reference for ensuring that the GC has cleared everything + // Use a large value to avoid the tiny allocator. + w := weak.Make(new([2048]byte)) + // 20 seems like a decent amount? + for i := 0; i < 20; i++ { + runtime.GC() // GC should clear any secret heap objects and clear out scheduling buffers. + if w.Value() == nil { + fmt.Fprintf(os.Stderr, "number of GCs %v\n", i+1) + return + } + } + fmt.Fprintf(os.Stderr, "GC didn't clear out in time\n") + // This will cause the core dump to happen with the sentinel value still in memory + // so we will detect the fault. 
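+	// (Aside: the weak-pointer wait above can be read as a reusable helper.
+	// A sketch only; waitForGC is a name invented here, not part of this CL:
+	//
+	//	func waitForGC(maxCycles int) bool {
+	//		w := weak.Make(new([2048]byte)) // large, to avoid the tiny allocator
+	//		for i := 0; i < maxCycles; i++ {
+	//			runtime.GC()
+	//			if w.Value() == nil {
+	//				return true // referent collected: a full GC cycle completed
+	//			}
+	//		}
+	//		return false
+	//	}
+	//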
+	panic("fault")
+}
diff --git a/src/runtime/secret_amd64.s b/src/runtime/secret_amd64.s
new file mode 100644
index 0000000000..06103d1c0f
--- /dev/null
+++ b/src/runtime/secret_amd64.s
@@ -0,0 +1,107 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+#include "funcdata.h"
+
+// TODO(dmo): generate these with mkpreempt.go; the register sets
+// are tightly coupled, and this will ensure that we keep them
+// all synchronized.
+
+// secretEraseRegisters erases any register that may
+// have been used with user code within a secret.Do function.
+// This is roughly the general-purpose and floating-point
+// registers, barring any reserved registers and registers generally
+// considered architectural (amd64 segment registers, arm64 exception registers).
+TEXT ·secretEraseRegisters(SB),NOFRAME|NOSPLIT,$0-0
+	XORL AX, AX
+	JMP ·secretEraseRegistersMcall(SB)
+
+// Mcall requires an argument in AX. This function
+// excludes that register from being cleared.
+TEXT ·secretEraseRegistersMcall(SB),NOSPLIT|NOFRAME,$0-0
+	// integer registers
+	XORL BX, BX
+	XORL CX, CX
+	XORL DX, DX
+	XORL DI, DI
+	XORL SI, SI
+	// BP = frame pointer
+	// SP = stack pointer
+	XORL R8, R8
+	XORL R9, R9
+	XORL R10, R10
+	XORL R11, R11
+	XORL R12, R12
+	XORL R13, R13
+	// R14 = G register
+	XORL R15, R15
+
+	// floating-point registers
+	CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $1
+	JEQ avx
+
+	PXOR X0, X0
+	PXOR X1, X1
+	PXOR X2, X2
+	PXOR X3, X3
+	PXOR X4, X4
+	PXOR X5, X5
+	PXOR X6, X6
+	PXOR X7, X7
+	PXOR X8, X8
+	PXOR X9, X9
+	PXOR X10, X10
+	PXOR X11, X11
+	PXOR X12, X12
+	PXOR X13, X13
+	PXOR X14, X14
+	PXOR X15, X15
+	JMP noavx512
+
+avx:
+	// VZEROALL zeroes all of the X0-X15 registers, no matter how wide.
+	// That includes Y0-Y15 (256-bit avx) and Z0-Z15 (512-bit avx512).
+	VZEROALL
+
+	// Clear all the avx512 state.
+	CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+	JNE noavx512
+
+	// Zero X16-X31.
+	// Note that VZEROALL above already cleared Z0-Z15.
+	VMOVAPD Z0, Z16
+	VMOVAPD Z0, Z17
+	VMOVAPD Z0, Z18
+	VMOVAPD Z0, Z19
+	VMOVAPD Z0, Z20
+	VMOVAPD Z0, Z21
+	VMOVAPD Z0, Z22
+	VMOVAPD Z0, Z23
+	VMOVAPD Z0, Z24
+	VMOVAPD Z0, Z25
+	VMOVAPD Z0, Z26
+	VMOVAPD Z0, Z27
+	VMOVAPD Z0, Z28
+	VMOVAPD Z0, Z29
+	VMOVAPD Z0, Z30
+	VMOVAPD Z0, Z31
+
+	// Zero k0-k7.
+	KXORQ K0, K0, K0
+	KXORQ K0, K0, K1
+	KXORQ K0, K0, K2
+	KXORQ K0, K0, K3
+	KXORQ K0, K0, K4
+	KXORQ K0, K0, K5
+	KXORQ K0, K0, K6
+	KXORQ K0, K0, K7
+
+noavx512:
+	// misc registers
+	CMPL BX, BX // eflags
+	// segment registers? Direction flag? Both seem overkill.
+
+	RET
diff --git a/src/runtime/secret_arm64.s b/src/runtime/secret_arm64.s
new file mode 100644
index 0000000000..d21b139df8
--- /dev/null
+++ b/src/runtime/secret_arm64.s
@@ -0,0 +1,90 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+#include "funcdata.h"
+
+TEXT ·secretEraseRegisters(SB),NOFRAME|NOSPLIT,$0-0
+	MOVD ZR, R0
+	MOVD ZR, R26
+	JMP ·secretEraseRegistersMcall(SB)
+
+// Mcall requires an argument in R0 and does not have a
+// stack frame to spill into. Additionally, there is no stack
+// to spill the link register into. This function deliberately
+// doesn't clear R0 and R26, and Mcall uses R26 as a link register.
+TEXT ·secretEraseRegistersMcall(SB),NOFRAME|NOSPLIT,$0-0
+	// integer registers
+	MOVD ZR, R1
+	MOVD ZR, R2
+	MOVD ZR, R3
+	MOVD ZR, R4
+	MOVD ZR, R5
+	MOVD ZR, R6
+	MOVD ZR, R7
+	MOVD ZR, R8
+	MOVD ZR, R9
+	MOVD ZR, R10
+	MOVD ZR, R11
+	MOVD ZR, R12
+	MOVD ZR, R13
+	MOVD ZR, R14
+	MOVD ZR, R15
+	MOVD ZR, R16
+	MOVD ZR, R17
+	// R18 = platform register
+	MOVD ZR, R19
+	MOVD ZR, R20
+	MOVD ZR, R21
+	MOVD ZR, R22
+	MOVD ZR, R23
+	MOVD ZR, R24
+	MOVD ZR, R25
+	// R26 used for extra link register in mcall where we can't spill
+	MOVD ZR, R27
+	// R28 = g
+	// R29 = frame pointer
+	// R30 = link register (return address)
+	// R31 = stack pointer
+
+	// floating-point registers
+	// (also clears SIMD registers)
+	FMOVD ZR, F0
+	FMOVD ZR, F1
+	FMOVD ZR, F2
+	FMOVD ZR, F3
+	FMOVD ZR, F4
+	FMOVD ZR, F5
+	FMOVD ZR, F6
+	FMOVD ZR, F7
+	FMOVD ZR, F8
+	FMOVD ZR, F9
+	FMOVD ZR, F10
+	FMOVD ZR, F11
+	FMOVD ZR, F12
+	FMOVD ZR, F13
+	FMOVD ZR, F14
+	FMOVD ZR, F15
+	FMOVD ZR, F16
+	FMOVD ZR, F17
+	FMOVD ZR, F18
+	FMOVD ZR, F19
+	FMOVD ZR, F20
+	FMOVD ZR, F21
+	FMOVD ZR, F22
+	FMOVD ZR, F23
+	FMOVD ZR, F24
+	FMOVD ZR, F25
+	FMOVD ZR, F26
+	FMOVD ZR, F27
+	FMOVD ZR, F28
+	FMOVD ZR, F29
+	FMOVD ZR, F30
+	FMOVD ZR, F31
+
+	// misc registers
+	CMP ZR, ZR // N,Z,C,V flags
+
+	RET
diff --git a/src/runtime/secret_asm.go b/src/runtime/secret_asm.go
new file mode 100644
index 0000000000..08223a673d
--- /dev/null
+++ b/src/runtime/secret_asm.go
@@ -0,0 +1,9 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 || amd64
+
+package runtime
+
+func secretEraseRegisters()
diff --git a/src/runtime/secret_noasm.go b/src/runtime/secret_noasm.go
new file mode 100644
index 0000000000..3f7e49af7a
--- /dev/null
+++ b/src/runtime/secret_noasm.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 && !amd64
+
+package runtime
+
+func secretEraseRegisters() {
+	throw("runtime/secret.Do not supported yet")
+}
diff --git a/src/runtime/secret_nosecret.go b/src/runtime/secret_nosecret.go
new file mode 100644
index 0000000000..bf50fb5a54
--- /dev/null
+++ b/src/runtime/secret_nosecret.go
@@ -0,0 +1,32 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(amd64 || arm64) || !linux
+
+package runtime
+
+import "unsafe"
+
+// Stubs for platforms that do not implement runtime/secret
+
+//go:linkname secret_count runtime/secret.count
+func secret_count() int32 { return 0 }
+
+//go:linkname secret_inc runtime/secret.inc
+func secret_inc() {}
+
+//go:linkname secret_dec runtime/secret.dec
+func secret_dec() {}
+
+//go:linkname secret_eraseSecrets runtime/secret.eraseSecrets
+func secret_eraseSecrets() {}
+
+func addSecret(p unsafe.Pointer) {}
+
+type specialSecret struct{}
+
+//go:linkname secret_getStack runtime/secret.getStack
+func secret_getStack() (uintptr, uintptr) { return 0, 0 }
+
+func noopSignal(mp *m) {}
diff --git a/src/runtime/signal_linux_amd64.go b/src/runtime/signal_linux_amd64.go
index 573b118397..f4559f570e 100644
--- a/src/runtime/signal_linux_amd64.go
+++ b/src/runtime/signal_linux_amd64.go
@@ -54,3 +54,31 @@ func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
 func (c *sigctxt) set_sigaddr(x uint64) {
 	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
 }
+
+// dumpSigStack prints a signal stack, annotating the signal context (C), the fpstate
+// pointer field within that context (F), and the start of the fpstate itself (S).
+func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
+	println(s)
+	println("SP:\t", hex(sp))
+	println("ctx:\t", hex(ctx))
+	fpfield := ctx + unsafe.Offsetof(ucontext{}.uc_mcontext) + unsafe.Offsetof(mcontext{}.fpregs)
+	println("fpfield:\t", hex(fpfield))
+	fpbegin := uintptr(unsafe.Pointer((&sigctxt{nil, unsafe.Pointer(ctx)}).regs().fpstate))
+	println("fpstate:\t", hex(fpbegin))
+	hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
+		switch p {
+		case ctx:
+			hm.start()
+			print("C")
+			println()
+		case fpfield:
+			hm.start()
+			print("F")
+			println()
+		case fpbegin:
+			hm.start()
+			print("S")
+			println()
+		}
+	})
+}
diff --git a/src/runtime/signal_linux_arm64.go b/src/runtime/signal_linux_arm64.go
index 4ccc030792..2d31051fd0 100644
--- a/src/runtime/signal_linux_arm64.go
+++ b/src/runtime/signal_linux_arm64.go
@@ -69,3 +69,22 @@ func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
 func (c *sigctxt) set_sigaddr(x uint64) {
 	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
 }
+
+func dumpSigStack(s string, sp uintptr, stackhi uintptr, ctx uintptr) {
+	println(s)
+	println("SP:\t", hex(sp))
+	println("ctx:\t", hex(ctx))
+	entriesStart := uintptr(unsafe.Pointer(&(*ucontext)(unsafe.Pointer(ctx)).uc_mcontext.__reserved))
+	hexdumpWords(sp, stackhi, func(p uintptr, hm hexdumpMarker) {
+		switch p {
+		case ctx:
+			hm.start()
+			print("C")
+			println()
+		case entriesStart:
+			hm.start()
+			print("E")
+			println()
+		}
+	})
+}
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 96628d6baa..f352cb3c02 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/abi"
+	"internal/goexperiment"
 	"internal/runtime/atomic"
 	"internal/runtime/sys"
 	"unsafe"
@@ -488,6 +489,11 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
 	c.fixsigcode(sig)
 	sighandler(sig, info, ctx, gp)
+
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+		atomic.Store(&gp.m.signalSecret, 1)
+	}
+
 	setg(gp)
 	if setStack {
 		restoreGsignalStack(&gsignalStack)
diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go
index 5888177f0e..9dde0da963 100644
--- a/src/runtime/sizeof_test.go
+++ b/src/runtime/sizeof_test.go
@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
 		_32bit uintptr // size on 32bit platforms
 		_64bit uintptr // size on 64bit platforms
 	}{
-		{runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
+		{runtime.G{}, 284 + xreg, 448 + xreg}, // g, but exported for testing
 		{runtime.Sudog{}, 64, 104},            // sudog, but exported for testing
 	}
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index c92accf188..d1c80276a5 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -8,6 +8,7 @@ import (
 	"internal/abi"
 	"internal/cpu"
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"internal/runtime/atomic"
 	"internal/runtime/gc"
@@ -985,6 +986,16 @@ func copystack(gp *g, newsize uintptr) {
 	}
 
 	// free old stack
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+		// Some portion of the old stack has secret data on it.
+		// We don't really know where we entered secret mode,
+		// so just clear the whole thing.
+		// TODO(dmo): traceback until we hit secret.Do? Clearing
+		// is fast and optimized, so it might not be worth it.
+		memclrNoHeapPointers(unsafe.Pointer(old.lo), old.hi-old.lo)
+		// The memmove that copied the stack above might have put secrets into registers.
+		secretEraseRegisters()
+	}
 	if stackPoisonCopy != 0 {
 		fillstack(old, 0xfc)
 	}
@@ -1026,6 +1037,14 @@ func newstack() {
 	}
 	gp := thisg.m.curg
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+		// If we're entering here from a secret context, clear
+		// all the registers. This is important because we
+		// might context switch to a different goroutine which
+		// is not in secret mode, and it will not be careful
+		// about clearing its registers.
+		secretEraseRegisters()
+	}
 
 	if thisg.m.curg.throwsplit {
 		// Update syscallsp, syscallpc in case traceback uses them.
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index e252a4b914..618553b196 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -228,6 +228,18 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
 	// due to stack probes inserted to avoid stack/heap collisions.
 	// See issue #20427.
 
+#ifdef GOEXPERIMENT_runtimesecret
+	// The kernel might spill our secrets onto g0;
+	// erase our registers here.
+	// TODO(dmo): what is the ABI guarantee here? We use
+	// R14 later, but the function is ABI0.
+	CMPL g_secret(R14), $0
+	JEQ nosecret
+	CALL ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
+
 	MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
 	MOVQ g_m(R14), BX // BX unchanged by C code.
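
As an aside for reviewers, the copystack change above reduces to a simple policy: if the goroutine holds secrets, wipe the entire old stack rather than locating the secret.Do frame via traceback. Below is a minimal standalone sketch of that policy, not runtime code; it models stack memory as a byte slice, and retireOldStack and secretDepth are illustrative names, not part of this patch.

	package main

	import "fmt"

	// stack models a goroutine stack as a plain byte slice.
	type stack struct{ mem []byte }

	// retireOldStack releases an old stack after a stack move. When the
	// goroutine is in secret mode (secretDepth > 0), the whole stack is
	// zeroed first, since the runtime does not track where secret.Do was
	// entered; this mirrors the memclrNoHeapPointers call in copystack.
	func retireOldStack(old *stack, secretDepth int) {
		if secretDepth > 0 {
			clear(old.mem) // zero every byte before the memory is reused
		}
		old.mem = nil // hand the memory back to the allocator
	}

	func main() {
		s := &stack{mem: []byte("key material left on the stack")}
		retireOldStack(s, 1)
		fmt.Println(s.mem == nil) // true: stack wiped, then released
	}

The trade-off is the same one the TODO in copystack names: an unconditional clear costs a little CPU on each stack move, but avoids a traceback to find where secret mode began.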
diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s
index 7a81d5479e..88f7213525 100644
--- a/src/runtime/sys_linux_arm64.s
+++ b/src/runtime/sys_linux_arm64.s
@@ -225,6 +225,13 @@ TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
 
 // func walltime() (sec int64, nsec int32)
 TEXT runtime·walltime(SB),NOSPLIT,$24-12
+#ifdef GOEXPERIMENT_runtimesecret
+	MOVW g_secret(g), R20
+	CBZ R20, nosecret
+	BL ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
 	MOVD RSP, R20 // R20 is unchanged by C code
 	MOVD RSP, R1
 
@@ -309,6 +316,13 @@ finish:
 	RET
 
 TEXT runtime·nanotime1(SB),NOSPLIT,$24-8
+#ifdef GOEXPERIMENT_runtimesecret
+	MOVW g_secret(g), R20
+	CBZ R20, nosecret
+	BL ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
 	MOVD RSP, R20 // R20 is unchanged by C code
 	MOVD RSP, R1
diff --git a/src/runtime/time_linux_amd64.s b/src/runtime/time_linux_amd64.s
index fa9561b25b..4935c6dec3 100644
--- a/src/runtime/time_linux_amd64.s
+++ b/src/runtime/time_linux_amd64.s
@@ -12,6 +12,16 @@
 
 // func now() (sec int64, nsec int32, mono int64)
 TEXT time·now(SB),NOSPLIT,$16-24
+#ifdef GOEXPERIMENT_runtimesecret
+	// The kernel might spill our secrets onto g0;
+	// erase our registers here.
+	CMPL g_secret(R14), $0
+	JEQ nosecret
+	CALL ·secretEraseRegisters(SB)
+
+nosecret:
+#endif
+
 	MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
 	MOVQ g_m(R14), BX // BX unchanged by C code.
diff --git a/src/runtime/vgetrandom_linux.go b/src/runtime/vgetrandom_linux.go
index 225f7029be..5e755dcc3d 100644
--- a/src/runtime/vgetrandom_linux.go
+++ b/src/runtime/vgetrandom_linux.go
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/cpu"
+	"internal/goexperiment"
 	"unsafe"
 )
 
@@ -95,6 +96,13 @@ func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
 		return -1, false
 	}
 
+	// vDSO code may spill registers to the stack.
+	// Make sure they're zeroed if we're running in secret mode.
+	gp := getg()
+	if goexperiment.RuntimeSecret && gp.secret > 0 {
+		secretEraseRegisters()
+	}
+
 	// We use getg().m instead of acquirem() here, because always taking
 	// the lock is slightly more expensive than not always taking the lock.
 	// However, we *do* require that m doesn't migrate elsewhere during the
diff --git a/src/syscall/asm_linux_amd64.s b/src/syscall/asm_linux_amd64.s
index da170c52ed..cf2f823855 100644
--- a/src/syscall/asm_linux_amd64.s
+++ b/src/syscall/asm_linux_amd64.s
@@ -47,6 +47,10 @@ TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
 
 // func gettimeofday(tv *Timeval) (err uintptr)
 TEXT ·gettimeofday(SB),NOSPLIT,$0-16
+	// Usually, we'd check if we're running
+	// secret code here, but because we execute
+	// gettimeofday on the G stack, it's fine to leave
+	// the registers uncleared.
 	MOVQ tv+0(FP), DI
 	MOVQ $0, SI
 	MOVQ runtime·vdsoGettimeofdaySym(SB), AX
-- 
2.52.0
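
For readers who want to try the experiment, here is a hedged usage sketch of the API this patch adds, as seen from the caller's side. It assumes a toolchain built with GOEXPERIMENT=runtimesecret on linux/amd64 or linux/arm64; the key material below is a placeholder, not taken from this patch.

	package main

	import (
		"crypto/sha256"
		"fmt"
		"runtime/secret"
	)

	func main() {
		var digest [32]byte
		secret.Do(func() {
			// Anything that must stay secret should live only inside
			// this closure; results that may escape are written to
			// variables in the enclosing scope.
			key := []byte("ephemeral key material") // placeholder secret
			digest = sha256.Sum256(key)
		})
		// When Do returns, the runtime has cleared the stack used by the
		// closure and the registers that might hold secrets; heap objects
		// allocated inside are zeroed as soon as the GC frees them.
		fmt.Printf("digest prefix: %x\n", digest[:4])
	}

The guarantee is about the runtime's own copies: if the argument function panics, the panic is re-raised from secret.Do so tracebacks reveal nothing about the secret function, but data deliberately written outside the closure, like digest above, is the caller's responsibility.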