From: Roland Shoemaker
Date: Wed, 21 May 2025 02:03:44 +0000 (+0000)
Subject: runtime: randomize heap base address
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=6669aa3b14d98d90929ca860421e5308374b0f46;p=gostls13.git

During initialization, allow randomizing the heap base address by
generating a random uint64 and using its bits to randomize various
portions of the heap base address.

We use the following method to randomize the base address:

  * We first generate a random heapArenaBytes aligned address that we
    use for generating the hints.
  * On the first call to mheap.grow, we then generate a random
    PallocChunkBytes aligned offset into the mmap'd heap region, which
    we use as the base for the heap region.
  * We then mark a random number of pages within the page allocator as
    allocated.

Our final randomized "heap base address" becomes the first byte of the
first available page returned by the page allocator. This results in an
address with at least heapAddrBits-gc.PageShift-1 bits of entropy.

Fixes #27583

Change-Id: Ideb4450a5ff747a132f702d563d2a516dec91a88
Reviewed-on: https://go-review.googlesource.com/c/go/+/674835
Reviewed-by: Michael Knyszek
LUCI-TryBot-Result: Go LUCI
---

diff --git a/src/internal/goexperiment/exp_randomizedheapbase64_off.go b/src/internal/goexperiment/exp_randomizedheapbase64_off.go
new file mode 100644
index 0000000000..0a578535a4
--- /dev/null
+++ b/src/internal/goexperiment/exp_randomizedheapbase64_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.randomizedheapbase64
+
+package goexperiment
+
+const RandomizedHeapBase64 = false
+const RandomizedHeapBase64Int = 0
diff --git a/src/internal/goexperiment/exp_randomizedheapbase64_on.go b/src/internal/goexperiment/exp_randomizedheapbase64_on.go
new file mode 100644
index 0000000000..10d59c7028
--- /dev/null
+++ b/src/internal/goexperiment/exp_randomizedheapbase64_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.randomizedheapbase64
+
+package goexperiment
+
+const RandomizedHeapBase64 = true
+const RandomizedHeapBase64Int = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index 63a3388839..d0ae75d4e1 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -129,4 +129,8 @@ type Flags struct {
 	// GreenTeaGC enables the Green Tea GC implementation.
 	GreenTeaGC bool
+
+	// RandomizedHeapBase64 enables heap base address randomization on 64-bit
+	// platforms.
+	RandomizedHeapBase64 bool
 }
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 9a4611e26e..2fc5b4a38a 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -9,6 +9,7 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"internal/runtime/atomic"
 	"internal/runtime/gc"
@@ -417,7 +418,8 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		slow.HeapReleased += uint64(pg) * pageSize
 	}
 	for _, p := range allp {
-		pg := sys.OnesCount64(p.pcache.scav)
+		// Only count scav bits for pages in the cache.
+		pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
 		slow.HeapReleased += uint64(pg) * pageSize
 	}
@@ -1120,12 +1122,16 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 	// Lock so that we can safely access the bitmap.
 	lock(&mheap_.lock)
+
+	heapBase := mheap_.pages.inUse.ranges[0].base.addr()
+	secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
 chunkLoop:
 	for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
 		chunk := mheap_.pages.tryChunkOf(i)
 		if chunk == nil {
 			continue
 		}
+		cb := chunkBase(i)
 		for j := 0; j < pallocChunkPages/64; j++ {
 			// Run over each 64-bit bitmap section and ensure
 			// scavenged is being cleared properly on allocation.
@@ -1135,12 +1141,20 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 			want := chunk.scavenged[j] &^ chunk.pallocBits[j]
 			got := chunk.scavenged[j]
 			if want != got {
+				// When goexperiment.RandomizedHeapBase64 is set, we use a
+				// series of padding pages to generate a randomized heap base
+				// address; these padding pages have both the alloc and scav
+				// bits set. If we see this for a chunk between the heap base
+				// address and the address of the second arena, continue.
+				if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
+					continue
+				}
 				ok = false
 				if n >= len(mismatches) {
 					break chunkLoop
 				}
 				mismatches[n] = BitsMismatch{
-					Base: chunkBase(i) + uintptr(j)*64*pageSize,
+					Base: cb + uintptr(j)*64*pageSize,
 					Got:  got,
 					Want: want,
 				}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index bc7dab9d20..d21b2c49b5 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -102,6 +102,7 @@ package runtime
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"internal/runtime/atomic"
 	"internal/runtime/gc"
@@ -345,6 +346,14 @@ const (
 	// metadata mappings back to the OS. That would be quite complex to do in general
 	// as the heap is likely fragmented after a reduction in heap size.
 	minHeapForMetadataHugePages = 1 << 30
+
+	// randomizeHeapBase indicates if the heap base address should be randomized.
+	// See comment in mallocinit for how the randomization is performed.
+	randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform
+
+	// randHeapBasePrefixMask is used to mask off the top byte of the randomized
+	// heap base address.
+	randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
 )
@@ -372,6 +381,24 @@ var (
 	physHugePageShift uint
 )
+
+var (
+	// heapRandSeed is a random value that is populated in mallocinit if
+	// randomizeHeapBase is set. It is used in mallocinit and mheap.grow to
+	// randomize the heap base address.
+	heapRandSeed              uintptr
+	heapRandSeedBitsRemaining int
+)
+
+func nextHeapRandBits(bits int) uintptr {
+	if bits > heapRandSeedBitsRemaining {
+		throw("not enough heapRandSeed bits remaining")
+	}
+	r := heapRandSeed >> (64 - bits)
+	heapRandSeed <<= bits
+	heapRandSeedBitsRemaining -= bits
+	return r
+}
+
 func mallocinit() {
 	if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
 		throw("bad TinySizeClass")
@@ -517,6 +544,42 @@ func mallocinit() {
 	//
 	// In race mode we have no choice but to just use the same hints because
 	// the race detector requires that the heap be mapped contiguously.
+	//
+	// If randomizeHeapBase is set, we attempt to randomize the base address
+	// as much as possible. We do this by generating a random uint64 via
+	// bootstrapRand and using its bits to randomize portions of the base
+	// address as follows:
+	// * We first generate a random heapArenaBytes aligned address that we use for
+	//   generating the hints.
+	// * On the first call to mheap.grow, we then generate a random PallocChunkBytes
+	//   aligned offset into the mmap'd heap region, which we use as the base for
+	//   the heap region.
+	// * We then select a page offset in that PallocChunkBytes region to start the
+	//   heap at, and mark all the pages up to that offset as allocated.
+	//
+	// Our final randomized "heap base address" becomes the first byte of
+	// the first available page returned by the page allocator. This results
+	// in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
+	// bits of entropy.
+
+	var randHeapBase uintptr
+	var randHeapBasePrefix byte
+	// heapAddrBits is 48 on most platforms, but we only use 47 of those
+	// bits in order to provide a good amount of room for the heap to grow
+	// contiguously. On amd64, there are 48 bits, but the top bit is sign
+	// extended, so we throw away another bit, just to be safe.
+	randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
+	if randomizeHeapBase {
+		// Generate a random value and use its top
+		// randHeapAddrBits-logHeapArenaBytes bits as the top bits of
+		// randHeapBase.
+		heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64
+		topBits := (randHeapAddrBits - logHeapArenaBytes)
+		randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
+		randHeapBase = alignUp(randHeapBase, heapArenaBytes)
+		randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
+	}
+
 	for i := 0x7f; i >= 0; i-- {
 		var p uintptr
 		switch {
@@ -528,6 +591,9 @@ func mallocinit() {
 			if p >= uintptrMask&0x00e000000000 {
 				continue
 			}
+		case randomizeHeapBase:
+			prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
+			p = prefix | (randHeapBase & randHeapBasePrefixMask)
 		case GOARCH == "arm64" && GOOS == "ios":
 			p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
 		case GOARCH == "arm64":
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index f25dbb429d..cc3116acb3 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1547,6 +1547,8 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 func (h *mheap) grow(npage uintptr) (uintptr, bool) {
 	assertLockHeld(&h.lock)
+
+	firstGrow := h.curArena.base == 0
+
 	// We must grow the heap in whole palloc chunks.
 	// We call sysMap below but note that because we
 	// round up to pallocChunkPages which is on the order
@@ -1595,6 +1597,16 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
 		// Switch to the new space.
 		h.curArena.base = uintptr(av)
 		h.curArena.end = uintptr(av) + asize
+
+		if firstGrow && randomizeHeapBase {
+			// The top heapAddrBits-logHeapArenaBytes bits are randomized; we
+			// now want to randomize the next
+			// logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure
+			// h.curArena.base is aligned to pallocChunkBytes.
+			bits := logHeapArenaBytes - logPallocChunkBytes
+			offset := nextHeapRandBits(bits)
+			h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes)
+		}
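
To make the bit-consumption scheme above concrete, here is a standalone
sketch of what heapRandSeed and nextHeapRandBits implement. It is
illustrative only: the constants are assumed linux/amd64 values (48-bit
address space, 64 MiB arenas, 4 MiB palloc chunks), and math/rand/v2
stands in for the runtime's bootstrapRand.

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    // seed and bitsRemaining mimic heapRandSeed/heapRandSeedBitsRemaining.
    var (
        seed          uint64
        bitsRemaining int
    )

    // nextBits mirrors nextHeapRandBits: return the top `bits` bits of
    // the seed, then shift them out so later callers get fresh entropy.
    func nextBits(bits int) uint64 {
        if bits > bitsRemaining {
            panic("not enough seed bits remaining")
        }
        r := seed >> (64 - bits)
        seed <<= bits
        bitsRemaining -= bits
        return r
    }

    func main() {
        seed, bitsRemaining = rand.Uint64(), 64 // stand-in for bootstrapRand

        // Assumed linux/amd64 values, for illustration only.
        const (
            heapAddrBits        = 48
            logHeapArenaBytes   = 26 // 64 MiB arenas
            logPallocChunkBytes = 22 // 4 MiB palloc chunks
        )
        // One bit of headroom plus one amd64 sign-extension bit.
        randHeapAddrBits := heapAddrBits - 1 - 1

        // Step 1 (mallocinit): heapArenaBytes-aligned base for the hints.
        topBits := randHeapAddrBits - logHeapArenaBytes
        base := nextBits(topBits) << (randHeapAddrBits - topBits)

        // Step 2 (mheap.grow): pallocChunkBytes-aligned offset into the
        // first mapped region.
        offset := nextBits(logHeapArenaBytes-logPallocChunkBytes) << logPallocChunkBytes

        fmt.Printf("hint base %#x, chunk offset %#x, %d seed bits left\n",
            base, offset, bitsRemaining)
    }

With these assumed values the two steps consume 20 and 4 seed bits
respectively, leaving 40 bits of the single bootstrapRand value for the
page-level step.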
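The `case randomizeHeapBase` arm of the hint loop above derives each
hint by varying only the top byte of the randomized base. A minimal
sketch of that construction, again with assumed amd64 constants and a
made-up base value:

    package main

    import "fmt"

    // Assumed values for illustration; the real constants live in the runtime.
    const (
        heapAddrBits     = 48
        randHeapAddrBits = heapAddrBits - 2                      // one headroom bit, one amd64 sign bit
        prefixMask       = ^uint64(0xff << (heapAddrBits - 8))   // mirrors randHeapBasePrefixMask
    )

    func main() {
        // Pretend output of step 1: an arbitrary heapArenaBytes-aligned base.
        randHeapBase := uint64(0x2f3c) << 26
        prefixByte := byte(randHeapBase >> (randHeapAddrBits - 8))

        // Mirror the hint loop: each hint i re-derives only the prefix
        // byte, so all hints share the randomized low bits.
        for i := 0x7f; i >= 0x7c; i-- {
            prefix := uint64(prefixByte+byte(i)) << (randHeapAddrBits - 8)
            p := prefix | (randHeapBase & prefixMask)
            fmt.Printf("hint %#02x: %#x\n", i, p)
        }
    }

Because only the prefix byte changes from hint to hint, whichever hint
the OS accepts still carries the randomized arena-aligned low bits.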
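The first-grow hunk in mheap.grow ORs a random chunk-aligned offset
into the fresh mapping's base and then re-aligns down to a chunk
boundary. A small sketch of that arithmetic, with assumed constants and
a made-up mapping address:

    package main

    import "fmt"

    const (
        logHeapArenaBytes   = 26 // assumption: 64 MiB arenas
        logPallocChunkBytes = 22 // assumption: 4 MiB palloc chunks
        pallocChunkBytes    = 1 << logPallocChunkBytes
    )

    // alignDown rounds n down to a multiple of a (a power of two),
    // matching the runtime helper of the same name.
    func alignDown(n, a uint64) uint64 { return n &^ (a - 1) }

    func main() {
        base := uint64(0xc000000000) // pretend h.curArena.base after the first mapping
        offset := uint64(0b1011)     // logHeapArenaBytes-logPallocChunkBytes = 4 random bits
        newBase := alignDown(base|(offset<<logPallocChunkBytes), pallocChunkBytes)
        fmt.Printf("curArena.base %#x -> %#x (shifted %d MiB into the region)\n",
            base, newBase, (newBase-base)>>20)
    }

The alignDown only matters if the reserved base is not already
chunk-aligned; with an arena-aligned base, the OR alone already lands
on a chunk boundary.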
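The diff above is truncated before the final page-marking step, so the
following is only a hypothetical rendering of what the commit message
describes: a random number of pages in the first chunk are treated as
allocated padding, and the effective heap base becomes the first free
page after them. All names and constants here are invented for
illustration and do not come from the CL.

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    const (
        pageShift     = 13 // gc.PageShift on 64-bit platforms
        pageSize      = 1 << pageShift
        pagesPerChunk = 512 // pallocChunkBytes / pageSize with the values above
    )

    func main() {
        chunkBase := uint64(0xc002c00000) // pretend randomized h.curArena.base

        // Hypothetical sketch: pick a random page count within the first
        // chunk and treat those pages as allocated padding, so the first
        // free page (the effective heap base) is page-aligned but
        // otherwise unpredictable.
        padPages := rand.Uint64N(pagesPerChunk)
        heapBase := chunkBase + padPages*pageSize
        fmt.Printf("first free page (heap base): %#x\n", heapBase)
    }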