--- /dev/null
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.randomizedheapbase64
+
+package goexperiment
+
+const RandomizedHeapBase64 = false
+const RandomizedHeapBase64Int = 0
--- /dev/null
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.randomizedheapbase64
+
+package goexperiment
+
+const RandomizedHeapBase64 = true
+const RandomizedHeapBase64Int = 1
// GreenTeaGC enables the Green Tea GC implementation.
GreenTeaGC bool
+
+	// RandomizedHeapBase64 enables heap base address randomization on 64-bit
+ // platforms.
+ RandomizedHeapBase64 bool
}
import (
"internal/abi"
"internal/goarch"
+ "internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
- pg := sys.OnesCount64(p.pcache.scav)
+		// Only count scavenged bits for pages that are still free in the cache.
+ pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
slow.HeapReleased += uint64(pg) * pageSize
}
// Lock so that we can safely access the bitmap.
lock(&mheap_.lock)
+
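+	// With a randomized heap base, padding pages (which have both their
+	// alloc and scav bits set) can only appear between the first in-use
+	// heap address and the start of the second arena, so compute that
+	// range for the check below.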
+ heapBase := mheap_.pages.inUse.ranges[0].base.addr()
+ secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
chunk := mheap_.pages.tryChunkOf(i)
if chunk == nil {
continue
}
+ cb := chunkBase(i)
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.
want := chunk.scavenged[j] &^ chunk.pallocBits[j]
got := chunk.scavenged[j]
if want != got {
+			// When goexperiment.RandomizedHeapBase64 is set, we use a
+			// series of padding pages to generate a randomized heap base
+			// address; those pages have both their alloc and scav bits
+			// set. If we see this for a chunk between the heap base
+			// address and the start of the second arena, skip it.
+ if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
+ continue
+ }
ok = false
if n >= len(mismatches) {
break chunkLoop
}
mismatches[n] = BitsMismatch{
- Base: chunkBase(i) + uintptr(j)*64*pageSize,
+ Base: cb + uintptr(j)*64*pageSize,
Got: got,
Want: want,
}
import (
"internal/goarch"
+ "internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
// metadata mappings back to the OS. That would be quite complex to do in general
// as the heap is likely fragmented after a reduction in heap size.
minHeapForMetadataHugePages = 1 << 30
+
+ // randomizeHeapBase indicates if the heap base address should be randomized.
+ // See comment in mallocinit for how the randomization is performed.
+ randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform
+
+	// randHeapBasePrefixMask is used to mask off the top byte of the
+	// randomized heap base address, so a per-hint prefix can take its place.
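+	// For example, with heapAddrBits = 48 (assumed), this mask is
+	// ^uintptr(0x0000_ff00_0000_0000).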
+ randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
)
// physPageSize is the size in bytes of the OS's physical pages.
physHugePageShift uint
)
+var (
+	// heapRandSeed is a random value that is populated in mallocinit if
+	// randomizeHeapBase is set. Its bits are consumed, via nextHeapRandBits,
+	// in mallocinit and mheap.grow to randomize the heap base address.
+	heapRandSeed uintptr
+	// heapRandSeedBitsRemaining is the number of random bits in heapRandSeed
+	// that have not yet been consumed.
+	heapRandSeedBitsRemaining int
+)
+
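+// nextHeapRandBits returns the next bits random bits from heapRandSeed and
+// marks them as consumed. It throws if fewer than bits random bits remain.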
+func nextHeapRandBits(bits int) uintptr {
+ if bits > heapRandSeedBitsRemaining {
+ throw("not enough heapRandSeed bits remaining")
+ }
+ r := heapRandSeed >> (64 - bits)
+ heapRandSeed <<= bits
+ heapRandSeedBitsRemaining -= bits
+ return r
+}
+
func mallocinit() {
if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
throw("bad TinySizeClass")
//
// In race mode we have no choice but to just use the same hints because
// the race detector requires that the heap be mapped contiguously.
+ //
+	// If randomizeHeapBase is set, we attempt to randomize the base address
+	// as much as possible. We do this by generating a random uint64 via
+	// bootstrapRand and using its bits to randomize portions of the base
+	// address as follows:
+	// * We first generate a random heapArenaBytes-aligned address that we
+	//   use for generating the hints.
+	// * On the first call to mheap.grow, we then generate a random
+	//   pallocChunkBytes-aligned offset into the mmap'd heap region, which
+	//   we use as the base for the heap region.
+	// * We then select a page offset into that pallocChunkBytes-sized region
+	//   to start the heap at, and mark all the pages up to that offset as
+	//   allocated.
+ //
+ // Our final randomized "heap base address" becomes the first byte of
+ // the first available page returned by the page allocator. This results
+ // in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
+ // bits of entropy.
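+	//
+	// As a rough worked example (values assumed for linux/amd64:
+	// heapAddrBits=48, logHeapArenaBytes=26, logPallocChunkBytes=22,
+	// gc.PageShift=13), the three steps above contribute about
+	// (48-1-1)-26 = 20 bits, 26-22 = 4 bits, and 22-13 = 9 bits of
+	// randomness respectively, for 33 bits in total, consistent with the
+	// bound above.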
+
+ var randHeapBase uintptr
+ var randHeapBasePrefix byte
+ // heapAddrBits is 48 on most platforms, but we only use 47 of those
+ // bits in order to provide a good amount of room for the heap to grow
+ // contiguously. On amd64, there are 48 bits, but the top bit is sign
+ // extended, so we throw away another bit, just to be safe.
+ randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
+ if randomizeHeapBase {
+		// Generate a random seed, then take its top
+		// randHeapAddrBits-logHeapArenaBytes bits and use them as the top
+		// bits of randHeapBase.
+ heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64
+
+ topBits := (randHeapAddrBits - logHeapArenaBytes)
+ randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
+ randHeapBase = alignUp(randHeapBase, heapArenaBytes)
+ randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
+ }
+
for i := 0x7f; i >= 0; i-- {
var p uintptr
switch {
if p >= uintptrMask&0x00e000000000 {
continue
}
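+		// Combine the hint index with the random top-byte prefix so that
+		// successive hints are spread across the address space while the
+		// randomized lower bits of randHeapBase are preserved.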
+ case randomizeHeapBase:
+ prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
+ p = prefix | (randHeapBase & randHeapBasePrefixMask)
case GOARCH == "arm64" && GOOS == "ios":
p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
case GOARCH == "arm64":
func (h *mheap) grow(npage uintptr) (uintptr, bool) {
assertLockHeld(&h.lock)
+ firstGrow := h.curArena.base == 0
+
// We must grow the heap in whole palloc chunks.
// We call sysMap below but note that because we
// round up to pallocChunkPages which is on the order
// Switch to the new space.
h.curArena.base = uintptr(av)
h.curArena.end = uintptr(av) + asize
+
+ if firstGrow && randomizeHeapBase {
+		// The top heapAddrBits-logHeapArenaBytes bits are already
+		// randomized; now randomize the next
+		// logHeapArenaBytes-log2(pallocChunkBytes) bits, keeping
+		// h.curArena.base aligned to pallocChunkBytes.
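+		// For example, with 64 MiB arenas and 4 MiB palloc chunks (assumed
+		// linux/amd64 values), this consumes 26-22 = 4 bits of randomness.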
+ bits := logHeapArenaBytes - logPallocChunkBytes
+ offset := nextHeapRandBits(bits)
+ h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes)
+ }
}
// Recalculate nBase.
// space ready for allocation.
h.pages.grow(v, nBase-v)
totalGrowth += nBase - v
+
+ if firstGrow && randomizeHeapBase {
+		// The top heapAddrBits-log2(pallocChunkBytes) bits are now
+		// randomized; finally, randomize the next
+		// log2(pallocChunkBytes)-log2(pageSize) bits while maintaining
+		// alignment to pageSize. We do this by picking a random number of
+		// pages into the current arena and marking them as allocated. The
+		// address of the next available page becomes our fully randomized
+		// heap base address.
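+		// For example, with 4 MiB palloc chunks and 8 KiB pages (assumed
+		// linux/amd64 values), randNumPages lands in [0, 511], for
+		// 22-13 = 9 bits of randomness.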
+ randOffset := nextHeapRandBits(logPallocChunkBytes)
+ randNumPages := alignDown(randOffset, pageSize) / pageSize
+ if randNumPages != 0 {
+ h.pages.markRandomPaddingPages(v, randNumPages)
+ }
+ }
+
return totalGrowth, true
}
p.update(base, npages, true, false)
}
+// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize)
+// as both allocated and scavenged. This is used for randomizing the heap base
+// address. Both the alloc and scav bits are set so that the pages are never
+// handed out by the page allocator and so the memory accounting stats remain
+// correct.
+//
+// Similar to allocRange, it also updates the summaries to reflect the
+// newly-updated bitmap.
+//
+// p.mheapLock must be held.
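+// As used from mheap.grow, base is pallocChunkBytes-aligned and npages is
+// less than pallocChunkPages, so in practice only the single-chunk path below
+// is taken; the multi-chunk path is kept for generality.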
+func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) {
+ assertLockHeld(p.mheapLock)
+
+ limit := base + npages*pageSize - 1
+ sc, ec := chunkIndex(base), chunkIndex(limit)
+ si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+ if sc == ec {
+ chunk := p.chunkOf(sc)
+ chunk.allocRange(si, ei+1-si)
+ p.scav.index.alloc(sc, ei+1-si)
+ chunk.scavenged.setRange(si, ei+1-si)
+ } else {
+ chunk := p.chunkOf(sc)
+ chunk.allocRange(si, pallocChunkPages-si)
+ p.scav.index.alloc(sc, pallocChunkPages-si)
+ chunk.scavenged.setRange(si, pallocChunkPages-si)
+ for c := sc + 1; c < ec; c++ {
+ chunk := p.chunkOf(c)
+ chunk.allocAll()
+ p.scav.index.alloc(c, pallocChunkPages)
+ chunk.scavenged.setAll()
+ }
+ chunk = p.chunkOf(ec)
+ chunk.allocRange(0, ei+1)
+ p.scav.index.alloc(ec, ei+1)
+ chunk.scavenged.setRange(0, ei+1)
+ }
+ p.update(base, npages, true, true)
+}
+
const (
pallocSumBytes = unsafe.Sizeof(pallocSum(0))
ticks.init() // run as early as possible
moduledataverify()
stackinit()
+ randinit() // must run before mallocinit, alginit, mcommoninit
mallocinit()
godebug := getGodebugEarly()
cpuinit(godebug) // must run before alginit
- randinit() // must run before alginit, mcommoninit
alginit() // maps, hash, rand must not be used before this call
mcommoninit(gp.m, -1)
modulesinit() // provides activeModules