From 6d1ebeb5271a06cd55f55a84e95709e2f4805bcc Mon Sep 17 00:00:00 2001
From: Keith Randall
Date: Tue, 24 Feb 2015 09:25:09 -0800
Subject: [PATCH] runtime: handle holes in the heap

We need to distinguish pointers to free spans, which indicate bugs in
our pointer analysis, from pointers to never-in-the-heap spans, which
can legitimately arise from sysAlloc/mmap/etc. This normally isn't a
problem because the heap is contiguous, but in some situations (32 bit,
particularly) the heap must grow around an already allocated region.

The bad pointer test is disabled so this fix doesn't actually do
anything, but it removes one barrier from reenabling it.

Fixes #9872.

Change-Id: I0a92db4d43b642c58d2b40af69c906a8d9777f88
Reviewed-on: https://go-review.googlesource.com/5780
Reviewed-by: Dmitry Vyukov
---
 src/runtime/mbitmap.go |  5 ++++-
 src/runtime/mheap.go   | 13 ++++++++++---
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 369b5ed218..702fccae98 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -176,7 +176,10 @@ func heapBitsForObject(p uintptr) (base uintptr, hbits heapBits) {
 	x -= mheap_.arena_start >> _PageShift
 	s := h_spans[x]
 	if s == nil || pageID(k) < s.start || p >= s.limit || s.state != mSpanInUse {
-		if s != nil && s.state == _MSpanStack {
+		if s == nil || s.state == _MSpanStack {
+			// If s is nil, the virtual address has never been part of the heap.
+			// This pointer may be to some mmap'd region, so we allow it.
+			// Pointers into stacks are also ok, the runtime manages these explicitly.
 			return
 		}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index ba800aacef..a05a570ff1 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -128,7 +128,13 @@ func (s *mspan) layout() (size, n, total uintptr) {
 }
 
 var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
-var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
+
+// h_spans is a lookup table to map virtual address page IDs to *mspan.
+// For allocated spans, their pages map to the span itself.
+// For free spans, only the lowest and highest pages map to the span itself. Internal
+// pages map to an arbitrary span.
+// For pages that have never been allocated, h_spans entries are nil.
+var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
 
 func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 	h := (*mheap)(vh)
@@ -568,8 +574,9 @@ func mHeap_Grow(h *mheap, npage uintptr) bool {
 	mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
 	p := uintptr(s.start)
 	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
-	h_spans[p] = s
-	h_spans[p+s.npages-1] = s
+	for i := p; i < p+s.npages; i++ {
+		h_spans[i] = s
+	}
 	atomicstore(&s.sweepgen, h.sweepgen)
 	s.state = _MSpanInUse
 	mHeap_FreeSpanLocked(h, s, false, true, 0)
-- 
2.48.1
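
Illustrative note (not part of the patch): the sketch below is a minimal, self-contained Go program showing the idea the change relies on, not the runtime's actual code. The names span, heap, grow, and lookup and the 8 KB page size are assumptions made only for this sketch. It models a page-indexed span table in which a nil entry means "this page was never heap memory", so a stray pointer into such a hole is tolerated, while a pointer into a free span would indicate a bug in pointer analysis; grow mirrors the mHeap_Grow change by mapping every page of a new span rather than only its first and last.

// Sketch only: hypothetical types and names, assuming 8 KB pages.
package main

import "fmt"

const pageShift = 13 // 8 KB pages, an assumption for this sketch

type spanState int

const (
	spanFree spanState = iota
	spanInUse
	spanStack
)

type span struct {
	start  uintptr // first page ID covered by the span
	npages uintptr // number of pages in the span
	state  spanState
}

type heap struct {
	arenaStart uintptr
	spans      []*span // indexed by pageID - arenaStart>>pageShift; nil = never heap memory
}

// grow records a new span in the table. Like the patched mHeap_Grow, it maps
// every page of the span, not just the first and last, so a lookup of an
// interior page cannot land on a stale or arbitrary entry.
func (h *heap) grow(s *span) {
	p := s.start - h.arenaStart>>pageShift
	for i := p; i < p+s.npages; i++ {
		h.spans[i] = s
	}
}

// lookup mirrors the shape of the heapBitsForObject check: a nil table entry
// means the address was never part of the heap (for example, a separately
// mmap'd region), which is allowed; a free span would instead be a bad pointer.
func (h *heap) lookup(p uintptr) (*span, string) {
	x := p>>pageShift - h.arenaStart>>pageShift
	if x >= uintptr(len(h.spans)) {
		return nil, "outside the arena"
	}
	switch s := h.spans[x]; {
	case s == nil:
		return nil, "never part of the heap: allowed"
	case s.state == spanInUse:
		return s, "valid heap pointer"
	default:
		return nil, "free span: likely a bad pointer"
	}
}

func main() {
	h := &heap{arenaStart: 1 << 20, spans: make([]*span, 1024)}
	s := &span{start: (1<<20)>>pageShift + 4, npages: 3, state: spanInUse}
	h.grow(s)

	_, why := h.lookup(1<<20 + 5<<pageShift) // interior page of s
	fmt.Println(why)
	_, why = h.lookup(1<<20 + 100<<pageShift) // a hole: never allocated
	fmt.Println(why)
}

Mapping every page keeps interior lookups well defined once the heap has to grow around pre-existing mappings, which is exactly the 32-bit scenario the commit message describes.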