From: Austin Clements
Date: Tue, 4 Oct 2016 20:22:41 +0000 (-0400)
Subject: runtime: use len(h.spans) to indicate mapped region
X-Git-Tag: go1.8beta1~622
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=5915ce66742415d96d5a082c41d31f965b719f26;p=gostls13.git

runtime: use len(h.spans) to indicate mapped region

Currently we set both the len and cap of h.spans to the full reserved
region of the address space and track the actually mapped region
separately in h.spans_mapped. Since we have both len and cap at our
disposal, change things so that len(h.spans) tracks how much of the
spans array is mapped and eliminate h.spans_mapped.

This simplifies mheap, and it means that going off the end of the
spans array now fails with a clear "index out of range" panic rather
than a SIGSEGV.

Change-Id: I8ed9a1a9a844d90e9fd2e269add4704623dbdfe6
Reviewed-on: https://go-review.googlesource.com/30533
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
---

diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 70067d07f9..62cf8fe267 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -54,8 +54,13 @@ type mheap struct {
 	// For free spans, only the lowest and highest pages map to the span itself.
 	// Internal pages map to an arbitrary span.
 	// For pages that have never been allocated, spans entries are nil.
-	spans        []*mspan
-	spans_mapped uintptr // bytes mapped starting at &spans[0]
+	//
+	// This is backed by a reserved region of the address space so
+	// it can grow without moving. The memory up to len(spans) is
+	// mapped. cap(spans) indicates the total reserved memory.
+	spans []*mspan
+
+	_ uint32 // align uint64 fields on 32-bit for atomics
 
 	// Proportional sweep
 	pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
@@ -406,7 +411,7 @@ func (h *mheap) init(spansStart, spansBytes uintptr) {
 
 	sp := (*slice)(unsafe.Pointer(&h.spans))
 	sp.array = unsafe.Pointer(spansStart)
-	sp.len = int(spansBytes / sys.PtrSize)
+	sp.len = 0
 	sp.cap = int(spansBytes / sys.PtrSize)
 }
 
@@ -424,11 +429,13 @@ func (h *mheap) mapSpans(arena_used uintptr) {
 	n -= h.arena_start
 	n = n / _PageSize * sys.PtrSize
 	n = round(n, physPageSize)
-	if h.spans_mapped >= n {
+	need := n / unsafe.Sizeof(h.spans[0])
+	have := uintptr(len(h.spans))
+	if have >= need {
 		return
 	}
-	sysMap(add(unsafe.Pointer(&h.spans[0]), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
-	h.spans_mapped = n
+	h.spans = h.spans[:need]
+	sysMap(unsafe.Pointer(&h.spans[have]), (need-have)*unsafe.Sizeof(h.spans[0]), h.arena_reserved, &memstats.other_sys)
 }
 
 // Sweeps spans in list until reclaims at least npages into heap.
@@ -890,7 +897,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 			h.spanalloc.free(unsafe.Pointer(t))
 		}
 	}
-	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
+	if (p + s.npages) < uintptr(len(h.spans)) {
 		t := h.spans[p+s.npages]
 		if t != nil && t.state == _MSpanFree {
 			s.npages += t.npages
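
The len/cap bookkeeping this commit adopts can be illustrated outside the
runtime. The sketch below is not the runtime code: spanTable, newSpanTable,
and grow are hypothetical names, and an ordinary Go allocation stands in for
the reserved address-space region and sysMap. It only shows how a slice whose
cap is fixed at the reserved size can use len to mark the usable ("mapped")
prefix, so out-of-range indexing panics instead of faulting.

package main

import "fmt"

// spanTable is a hypothetical stand-in for mheap's spans array: cap(spans)
// plays the role of the reserved region, len(spans) is the prefix treated
// as mapped.
type spanTable struct {
	spans []*int
}

// newSpanTable "reserves" capacity for reserved entries but maps none of
// them yet. (The runtime reserves address space instead of allocating;
// this is only an analogy.)
func newSpanTable(reserved int) *spanTable {
	return &spanTable{spans: make([]*int, 0, reserved)}
}

// grow extends the mapped prefix to at least need entries, mirroring the
// shape of mapSpans after this change: compare have with need, then
// reslice. The real code also sysMaps the bytes backing entries
// have..need before they are used.
func (t *spanTable) grow(need int) {
	have := len(t.spans)
	if have >= need {
		return
	}
	// Reslicing within cap never moves the backing array, just as the
	// runtime's spans array never moves within its reserved region.
	t.spans = t.spans[:need]
}

func main() {
	t := newSpanTable(1024)
	t.grow(16)
	fmt.Println(len(t.spans), cap(t.spans)) // prints: 16 1024

	// Indexing past len(t.spans) now panics with "index out of range"
	// instead of reaching unmapped memory.
	_ = t.spans[15] // in range: ok
}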