From: Austin Clements
Date: Tue, 4 Oct 2016 20:03:00 +0000 (-0400)
Subject: runtime: consolidate h_spans and mheap_.spans
X-Git-Tag: go1.8beta1~623
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=6b0f668044b7a8dd5d2360426cbcf6bd6785374d;p=gostls13.git

runtime: consolidate h_spans and mheap_.spans

Like h_allspans and mheap_.allspans, these were two ways of referring
to the spans array from when the runtime was split between C and Go.
Clean this up by making mheap_.spans a slice and eliminating h_spans.

Change-Id: I3aa7038d53c3a4252050aa33e468c48dfed0b70e
Reviewed-on: https://go-review.googlesource.com/30532
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
---

diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index 2d064145a4..cdec4f816f 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -123,7 +123,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
 	aoff := uintptr(src) - mheap_.arena_start
 	idx := aoff >> _PageShift
-	s := h_spans[idx]
+	s := mheap_.spans[idx]
 	if s.state == _MSpanStack {
 		// There are no heap bits for value stored on the stack.
 		// For a channel receive src might be on the stack of some
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 43b7e7970d..e85535ea9f 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -360,7 +360,7 @@ func mallocinit() {
 	// To overcome this we ask for PageSize more and round up the pointer.
 	p1 := round(p, _PageSize)
-	mheap_.spans = (**mspan)(unsafe.Pointer(p1))
+	spansStart := p1
 	mheap_.bitmap = p1 + spansSize + bitmapSize
 	if sys.PtrSize == 4 {
 		// Set arena_start such that we can accept memory
@@ -379,7 +379,7 @@
 	}

 	// Initialize the rest of the allocator.
-	mheap_.init(spansSize)
+	mheap_.init(spansStart, spansSize)
 	_g_ := getg()
 	_g_.m.mcache = allocmcache()
 }
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 7171d3adbd..f1f9158eeb 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -398,7 +398,7 @@ func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits
 	idx := off >> _PageShift
 	// p points into the heap, but possibly to the middle of an object.
 	// Consult the span table to find the block beginning.
-	s = h_spans[idx]
+	s = mheap_.spans[idx]
 	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
 		if s == nil || s.state == _MSpanStack {
 			// If s is nil, the virtual address has never been part of the heap.
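Every hunk so far rewrites the same lookup: subtract arena_start, shift right by _PageShift to get a page ID, and index the spans table with it. For readers new to this code, here is a minimal, self-contained sketch of that scheme; the pageShift value and the span/heap types are toy stand-ins for the runtime's _PageShift, mspan, and mheap, not the real definitions.

package main

import "fmt"

const pageShift = 13 // stand-in for the runtime's _PageShift (8 KiB pages)

type span struct{ base, limit uintptr }

type heap struct {
	arenaStart uintptr
	spans      []*span // one entry per page of the arena
}

// spanOf maps an arena address to the span covering its page,
// mirroring the (addr-arena_start)>>_PageShift indexing above.
func (h *heap) spanOf(p uintptr) *span {
	return h.spans[(p-h.arenaStart)>>pageShift]
}

func main() {
	h := &heap{arenaStart: 0x1000000, spans: make([]*span, 4)}
	// An in-use span owning page 2; every page of an allocated
	// span points back at the span itself.
	s := &span{base: h.arenaStart + 2<<pageShift, limit: h.arenaStart + 3<<pageShift}
	h.spans[2] = s
	fmt.Println(h.spanOf(s.base+100) == s) // true
}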
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 0ee7a0599a..4f3e887bc8 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -420,7 +420,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
 	}
 	p := uintptr(v) >> pageShift
 	q := p - arena_start>>pageShift
-	s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*sys.PtrSize))
+	s = mheap_.spans[q]
 	if s == nil {
 		return
 	}
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 0cc87f8509..eb96858043 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1327,7 +1327,7 @@ func gcDumpObject(label string, obj, off uintptr) {
 	k := obj >> _PageShift
 	x := k
 	x -= mheap_.arena_start >> _PageShift
-	s := h_spans[x]
+	s := mheap_.spans[x]
 	print(label, "=", hex(obj), " k=", hex(k))
 	if s == nil {
 		print(" s=nil\n")
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index f6ad4a170e..70067d07f9 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -49,9 +49,13 @@ type mheap struct {
 	// access (since that may free the backing store).
 	allspans []*mspan // all spans out there

-	// span lookup
-	spans        **mspan
-	spans_mapped uintptr
+	// spans is a lookup table to map virtual address page IDs to *mspan.
+	// For allocated spans, their pages map to the span itself.
+	// For free spans, only the lowest and highest pages map to the span itself.
+	// Internal pages map to an arbitrary span.
+	// For pages that have never been allocated, spans entries are nil.
+	spans        []*mspan
+	spans_mapped uintptr // bytes mapped starting at &spans[0]

 	// Proportional sweep
 	pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
@@ -244,13 +248,6 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }

-// h_spans is a lookup table to map virtual address page IDs to *mspan.
-// For allocated spans, their pages map to the span itself.
-// For free spans, only the lowest and highest pages map to the span itself. Internal
-// pages map to an arbitrary span.
-// For pages that have never been allocated, h_spans entries are nil.
-var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
-
 func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 	h := (*mheap)(vh)
 	s := (*mspan)(p)
@@ -291,7 +288,7 @@ func inheap(b uintptr) bool {
 		return false
 	}
 	// Not a beginning of a block, consult span table to find the block beginning.
-	s := h_spans[(b-mheap_.arena_start)>>_PageShift]
+	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
 	if s == nil || b < s.base() || b >= s.limit || s.state != mSpanInUse {
 		return false
 	}
@@ -306,7 +303,7 @@ func inHeapOrStack(b uintptr) bool {
 		return false
 	}
 	// Not a beginning of a block, consult span table to find the block beginning.
-	s := h_spans[(b-mheap_.arena_start)>>_PageShift]
+	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
 	if s == nil || b < s.base() {
 		return false
 	}
@@ -336,7 +333,7 @@ func spanOf(p uintptr) *mspan {
 // that p points into the heap (that is, mheap_.arena_start <= p <
 // mheap_.arena_used).
 func spanOfUnchecked(p uintptr) *mspan {
-	return h_spans[(p-mheap_.arena_start)>>_PageShift]
+	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
 }

 func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
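The mfinal.go hunk shows the practical payoff of the slice conversion: with spans held as a raw **mspan, findObject had to index the table by hand with pointer arithmetic; as a slice it becomes an ordinary, bounds-checked index. A self-contained sketch contrasting the two forms (span, viaPtr, and viaSlice are illustrative names, not the runtime's):

package main

import (
	"fmt"
	"unsafe"
)

type span struct{ id int }

func main() {
	spans := []*span{{0}, {1}, {2}, {3}}
	q := uintptr(2)

	// Old style: raw arithmetic off the table's base address, like
	// *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*sys.PtrSize)).
	base := unsafe.Pointer(&spans[0])
	viaPtr := *(**span)(unsafe.Pointer(uintptr(base) + q*unsafe.Sizeof(base)))

	// New style: plain slice indexing, bounds-checked by the compiler.
	viaSlice := spans[q]

	fmt.Println(viaPtr == viaSlice, viaSlice.id) // true 2
}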
@@ -389,7 +386,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
 }

 // Initialize the heap.
-func (h *mheap) init(spans_size uintptr) {
+func (h *mheap) init(spansStart, spansBytes uintptr) {
 	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
 	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
 	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
@@ -407,10 +404,10 @@ func (h *mheap) init(spansStart, spansBytes uintptr) {
 		h.central[i].mcentral.init(int32(i))
 	}

-	sp := (*slice)(unsafe.Pointer(&h_spans))
-	sp.array = unsafe.Pointer(h.spans)
-	sp.len = int(spans_size / sys.PtrSize)
-	sp.cap = int(spans_size / sys.PtrSize)
+	sp := (*slice)(unsafe.Pointer(&h.spans))
+	sp.array = unsafe.Pointer(spansStart)
+	sp.len = int(spansBytes / sys.PtrSize)
+	sp.cap = int(spansBytes / sys.PtrSize)
 }

 // mHeap_MapSpans makes sure that the spans are mapped
@@ -430,7 +427,7 @@ func (h *mheap) mapSpans(arena_used uintptr) {
 	if h.spans_mapped >= n {
 		return
 	}
-	sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
+	sysMap(add(unsafe.Pointer(&h.spans[0]), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
 	h.spans_mapped = n
 }

@@ -582,15 +579,15 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
 		traceHeapAlloc()
 	}

-	// h_spans is accessed concurrently without synchronization
+	// h.spans is accessed concurrently without synchronization
 	// from other threads. Hence, there must be a store/store
-	// barrier here to ensure the writes to h_spans above happen
+	// barrier here to ensure the writes to h.spans above happen
 	// before the caller can publish a pointer p to an object
 	// allocated from s. As soon as this happens, the garbage
 	// collector running on another processor could read p and
-	// look up s in h_spans. The unlock acts as the barrier to
+	// look up s in h.spans. The unlock acts as the barrier to
 	// order these writes. On the read side, the data dependency
-	// between p and the index in h_spans orders the reads.
+	// between p and the index in h.spans orders the reads.
 	unlock(&h.lock)
 	return s
 }
@@ -686,10 +683,10 @@ HaveSpan:
 		s.npages = npage
 		p := (t.base() - h.arena_start) >> _PageShift
 		if p > 0 {
-			h_spans[p-1] = s
+			h.spans[p-1] = s
 		}
-		h_spans[p] = t
-		h_spans[p+t.npages-1] = t
+		h.spans[p] = t
+		h.spans[p+t.npages-1] = t
 		t.needzero = s.needzero
 		s.state = _MSpanStack // prevent coalescing with s
 		t.state = _MSpanStack
@@ -700,7 +697,7 @@ HaveSpan:

 	p := (s.base() - h.arena_start) >> _PageShift
 	for n := uintptr(0); n < npage; n++ {
-		h_spans[p+n] = s
+		h.spans[p+n] = s
 	}

 	memstats.heap_inuse += uint64(npage << _PageShift)
@@ -766,7 +763,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	s.init(uintptr(v), ask>>_PageShift)
 	p := (s.base() - h.arena_start) >> _PageShift
 	for i := p; i < p+s.npages; i++ {
-		h_spans[i] = s
+		h.spans[i] = s
 	}
 	atomic.Store(&s.sweepgen, h.sweepgen)
 	s.state = _MSpanInUse
@@ -781,7 +778,7 @@ func (h *mheap) lookup(v unsafe.Pointer) *mspan {
 	p := uintptr(v)
 	p -= h.arena_start
-	return h_spans[p>>_PageShift]
+	return h.spans[p>>_PageShift]
 }

 // Look up the span at the given address.
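The init hunk is the heart of the change: it overlays a []*mspan header onto memory that was only reserved from the OS, never handed out by the Go allocator, by writing the fields of the runtime's private slice struct directly; len and cap are spansBytes/sys.PtrSize because the table holds one pointer per word of the reservation. Outside the runtime, the same overlay can be sketched with unsafe.Slice (added in Go 1.17, long after this commit); region, spansStart, and spansBytes below are stand-ins for the reservation made in mallocinit:

package main

import (
	"fmt"
	"unsafe"
)

type span struct{ id int }

func main() {
	// Pretend this array is the reserved spans region passed to
	// mheap.init as (spansStart, spansBytes); the real runtime
	// gets it from sysReserve.
	var region [8]*span
	spansStart := unsafe.Pointer(&region[0])
	spansBytes := unsafe.Sizeof(region)

	// Overlay a []*span on that memory: one entry per pointer-sized
	// word, exactly how init computes sp.len and sp.cap.
	ptrSize := unsafe.Sizeof(spansStart)
	spans := unsafe.Slice((**span)(spansStart), spansBytes/ptrSize)

	spans[3] = &span{id: 42}
	fmt.Println(len(spans), cap(spans), spans[3].id) // 8 8 42
}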
@@ -795,7 +792,7 @@ func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
 	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
 		return nil
 	}
-	s := h_spans[(uintptr(v)-h.arena_start)>>_PageShift]
+	s := h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
 	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
 		return nil
 	}
@@ -880,26 +877,26 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	// Coalesce with earlier, later spans.
 	p := (s.base() - h.arena_start) >> _PageShift
 	if p > 0 {
-		t := h_spans[p-1]
+		t := h.spans[p-1]
 		if t != nil && t.state == _MSpanFree {
 			s.startAddr = t.startAddr
 			s.npages += t.npages
 			s.npreleased = t.npreleased // absorb released pages
 			s.needzero |= t.needzero
 			p -= t.npages
-			h_spans[p] = s
+			h.spans[p] = s
 			h.freeList(t.npages).remove(t)
 			t.state = _MSpanDead
 			h.spanalloc.free(unsafe.Pointer(t))
 		}
 	}
 	if (p+s.npages)*sys.PtrSize < h.spans_mapped {
-		t := h_spans[p+s.npages]
+		t := h.spans[p+s.npages]
 		if t != nil && t.state == _MSpanFree {
 			s.npages += t.npages
 			s.npreleased += t.npreleased
 			s.needzero |= t.needzero
-			h_spans[p+s.npages-1] = s
+			h.spans[p+s.npages-1] = s
 			h.freeList(t.npages).remove(t)
 			t.state = _MSpanDead
 			h.spanalloc.free(unsafe.Pointer(t))
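freeSpanLocked leans on the invariant now documented on the spans field: a free span keeps only its lowest and highest pages pointing at itself, so coalescing with a free neighbor rewrites just the new boundary entries and leaves interior entries stale. A toy model of that bookkeeping (span, coalesce, and the fixed-size table are inventions for illustration; the real code also returns the absorbed span to spanalloc and bounds the right-neighbor probe by spans_mapped rather than len):

package main

import "fmt"

type span struct {
	start, npages int
	free          bool
}

// coalesce frees s into the page table, absorbing free neighbors.
func coalesce(spans []*span, s *span) {
	p := s.start
	// A free left neighbor's highest page is spans[p-1].
	if p > 0 {
		if t := spans[p-1]; t != nil && t.free {
			s.start, s.npages = t.start, t.npages+s.npages
			p = s.start
			// Old boundary entries go stale: "internal pages
			// map to an arbitrary span."
		}
	}
	// A free right neighbor's lowest page is spans[p+s.npages].
	if p+s.npages < len(spans) {
		if t := spans[p+s.npages]; t != nil && t.free {
			s.npages += t.npages
		}
	}
	// Only the merged span's boundary pages need updating.
	spans[p], spans[p+s.npages-1] = s, s
	s.free = true
}

func main() {
	table := make([]*span, 16)
	a := &span{start: 0, npages: 4, free: true}
	b := &span{start: 4, npages: 2}
	c := &span{start: 6, npages: 3, free: true}
	table[0], table[3] = a, a // free: boundary pages only
	for i := 4; i < 6; i++ {
		table[i] = b // in use: every page maps to the span
	}
	table[6], table[8] = c, c

	coalesce(table, b)
	fmt.Println(b.start, b.npages) // 0 9
}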