s = largeAlloc(size, flags)
})
s.freeindex = 1
- x = unsafe.Pointer(uintptr(s.start << pageShift))
+ x = unsafe.Pointer(s.base())
size = s.elemsize
}
if s == nil {
throw("out of memory")
}
- s.limit = uintptr(s.start)<<_PageShift + size
+ s.limit = s.base() + size
heapBitsForSpan(s.base()).initSpan(s)
return s
}
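
The substance of the change is the same at every call site: the open-coded address computation uintptr(s.start << _PageShift) is replaced by the new s.base() accessor. A minimal, standalone sketch of the equivalence, with stand-in types rather than the runtime's own and an assumed 8 KB page size:

    package main

    import "fmt"

    const pageShift = 13 // assumed: 8 KB pages, as on most platforms

    type pageID uintptr

    // span is a stand-in for the runtime's mspan, reduced to the two
    // fields this change is about.
    type span struct {
        start     pageID  // starting page number
        startAddr uintptr // caches uintptr(start) << pageShift
    }

    // base returns the address of the first byte of the span; after the
    // change it is a field load rather than a shift at every call site.
    func (s *span) base() uintptr { return s.startAddr }

    func main() {
        s := &span{start: 0x42}
        s.startAddr = uintptr(s.start) << pageShift
        fmt.Printf("base=%#x recomputed=%#x\n", s.base(), uintptr(s.start)<<pageShift)
    }
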
} else {
print(" to unused region of span")
}
- print("idx=", hex(idx), " span.start=", hex(s.start<<_PageShift), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
+ print("idx=", hex(idx), " span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
if refBase != 0 {
print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
gcDumpObject("object", refBase, refOff)
return nil
}
- p := uintptr(s.start << _PageShift)
+ p := s.base()
s.limit = p + size*n
heapBitsForSpan(s.base()).initSpan(s)
// retain everything it points to.
spf := (*specialfinalizer)(unsafe.Pointer(sp))
// A finalizer can be set for an inner byte of an object; find the object's beginning.
- p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+ p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
// Mark everything that can be reached from
// the object (but *not* the object itself or
special := *specialp
for special != nil {
// A finalizer can be set for an inner byte of an object; find the object's beginning.
- p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
+ p := s.base() + uintptr(special.offset)/size*size
mbits := s.markBitsForAddr(p)
if !mbits.isMarked() {
// This object is not marked and has at least one special record.
// Pass 1: see if it has at least one finalizer.
hasFin := false
- endOffset := p - uintptr(s.start<<_PageShift) + size
+ endOffset := p - s.base() + size
for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
if tmp.kind == _KindSpecialFinalizer {
// Stop freeing the object if it has a finalizer.
for special != nil && uintptr(special.offset) < endOffset {
// Find the exact byte for which the special was set up
// (as opposed to the object's beginning).
- p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
+ p := s.base() + uintptr(special.offset)
if special.kind == _KindSpecialFinalizer || !hasFin {
// Splice out special record.
y := special
// implement and then call some kind of MHeap_DeleteSpan.
if debug.efence > 0 {
s.limit = 0 // prevent mlookup from finding this span
- sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
+ sysFault(unsafe.Pointer(s.base()), size)
} else {
mheap_.freeSpan(s, 1)
}
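
Every special-record hunk above leans on the same arithmetic: an interior offset is rounded down to a multiple of elemsize to find the beginning of the object that owns the special, then added to s.base(). A standalone sketch of that rounding (addresses and sizes are illustrative):

    package main

    import "fmt"

    func main() {
        const (
            base     = uintptr(0x1000) // assumed span base address
            elemsize = uintptr(48)     // assumed object size for this span's size class
        )
        // Offset 100 falls inside the third object (objects start at 0, 48, 96, ...),
        // so rounding down by integer division recovers the object's beginning.
        offset := uintptr(100)
        objStart := base + offset/elemsize*elemsize
        fmt.Printf("special at +%d belongs to object at %#x\n", offset, objStart)
    }
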
next *mspan // next span in list, or nil if none
prev **mspan // previous span's next field, or list head's first field if none
list *mSpanList // For debugging. TODO: Remove.
-
+ // TODO(rlh): Eliminate the start field and use startAddr >> _PageShift instead.
+ startAddr uintptr // uintptr(s.start << _PageShift) aka s.base()
start pageID // starting page number
npages uintptr // number of pages in span
stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
}
func (s *mspan) base() uintptr {
- return uintptr(s.start << _PageShift)
+ return s.startAddr
}
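
With startAddr cached, base() becomes a plain field load, at the cost of an invariant: startAddr must equal uintptr(start) << _PageShift after every write to start. The hunks below update both fields together at each write site, and the TODO notes that start itself is slated for removal, which would retire the invariant entirely. A sketch of the invariant as a single setter (hypothetical; the change updates the fields inline instead):

    package main

    import "fmt"

    const pageShift = 13 // assumed page shift

    type pageID uintptr

    type span struct {
        start     pageID
        startAddr uintptr
    }

    // setStart writes both fields together so they can never disagree. The
    // change itself instead updates startAddr at each site that writes start.
    func (s *span) setStart(p pageID) {
        s.start = p
        s.startAddr = uintptr(p) << pageShift
    }

    func main() {
        var s span
        s.setStart(7)
        fmt.Println(s.startAddr == uintptr(s.start)<<pageShift) // always true
    }
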
func (s *mspan) layout() (size, n, total uintptr) {
return 0
}
- p := uintptr(s.start) << _PageShift
+ p := s.base()
if s.sizeclass == 0 {
// Large object.
if base != nil {
if s != nil {
if needzero && s.needzero != 0 {
- memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
}
s.needzero = 0
}
throw("still in list")
}
if s.npreleased > 0 {
- sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
memstats.heap_released -= uint64(s.npreleased << _PageShift)
s.npreleased = 0
}
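
Both hunks above describe whole-span operations (zeroing, marking pages used) over the same byte range: npages << _PageShift bytes starting at s.base(). A worked sketch of that range computation, with assumed values:

    package main

    import "fmt"

    const pageShift = 13 // assumed: 8 KB pages

    func main() {
        base := uintptr(0x1000000) // assumed span base
        npages := uintptr(4)
        length := npages << pageShift // 4 pages = 32 KB
        fmt.Printf("span occupies [%#x, %#x)\n", base, base+length)
    }
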
t := h_spans[p-1]
if t != nil && t.state == _MSpanFree {
s.start = t.start
+ s.startAddr = uintptr(s.start << _PageShift)
s.npages += t.npages
s.npreleased = t.npreleased // absorb released pages
s.needzero |= t.needzero
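
Coalescing with the preceding free span t moves s's starting page back to t.start, so startAddr must be recomputed in the same breath; skipping that write would leave base() pointing into the middle of the merged span. A reduced sketch of the merge under the same assumptions as above:

    package main

    import "fmt"

    const pageShift = 13 // assumed page shift

    type pageID uintptr

    type span struct {
        start     pageID
        startAddr uintptr
        npages    uintptr
    }

    // base returns the cached address of the span's first byte.
    func (s *span) base() uintptr { return s.startAddr }

    // coalesce absorbs the immediately preceding free span t into s, keeping
    // startAddr in sync with the new, earlier start page.
    func (s *span) coalesce(t *span) {
        s.start = t.start
        s.startAddr = uintptr(s.start) << pageShift
        s.npages += t.npages
    }

    func main() {
        t := &span{start: 10, startAddr: 10 << pageShift, npages: 2}
        s := &span{start: 12, startAddr: 12 << pageShift, npages: 3}
        s.coalesce(t)
        fmt.Printf("merged: base=%#x npages=%d\n", s.base(), s.npages)
    }
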
span.prev = nil
span.list = nil
span.start = start
+ span.startAddr = uintptr(start << _PageShift)
span.npages = npages
span.allocCount = 0
span.sizeclass = 0
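
Span initialization is the other write site: a fresh span derives both fields from the same starting page, so it begins life consistent. A minimal sketch, again with assumed stand-in types:

    package main

    import "fmt"

    const pageShift = 13 // assumed page shift

    type pageID uintptr

    type span struct {
        start     pageID
        startAddr uintptr
        npages    uintptr
    }

    // newSpan mirrors the init hunk above: both address fields are derived
    // from the same starting page, so they cannot start out inconsistent.
    func newSpan(start pageID, npages uintptr) *span {
        return &span{
            start:     start,
            startAddr: uintptr(start) << pageShift,
            npages:    npages,
        }
    }

    func main() {
        s := newSpan(0x42, 3)
        fmt.Printf("start=%d base=%#x npages=%d\n", s.start, s.startAddr, s.npages)
    }
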