	return treapIter{f, root.treap.findMaximal(f)}
}
+// mutate allows one to mutate the span, via a callback, without removing it
+// from the treap. The span's base and size are allowed to change as long as
+// the span remains in the same order relative to its predecessor and successor.
+//
+// Note however that any operation that causes a treap rebalancing inside of fn
+// is strictly forbidden, as that may cause treap node metadata to go
+// out-of-sync.
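+//
+// A minimal sketch of the intended usage, where h is the mheap, i is a
+// treapIter positioned at one of its free spans, and n is some page count
+// smaller than that span's size:
+//
+//	h.free.mutate(i, func(s *mspan) {
+//		// Shrink the span in place. Its base is unchanged, so it keeps
+//		// the same predecessor and successor in the treap.
+//		s.npages -= n
+//	})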
+func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
+	s := i.span()
+	// Save some state about the span for later inspection.
+	hpages := s.hugePages()
+	scavenged := s.scavenged
+	// Call the mutator.
+	fn(s)
+	// Update unscavHugePages appropriately.
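+	// If the span was unscavenged before the mutation, remove its old huge
+	// page count; if it is unscavenged afterwards, add back its (possibly
+	// changed) new count.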
+	if !scavenged {
+		mheap_.free.unscavHugePages -= hpages
+	}
+	if !s.scavenged {
+		mheap_.free.unscavHugePages += s.hugePages()
+	}
+	// Update the key in case the base changed.
+	i.t.key = s.base()
+	// Updating invariants up the tree needs to happen if
+	// anything changed at all, so just go ahead and do it
+	// unconditionally.
+	//
+	// If it turns out nothing changed, it'll exit quickly.
+	t := i.t
+	for t != nil && t.updateInvariants() {
+		t = t.parent
+	}
+}
+
// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
	if !span.scavenged {
HaveSpan:
	s := t.span()
-	h.free.erase(t)
-
-	// Mark span in use.
	if s.state != mSpanFree {
		throw("candidate mspan for allocation is not free")
	}
-	if s.npages < npage {
-		throw("candidate mspan for allocation is too small")
-	}
	// First, subtract any memory that was released back to
-	// the OS from s. We will re-scavenge the trimmed section
-	// if necessary.
+	// the OS from s. We will add back what's left if necessary.
	memstats.heap_released -= uint64(s.released())
-	if s.npages > npage {
-		// Trim extra and put it back in the heap.
-		t := (*mspan)(h.spanalloc.alloc())
-		t.init(s.base()+npage<<_PageShift, s.npages-npage)
-		s.npages = npage
-		h.setSpan(t.base()-1, s)
-		h.setSpan(t.base(), t)
-		h.setSpan(t.base()+t.npages*pageSize-1, t)
-		t.needzero = s.needzero
-		// If s was scavenged, then t may be scavenged.
-		start, end := t.physPageBounds()
-		if s.scavenged && start < end {
-			memstats.heap_released += uint64(end - start)
-			t.scavenged = true
-		}
-		s.state = mSpanManual // prevent coalescing with s
-		t.state = mSpanManual
-		h.freeSpanLocked(t, false, false, s.unusedsince)
-		s.state = mSpanFree
+	if s.npages == npage {
+		h.free.erase(t)
+	} else if s.npages > npage {
+		// Trim off the lower bits and make that our new span.
+		// Do this in-place since this operation does not
+		// affect the original span's location in the treap.
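+		// For example, if s covers pages [p, p+8) and npage is 3, the new
+		// span n takes pages [p, p+3) while s shrinks to [p+3, p+8); s's
+		// base only moves up within its old range, so it keeps the same
+		// predecessor and successor in the treap.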
+		n := (*mspan)(h.spanalloc.alloc())
+		h.free.mutate(t, func(s *mspan) {
+			n.init(s.base(), npage)
+			s.npages -= npage
+			s.startAddr = s.base() + npage*pageSize
+			h.setSpan(s.base()-1, n)
+			h.setSpan(s.base(), s)
+			h.setSpan(n.base(), n)
+			n.needzero = s.needzero
+			// n may not be big enough to actually be scavenged, but that's fine.
+			// We still want it to appear to be scavenged so that we can do the
+			// right bookkeeping later on in this function (i.e. sysUsed).
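+			// ("Big enough" here means covering at least one full physical
+			// page; see physPageBounds.)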
+			n.scavenged = s.scavenged
+			// Check if s is still scavenged.
+			if s.scavenged {
+				start, end := s.physPageBounds()
+				if start < end {
+					memstats.heap_released += uint64(end - start)
+				} else {
+					s.scavenged = false
+				}
+			}
+		})
+		s = n
+	} else {
+		throw("candidate mspan for allocation is too small")
+	}
// "Unscavenge" s only AFTER splitting so that
// we only sysUsed whatever we actually need.