// The size is potentially changing, so the treap needs to delete adjacent
// nodes and insert them back as a combined node.
- if other.scavenged {
- h.scav.removeSpan(other)
- } else {
- h.free.removeSpan(other)
- }
+ h.treapForSpan(other).removeSpan(other)
other.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(other))
}
return
}
// Since we're resizing other, we must remove it from the treap.
- if other.scavenged {
- h.scav.removeSpan(other)
- } else {
- h.free.removeSpan(other)
- }
+ h.treapForSpan(other).removeSpan(other)
+
// Round boundary to the nearest physical page size, toward the
// scavenged span.
boundary := b.startAddr
h.setSpan(boundary, b)
// Re-insert other now that it has a new size.
- if other.scavenged {
- h.scav.insert(other)
- } else {
- h.free.insert(other)
- }
+ h.treapForSpan(other).insert(other)
}
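Scavenging returns memory to the OS at physical page granularity, so the boundary between a scavenged and an unscavenged span has to land on a physical page edge; which direction it moves depends on which side is scavenged. A sketch of the alignment arithmetic only, assuming a power-of-two page size (the helper names are illustrative, not runtime API):

```go
// alignDown and alignUp round an address to a power-of-two page size.
// Rounding the boundary "toward the scavenged span" picks whichever
// direction keeps the scavenged side covering only whole physical pages.
func alignDown(addr, pageSize uintptr) uintptr {
	return addr &^ (pageSize - 1)
}

func alignUp(addr, pageSize uintptr) uintptr {
	return (addr + pageSize - 1) &^ (pageSize - 1)
}
```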
// Coalesce with earlier, later spans.
}
}
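The remove/resize/re-insert choreography above exists because both treaps are ordered by span size: mutating a node's key in place would corrupt the ordering without the structure noticing. Below is a minimal, self-contained sketch of the same pattern; span, bySize, and the sorted-slice implementation are illustrative stand-ins for the runtime's mspan and mTreap, not the real types.

```go
package main

import (
	"fmt"
	"sort"
)

// span is an illustrative stand-in for the runtime's mspan: a run of
// pages whose size changes when it coalesces with a neighbor.
type span struct {
	start, npages uintptr
}

// bySize stands in for mTreap: a container ordered by npages. Any
// structure keyed on a mutable field must see an explicit remove and
// re-insert around the mutation, or its ordering silently breaks.
type bySize []*span

func (t *bySize) insert(s *span) {
	i := sort.Search(len(*t), func(i int) bool { return (*t)[i].npages >= s.npages })
	*t = append(*t, nil)
	copy((*t)[i+1:], (*t)[i:])
	(*t)[i] = s
}

func (t *bySize) remove(s *span) {
	for i, v := range *t {
		if v == s {
			*t = append((*t)[:i], (*t)[i+1:]...)
			return
		}
	}
}

func main() {
	a := &span{start: 0, npages: 2}
	b := &span{start: 2, npages: 1}
	t := bySize{}
	t.insert(a)
	t.insert(b)

	// Coalesce b into a, mirroring the patch: remove both nodes while
	// their keys are still accurate, grow the survivor, then insert it
	// back as a single combined node.
	t.remove(a)
	t.remove(b)
	a.npages += b.npages
	t.insert(a)

	fmt.Println(len(t), t[0].npages) // 1 3
}
```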
+// treapForSpan returns the appropriate treap for inserting or removing
+// a span: h.scav if the span is scavenged, h.free otherwise.
+func (h *mheap) treapForSpan(span *mspan) *mTreap {
+ if span.scavenged {
+ return &h.scav
+ }
+ return &h.free
+}
+
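Routing every call site through treapForSpan replaces four copies of the same five-line branch in this patch with a single expression each, and leaves exactly one place to change if the mapping from spans to treaps ever grows another case.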
// pickFreeSpan acquires a free span from the heap's internal free
// structures (the free and scav treaps) if one is available; otherwise
// it returns nil. h must be locked.
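With free spans split across two treaps, a picker has to consult both and choose between the two best-fit candidates. Extending the bySize sketch above (same types and imports; pick, bestFit, and the tie-breaking rule are assumptions for illustration, not the runtime's documented behavior):

```go
// bestFit returns the smallest span holding at least npages, or nil.
func bestFit(t bySize, npages uintptr) *span {
	i := sort.Search(len(t), func(i int) bool { return t[i].npages >= npages })
	if i < len(t) {
		return t[i]
	}
	return nil
}

// pick takes the best fit from each container and prefers the smaller
// of the two, so as little memory as possible is tied up per request.
func pick(free, scav *bySize, npages uintptr) *span {
	f := bestFit(*free, npages)
	s := bestFit(*scav, npages)
	switch {
	case f != nil && (s == nil || f.npages <= s.npages):
		free.remove(f)
		return f
	case s != nil:
		scav.remove(s)
		return s
	}
	return nil // neither structure has a span large enough
}
```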
h.coalesce(s)
// Insert s into the appropriate treap.
- if s.scavenged {
- h.scav.insert(s)
- } else {
- h.free.insert(s)
- }
+ h.treapForSpan(s).insert(s)
}
// scavengeLargest scavenges nbytes worth of spans in unscav