throw("uncaching span but s.allocCount == 0")
}
- cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.allocCount)
-
- // cacheSpan updated alloc assuming all objects on s were
- // going to be allocated. Adjust for any that weren't. We must
- // do this before potentially sweeping the span.
- if n > 0 {
- atomic.Xadd64(&c.nmalloc, -int64(n))
- }
-
sg := mheap_.sweepgen
stale := s.sweepgen == sg+1
if stale {
// Span was cached before sweep began. It's our
// responsibility to sweep it.
//
// Set sweepgen to indicate it's not cached but needs
- // sweeping. sweep will set s.sweepgen to indicate s
- // is swept.
- s.sweepgen = sg - 1
- s.sweep(true)
- // sweep may have freed objects, so recompute n.
- n = cap - int32(s.allocCount)
+ // sweeping and can't be allocated from. sweep will
+ // set s.sweepgen to indicate s is swept.
+ atomic.Store(&s.sweepgen, sg-1)
} else {
// Indicate that s is no longer cached.
- s.sweepgen = sg
+ atomic.Store(&s.sweepgen, sg)
}
+ cap := int32((s.npages << _PageShift) / s.elemsize)
+ n := cap - int32(s.allocCount)
if n > 0 {
+ // cacheSpan updated alloc assuming all objects on s
+ // were going to be allocated. Adjust for any that
+ // weren't. We must do this before potentially
+ // sweeping the span.
+ atomic.Xadd64(&c.nmalloc, -int64(n))
+
lock(&c.lock)
c.empty.remove(s)
c.nonempty.insert(s)
unlock(&c.lock)
}
+
+ if stale {
+ // Now that s is in the right mcentral list, we can
+ // sweep it.
+ s.sweep(false)
+ }
}
// freeSpan updates c and s after sweeping s.