}
func AllocSpan(base, npages uintptr, scavenged bool) Span {
-    lock(&mheap_.lock)
-    s := (*mspan)(mheap_.spanalloc.alloc())
-    unlock(&mheap_.lock)
+    var s *mspan
+    systemstack(func() {
+        lock(&mheap_.lock)
+        s = (*mspan)(mheap_.spanalloc.alloc())
+        unlock(&mheap_.lock)
+    })
    s.init(base, npages)
    s.scavenged = scavenged
    return Span{s}
}
func (s *Span) Free() {
-    lock(&mheap_.lock)
-    mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        mheap_.spanalloc.free(unsafe.Pointer(s.mspan))
+        unlock(&mheap_.lock)
+    })
    s.mspan = nil
}
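
Why wrap these critical sections in systemstack at all? A g's stack can grow at almost any function call, and growing it allocates from the heap, which needs mheap_.lock; taking that lock on a growable stack therefore risks self-deadlock. The system stack (g0) never grows, so running the section there removes the hazard. None of these internals are reachable outside the runtime, so the following is only a loose user-level sketch of the shape of the pattern, with runOnWorker as a hypothetical stand-in for systemstack:

package main

import (
	"fmt"
	"sync"
)

// crit serializes critical sections onto one dedicated goroutine,
// loosely mirroring how systemstack moves work onto g0.
var crit = make(chan func())

// runOnWorker is a hypothetical stand-in for systemstack: the closure
// always runs in a controlled context rather than on whichever
// goroutine happened to call it.
func runOnWorker(fn func()) {
	done := make(chan struct{})
	crit <- func() {
		fn()
		close(done)
	}
	<-done
}

func main() {
	go func() { // the "system" worker
		for fn := range crit {
			fn()
		}
	}()

	var mu sync.Mutex
	counter := 0
	runOnWorker(func() {
		mu.Lock()
		counter++ // the critical section runs in a known, fixed context
		mu.Unlock()
	})
	fmt.Println("counter =", counter)
}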
    // allocation which requires the mheap_ lock to manipulate.
    // Locking here is safe because the treap itself never allocs
    // or otherwise ends up grabbing this lock.
-    lock(&mheap_.lock)
-    t.insert(s.mspan)
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        t.insert(s.mspan)
+        unlock(&mheap_.lock)
+    })
    t.CheckInvariants()
}
    // freeing which requires the mheap_ lock to manipulate.
    // Locking here is safe because the treap itself never allocs
    // or otherwise ends up grabbing this lock.
-    lock(&mheap_.lock)
-    t.erase(i.treapIter)
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        t.erase(i.treapIter)
+        unlock(&mheap_.lock)
+    })
    t.CheckInvariants()
}
func (t *Treap) RemoveSpan(s Span) {
    // See Erase about locking.
-    lock(&mheap_.lock)
-    t.removeSpan(s.mspan)
-    unlock(&mheap_.lock)
+    systemstack(func() {
+        lock(&mheap_.lock)
+        t.removeSpan(s.mspan)
+        unlock(&mheap_.lock)
+    })
    t.CheckInvariants()
}
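
The safety argument in these comments rests on an invariant worth spelling out: the treap's insert and erase must not allocate (or do anything else that could take mheap_.lock) while the lock is held. A hypothetical container below shows the same discipline in user code: nodes are allocated by the caller before the lock is taken, so the critical section can never re-enter the allocator.

package main

import (
	"fmt"
	"sync"
)

// node and list are hypothetical; they illustrate the invariant the
// comments above rely on: insertion does no allocation, so it can
// never re-enter an allocator guarded by the same lock.
type node struct {
	next  *node
	value int
}

type list struct {
	mu   sync.Mutex
	head *node
}

// insert links a caller-allocated node. Because nothing here
// allocates, holding mu cannot recursively require mu.
func (l *list) insert(n *node) {
	l.mu.Lock()
	n.next = l.head
	l.head = n
	l.mu.Unlock()
}

func main() {
	l := &list{}
	n := &node{value: 42} // allocate before taking the lock
	l.insert(n)
	fmt.Println(l.head.value)
}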
var emptymspan mspan
func allocmcache() *mcache {
-    lock(&mheap_.lock)
-    c := (*mcache)(mheap_.cachealloc.alloc())
-    c.flushGen = mheap_.sweepgen
-    unlock(&mheap_.lock)
+    var c *mcache
+    systemstack(func() {
+        lock(&mheap_.lock)
+        c = (*mcache)(mheap_.cachealloc.alloc())
+        c.flushGen = mheap_.sweepgen
+        unlock(&mheap_.lock)
+    })
    for i := range c.alloc {
        c.alloc[i] = &emptymspan
    }
//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
-    lock(&mheap_.lock)
-    out = gcpercent
-    if in < 0 {
-        in = -1
-    }
-    gcpercent = in
-    heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
-    // Update pacing in response to gcpercent change.
-    gcSetTriggerRatio(memstats.triggerRatio)
-    unlock(&mheap_.lock)
+    // Run on the system stack since we grab the heap lock.
+    systemstack(func() {
+        lock(&mheap_.lock)
+        out = gcpercent
+        if in < 0 {
+            in = -1
+        }
+        gcpercent = in
+        heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
+        // Update pacing in response to gcpercent change.
+        gcSetTriggerRatio(memstats.triggerRatio)
+        unlock(&mheap_.lock)
+    })
    // If we just disabled GC, wait for any concurrent GC mark to
    // finish so we always return with no GC running.
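
The //go:linkname directive above is what backs the public runtime/debug.SetGCPercent, so from user code the change is invisible and the usual API still applies. A short usage sketch:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// SetGCPercent returns the previous setting; it is the exported
	// wrapper that the linknamed setGCPercent implements.
	old := debug.SetGCPercent(50) // collect roughly twice as often
	fmt.Println("previous GOGC:", old)

	// A negative value disables GC entirely, matching the
	// "if in < 0 { in = -1 }" clamp in the runtime.
	debug.SetGCPercent(-1)
	debug.SetGCPercent(old) // restore the original setting
}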
    gcBgMarkStartWorkers()
-    gcResetMarkState()
+    systemstack(gcResetMarkState)
    work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
    if work.stwprocs > ncpu {
    }
}
+// gcSweep must be called on the system stack because it acquires the heap
+// lock. See mheap for details.
+//
+//go:systemstack
func gcSweep(mode gcMode) {
if gcphase != _GCoff {
throw("gcSweep being done but phase is not GCoff")
//
// This is safe to do without the world stopped because any Gs created
// during or after this will start out in the reset state.
+//
+// gcResetMarkState must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
+//
+//go:systemstack
func gcResetMarkState() {
    // This may be called during a concurrent phase, so make sure
    // allgs doesn't change.
//
//go:notinheap
type mheap struct {
+    // lock must only be acquired on the system stack, otherwise a g
+    // could self-deadlock if its stack grows with the lock held.
    lock     mutex
    free     mTreap // free spans
    sweepgen uint32 // sweep generation, see comment in mspan
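
The self-deadlock this new comment warns about is re-entrancy: stack growth allocates, allocation wants mheap_.lock, and the g triggering the growth may already hold it. A minimal user-level analogue, with a hypothetical heapLock standing in for mheap_.lock and TryLock (Go 1.18+) used to detect the re-entry instead of hanging:

package main

import (
	"fmt"
	"sync"
)

var heapLock sync.Mutex // hypothetical stand-in for mheap_.lock

// grow is a stand-in for stack growth: an operation that can be
// triggered implicitly and needs the same lock the caller holds.
func grow() {
	if !heapLock.TryLock() {
		fmt.Println("would self-deadlock: lock already held")
		return
	}
	heapLock.Unlock()
}

func main() {
	heapLock.Lock()
	// In the runtime this re-entry happens implicitly at nearly any
	// function call, which is why the lock may only be taken on the
	// system stack, whose stack never grows.
	grow()
	heapLock.Unlock()
}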
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
-// allocManual must be called on the system stack to prevent stack
-// growth. Since this is used by the stack allocator, stack growth
-// during allocManual would self-deadlock.
+// allocManual must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
// This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
-// freeManual must be called on the system stack to prevent stack
-// growth, just like allocManual.
+// freeManual must be called on the system stack because it acquires
+// the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, stat *uint64) {
    })
}
+// readGCStats_m must be called on the system stack because it acquires the heap
+// lock. See mheap for details.
+//
+//go:systemstack
func readGCStats_m(pauses *[]uint64) {
    p := *pauses
    // Calling code in runtime/debug should make the slice large enough.
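
readGCStats_m backs runtime/debug.ReadGCStats, which handles the slice sizing the comment above demands. Typical caller-side usage:

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	runtime.GC() // force a collection so there is a pause to report

	var stats debug.GCStats
	debug.ReadGCStats(&stats) // exported wrapper over readGCStats_m
	fmt.Println("NumGC:", stats.NumGC)
	fmt.Println("total pause:", stats.PauseTotal)
	if len(stats.Pause) > 0 {
		// Pause holds the pause history, most recent first.
		fmt.Println("most recent pause:", stats.Pause[0])
	}
}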