var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
-// List of stack spans to be freed at the end of GC. Protected by
-// stackpoolmu.
-var stackFreeQueue mSpanList
+// Global pool of large stack spans.
+var stackLarge struct {
+	lock mutex
+	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
+}
// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool
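
The hunks below lean on a handful of mSpanList operations (init, isEmpty, insert, remove, the first field, and the span's next link). As a reading aid, here is a minimal stand-in with the same surface, written for this note rather than taken from the runtime; the real mSpanList and mspan are intrusive list types that carry considerably more state.

```go
package sketch

// Minimal stand-in for the runtime's mSpanList, covering only the
// operations used in this change. Types and method bodies here are
// illustrative, not the runtime's own.
type span struct {
	npages     uintptr
	next, prev *span
}

type spanList struct {
	first, last *span
}

func (l *spanList) init()         { l.first, l.last = nil, nil }
func (l *spanList) isEmpty() bool { return l.first == nil }

// insert pushes s onto the front of the list.
func (l *spanList) insert(s *span) {
	s.next = l.first
	s.prev = nil
	if l.first != nil {
		l.first.prev = s
	} else {
		l.last = s
	}
	l.first = s
}

// remove unlinks s in O(1) using its prev/next links.
func (l *spanList) remove(s *span) {
	if s.prev != nil {
		s.prev.next = s.next
	} else {
		l.first = s.next
	}
	if s.next != nil {
		s.next.prev = s.prev
	} else {
		l.last = s.prev
	}
	s.next, s.prev = nil, nil
}
```
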
	for i := range stackpool {
		stackpool[i].init()
	}
-	stackFreeQueue.init()
+	for i := range stackLarge.free {
+		stackLarge.free[i].init()
+	}
+}
+
+// stacklog2 returns ⌊log_2(n)⌋.
+func stacklog2(n uintptr) int {
+	log2 := 0
+	for n > 1 {
+		n >>= 1
+		log2++
+	}
+	return log2
}
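
A quick way to sanity-check the bucketing: stacklog2 shifts n down until it reaches 1, so every page count in [2^k, 2^(k+1)) lands in bucket k, and for the power-of-two sizes large stacks use the floor is exact. A standalone copy of the loop, assuming nothing from the runtime:

```go
package main

import "fmt"

// Same loop as stacklog2 above, copied here so it can be run standalone.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

func main() {
	for _, n := range []uintptr{1, 2, 3, 4, 8, 9, 1024} {
		fmt.Printf("stacklog2(%d) = %d\n", n, stacklog2(n))
	}
	// Output:
	// stacklog2(1) = 0
	// stacklog2(2) = 1
	// stacklog2(3) = 1
	// stacklog2(4) = 2
	// stacklog2(8) = 3
	// stacklog2(9) = 3
	// stacklog2(1024) = 10
}
```
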
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
		}
		v = unsafe.Pointer(x)
	} else {
-		s := mheap_.allocStack(round(uintptr(n), _PageSize) >> _PageShift)
+		var s *mspan
+		npage := uintptr(n) >> _PageShift
+		log2npage := stacklog2(npage)
+
+		// Try to get a stack from the large stack cache.
+		lock(&stackLarge.lock)
+		if !stackLarge.free[log2npage].isEmpty() {
+			s = stackLarge.free[log2npage].first
+			stackLarge.free[log2npage].remove(s)
+		}
+		unlock(&stackLarge.lock)
+
		if s == nil {
-			throw("out of memory")
+			// Allocate a new stack from the heap.
+			s = mheap_.allocStack(npage)
+			if s == nil {
+				throw("out of memory")
+			}
		}
		v = unsafe.Pointer(s.start << _PageShift)
	}
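
One quiet change in this hunk: the old line rounded n up to a page boundary before shifting, while the new code computes npage with a plain shift. The two agree as long as sizes on the large-stack path are already page-aligned, which holds under the usual assumption that they are powers of two of at least one page. A small check of that arithmetic, with the helper and page size written out here purely for illustration:

```go
package main

import "fmt"

// roundUp rounds x up to a multiple of a, where a is a power of two.
// It mirrors the rounding the removed line performed.
func roundUp(x, a uintptr) uintptr { return (x + a - 1) &^ (a - 1) }

func main() {
	const pageSize uintptr = 8 << 10 // illustrative page size only
	// For page-aligned sizes, rounding up is a no-op, so
	// roundUp(n, pageSize)>>shift and n>>shift give the same page count.
	for _, n := range []uintptr{32 << 10, 64 << 10, 1 << 20} {
		fmt.Println(n, roundUp(n, pageSize) == n) // prints true for each
	}
}
```
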
			// sweeping.
			mheap_.freeStack(s)
		} else {
-			// Otherwise, add it to a list of stack spans
-			// to be freed at the end of GC.
-			//
-			// TODO(austin): Make it possible to re-use
-			// these spans as stacks, like we do for small
-			// stack spans. (See issue #11466.)
-			lock(&stackpoolmu)
-			stackFreeQueue.insert(s)
-			unlock(&stackpoolmu)
+			// If the GC is running, we can't return a
+			// stack span to the heap because it could be
+			// reused as a heap span, and this state
+			// change would race with GC. Add it to the
+			// large stack cache instead.
+			log2npage := stacklog2(s.npages)
+			lock(&stackLarge.lock)
+			stackLarge.free[log2npage].insert(s)
+			unlock(&stackLarge.lock)
		}
	}
}
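
Taken together, stackalloc and stackfree now treat stackLarge as a per-size-class LIFO: allocation consults the bucket for the requested size and falls back to the heap, while a free that happens during a GC cycle parks the span in the same bucket instead of returning it to the heap (freeStackSpans, in the next hunk, drains the cache once the cycle ends). A toy model of that flow; span, cache, takeOrAlloc, and freeLarge are hypothetical stand-ins for the runtime's mspan, stackLarge, stackalloc, and stackfree:

```go
package main

import (
	"fmt"
	"sync"
)

type span struct{ npages uintptr }

var cache struct {
	mu   sync.Mutex
	free [48][]*span // bucket k holds spans of roughly 2^k pages
}

func log2floor(n uintptr) int {
	k := 0
	for n > 1 {
		n >>= 1
		k++
	}
	return k
}

// takeOrAlloc mirrors the new stackalloc path: consult the cache
// bucket first, allocate fresh only if it is empty.
func takeOrAlloc(npages uintptr) *span {
	k := log2floor(npages)
	cache.mu.Lock()
	var s *span
	if n := len(cache.free[k]); n > 0 {
		s, cache.free[k] = cache.free[k][n-1], cache.free[k][:n-1]
	}
	cache.mu.Unlock()
	if s == nil {
		s = &span{npages: npages} // stand-in for mheap_.allocStack
	}
	return s
}

// freeLarge mirrors the new stackfree path: while the GC is running,
// the span is parked in the cache instead of going back to the heap.
func freeLarge(s *span, gcRunning bool) {
	if !gcRunning {
		return // stand-in for mheap_.freeStack
	}
	k := log2floor(s.npages)
	cache.mu.Lock()
	cache.free[k] = append(cache.free[k], s)
	cache.mu.Unlock()
}

func main() {
	a := takeOrAlloc(4) // a 4-page stack goes in bucket 2
	freeLarge(a, true)  // freed mid-GC: cached, not returned to the heap
	b := takeOrAlloc(4) // the next request of the same size reuses it
	fmt.Println(a == b) // true
}
```
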
		}
	}
-	// Free queued stack spans.
-	for !stackFreeQueue.isEmpty() {
-		s := stackFreeQueue.first
-		stackFreeQueue.remove(s)
-		mheap_.freeStack(s)
-	}
-
	unlock(&stackpoolmu)
+
+	// Free large stack spans.
+	lock(&stackLarge.lock)
+	for i := range stackLarge.free {
+		for s := stackLarge.free[i].first; s != nil; {
+			next := s.next
+			stackLarge.free[i].remove(s)
+			mheap_.freeStack(s)
+			s = next
+		}
+	}
+	unlock(&stackLarge.lock)
}
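
The draining loop saves s.next before calling remove so the walk can continue after the span has been unlinked. The same idiom, shown with the standard library's container/list so it can be run directly (list.Remove likewise clears the removed element's links):

```go
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	for i := 1; i <= 5; i++ {
		l.PushBack(i)
	}
	// Same idiom as the loop above: save Next() before removing,
	// since Remove clears the element's links.
	for e := l.Front(); e != nil; {
		next := e.Next()
		if e.Value.(int)%2 == 0 {
			l.Remove(e)
		}
		e = next
	}
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Print(e.Value, " ")
	}
	fmt.Println() // prints: 1 3 5
}
```
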
//go:nosplit