stack [maxCPUProfStack]uintptr
}
+//go:notinheap
type cpuProfile struct {
on bool // profiling is on
wait note // goroutine waits here
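
For context on the pragma itself: //go:notinheap says values of the type must never be allocated from the GC'd heap, which lets the compiler reject heap allocation outright and omit write barriers for pointers to such types. A minimal sketch of the allocation rule, using an invented type name (the pragma is only honored inside the runtime, or in packages built with the compiler's -+ flag):

//go:notinheap
type offHeap struct {
	next *offHeap
}

var global offHeap // package-level variables of notinheap types are allowed

func allocSketch() {
	var local offHeap // so is stack allocation
	local.next = &global
	// p := new(offHeap) // does not compile: notinheap types cannot be heap-allocated
}
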
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
+//
+// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
var p unsafe.Pointer
systemstack(func() {
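
The function body is elided by the hunk above; as a rough model of the contract it documents (bump allocation, no free, default 8-byte alignment), here is a toy version. All names are invented, and the real implementation's per-P chunks, locking, and sysStat accounting are omitted:

import "unsafe"

// persistentArena stands in for memory obtained from the OS outside the
// GC'd heap. It holds no pointers the collector can see, which is exactly
// why heap pointers stored in persistentalloc'd memory need special care.
var persistentArena [1 << 20]byte
var persistentOff uintptr

func toyPersistentAlloc(size, align uintptr) unsafe.Pointer {
	if align == 0 {
		align = 8 // the documented default alignment
	}
	persistentOff = (persistentOff + align - 1) &^ (align - 1) // round up
	if persistentOff+size > uintptr(len(persistentArena)) {
		return nil // the real allocator maps a fresh chunk instead
	}
	p := unsafe.Pointer(&persistentArena[persistentOff])
	persistentOff += size // there is no corresponding free
	return p
}
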
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
+//
+//go:notinheap
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
import "runtime/internal/atomic"
// Central list of free objects of a given size.
+//
+//go:notinheap
type mcentral struct {
lock mutex
sizeclass int32
"unsafe"
)
+// finblock is allocated from non-GC'd memory, so any heap pointers
+// must be specially handled.
+//
+//go:notinheap
type finblock struct {
alllink *finblock
next *finblock
// NOTE: Layout known to queuefinalizer.
type finalizer struct {
- fn *funcval // function to call
- arg unsafe.Pointer // ptr to object
+ fn *funcval // function to call (may be a heap pointer)
+ arg unsafe.Pointer // ptr to object (may be a heap pointer)
nret uintptr // bytes of return values from fn
fint *_type // type of first argument of fn
- ot *ptrtype // type of ptr to object
+ ot *ptrtype // type of ptr to object (may be a heap pointer)
}
var finalizer1 = [...]byte{
lock(&finlock)
if finq == nil || finq.cnt == int32(len(finq.fin)) {
if finc == nil {
- // Note: write barrier here, assigning to finc, but should be okay.
finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
finc.alllink = allfin
allfin = finc
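
"Specially handled" is concrete here: because finblocks live outside the GC'd heap, the collector never finds their pointers by scanning; instead the runtime walks allfin during root marking and reports each finalizer's pointer words against a mask built from finalizer1. A simplified sketch of that walk; markWord is an invented stand-in for the real scanblock machinery:

func scanFinalizerRoots(markWord func(unsafe.Pointer)) {
	for fb := allfin; fb != nil; fb = fb.alllink {
		for i := int32(0); i < fb.cnt; i++ {
			f := &fb.fin[i]
			markWord(unsafe.Pointer(f.fn))   // keep the closure alive
			markWord(f.arg)                  // keep the object alive until fn runs
			// f.nret is an integer, not a pointer: skipped by the mask
			markWord(unsafe.Pointer(f.fint)) // type of fn's first argument
			markWord(unsafe.Pointer(f.ot))   // type of the pointer to the object
		}
	}
}
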
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object, but the first word is
// smashed by freeing and reallocating.
+//
+// Consider marking fixalloc'd types go:notinheap.
type fixalloc struct {
size uintptr
first func(arg, p unsafe.Pointer) // called first time p is returned
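
A sketch of why "the first word is smashed": fixalloc-style allocators thread freed objects onto a free list through their first word, the same trick the mlink type below exists for. Names here are invented:

import "unsafe"

type toyFixAlloc struct {
	list unsafe.Pointer // free-list head; each free object's first word is the next link
}

func (f *toyFixAlloc) free(p unsafe.Pointer) {
	*(*unsafe.Pointer)(p) = f.list // clobber the object's first word with the link
	f.list = p
}

func (f *toyFixAlloc) alloc() unsafe.Pointer {
	p := f.list
	if p == nil {
		return nil // the real fixalloc carves a fresh chunk here
	}
	f.list = *(*unsafe.Pointer)(p) // the caller sees this stale link in word 0
	return p
}
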
// this cannot be used by some of the internal GC structures. For example, when
// the sweeper is placing an unmarked object on the free list, it does not want the
// write barrier to be called, since that could result in the object being reachable.
+//
+//go:notinheap
type mlink struct {
next *mlink
}
// A wbufptr holds a workbuf*, but protects it from write barriers.
// workbufs never live on the heap, so write barriers are unnecessary.
// Write barriers on workbuf pointers may also be dangerous in the GC.
+//
+// TODO: Since workbuf is now go:notinheap, this isn't necessary.
type wbufptr uintptr
func wbufptrOf(w *workbuf) wbufptr {
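
wbufptrOf's body is elided by the hunk; it simply reinterprets the pointer, and a matching ptr() method converts back, so wbufptr values round-trip through uintptr and stores of them compile to plain word writes with no write barrier. The same pattern, self-contained with invented names; it is only sound because the pointee never lives on the GC'd heap and never moves:

import "unsafe"

type payload struct{ n int }

type payloadPtr uintptr // hides *payload from the write barrier

func payloadPtrOf(p *payload) payloadPtr { return payloadPtr(unsafe.Pointer(p)) }

func (pp payloadPtr) ptr() *payload { return (*payload)(unsafe.Pointer(pp)) }
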
nobj int
}
+//go:notinheap
type workbuf struct {
workbufhdr
// account for the above fields
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
+//
+// mheap must not be heap-allocated because it contains mSpanLists,
+// which must not be heap-allocated.
+//
+//go:notinheap
type mheap struct {
lock mutex
free [_MaxMHeapList]mSpanList // free lists of given length
// mSpanList heads a linked list of spans.
//
+//go:notinheap
type mSpanList struct {
first *mspan // first span in list, or nil if none
last *mspan // last span in list, or nil if none
}
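
The rationale added to mheap above is a containment rule: a by-value field of a notinheap type (here, the mSpanList arrays) makes heap allocation of the outer type illegal as well, since the inner values would land on the heap with it, so the outer type carries the pragma too. Sketched with invented names:

//go:notinheap
type innerList struct {
	first *innerList
}

// outerOwner holds innerLists by value, so heap-allocating an outerOwner
// would heap-allocate the innerLists inside it; it must be notinheap too.
//go:notinheap
type outerOwner struct {
	lists [4]innerList
}
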
+//go:notinheap
type mspan struct {
next *mspan // next span in list, or nil if none
prev *mspan // previous span in list, or nil if none
// if that happens.
)
+//go:notinheap
type special struct {
next *special // linked list in span
offset uint16 // span offset of object
}
// The described object has a finalizer set for it.
+//
+// specialfinalizer is allocated from non-GC'd memory, so any heap
+// pointers must be specially handled.
+//
+//go:notinheap
type specialfinalizer struct {
special special
- fn *funcval
+ fn *funcval // May be a heap pointer.
nret uintptr
- fint *_type
- ot *ptrtype
+ fint *_type // May be a heap pointer, but always live.
+ ot *ptrtype // May be a heap pointer, but always live.
}
// Adds a finalizer to the object p. Returns true if it succeeded.
}
// The described object is being heap profiled.
+//
+//go:notinheap
type specialprofile struct {
special special
b *bucket
next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}
+//go:notinheap
type gcBits struct {
// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
free uintptr // free is the index into bits of the next free byte.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
+//
+// No heap pointers.
+//
+//go:notinheap
type bucket struct {
next *bucket
allnext *bucket
const pollBlockSize = 4 * 1024
// Network poller descriptor.
+//
+// No heap pointers.
+//
+//go:notinheap
type pollDesc struct {
link *pollDesc // in pollcache, protected by pollcache.lock
}
// traceBuf is a per-P tracing buffer.
+//
+//go:notinheap
type traceBuf struct {
traceBufHeader
arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
+//
+// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr
func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
+//
+//go:notinheap
type traceAllocBlock struct {
next traceAllocBlockPtr
data [64<<10 - sys.PtrSize]byte
}
+// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr
func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
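
A closing note that ties the three TODOs together (wbufptr, traceBufPtr, traceAllocBlockPtr): the uintptr disguise existed only to suppress write barriers, and the compiler already omits barriers for stores of pointers to go:notinheap types, so a plain pointer would now serve. A sketch of the post-cleanup shape, with invented names:

//go:notinheap
type chainBlock struct {
	next *chainBlock
}

var chainHead *chainBlock

func pushChainBlock(b *chainBlock) {
	b.next = chainHead // plain store: no write barrier for notinheap pointers
	chainHead = b
}
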