// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
lock(&mheap_.lock)
c := (*mcache)(mheap_.cachealloc.alloc())
unlock(&mheap_.lock)
- memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
for i := 0; i < _NumSizeClasses; i++ {
c.alloc[i] = &emptymspan
}
// Malloc uses a FixAlloc wrapped around sysAlloc to manages its
// MCache and MSpan objects.
//
// Memory returned by fixalloc.alloc is zeroed by default, but the
// caller may take responsibility for zeroing allocations by setting
// the zero flag to false. This is only safe if the memory never
// contains heap pointers.
//
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
nchunk uint32
inuse uintptr // in-use bytes now
stat *uint64
+ zero bool // zero allocations
}
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
f.nchunk = 0
f.inuse = 0
f.stat = stat
+ f.zero = true
}
func (f *fixalloc) alloc() unsafe.Pointer {
v := unsafe.Pointer(f.list)
f.list = f.list.next
f.inuse += f.size
+ if f.zero {
+ memclr(v, f.size)
+ }
return v
}
if uintptr(f.nchunk) < f.size {
h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
+ // Don't zero mspan allocations. Background sweeping can
+ // inspect a span concurrently with allocating it, so it's
+ // important that the span's sweepgen survive across freeing
+ // and re-allocating a span to prevent background sweeping
+ // from improperly cas'ing it from 0.
+ //
+ // This is safe because mspan contains no heap pointers.
+ h.spanalloc.zero = false
+
// h->mapcache needs no init
for i := range h.free {
h.free[i].init()
// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
// span is *not* zeroed.
span.next = nil
span.prev = nil
span.list = nil