// buf points into the same allocation, elemtype is persistent.
// SudoG's are referenced from their owning thread so they can't be collected.
// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
- c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, flagNoScan))
+ c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
if size > 0 && elem.size != 0 {
c.buf = add(unsafe.Pointer(c), hchanSize)
} else {
const (
debugMalloc = false
- flagNoScan = _FlagNoScan
- flagNoZero = _FlagNoZero
-
maxTinySize = _TinySize
tinySizeClass = _TinySizeClass
maxSmallSize = _MaxSmallSize
// base address for all 0-byte allocations
var zerobase uintptr
-const (
- // flags to malloc
- _FlagNoScan = 1 << 0 // GC doesn't have to scan object
- _FlagNoZero = 1 << 1 // don't zero memory
-)
-
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
-func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
+func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
return unsafe.Pointer(&zerobase)
}
- if flags&flagNoScan == 0 && typ == nil {
- throw("malloc missing type")
- }
-
if debug.sbrk != 0 {
align := uintptr(16)
if typ != nil {
c := gomcache()
var s *mspan
var x unsafe.Pointer
+ noscan := typ == nil || typ.kind&kindNoPointers != 0
if size <= maxSmallSize {
- if flags&flagNoScan != 0 && size < maxTinySize {
+ if noscan && size < maxTinySize {
// Tiny allocator.
//
// Tiny allocator combines several tiny allocation requests
// into a single memory block. The resulting memory block
// is freed when all subobjects are unreachable. The subobjects
- // must be FlagNoScan (don't have pointers), this ensures that
+ // must be noscan (have no pointers); this ensures that
// the amount of potentially wasted memory is bounded.
//
// Size of the memory block used for combining (maxTinySize) is tunable.
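//
// For example, with the current 16-byte maxTinySize a pointer-free 12-byte
// allocation followed by a 4-byte one can share a single tiny block, and
// that block is freed only once both subobjects become unreachable.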
// prefetchnta offers best performance, see change list message.
prefetchnta(uintptr(v.ptr().next))
x = unsafe.Pointer(v)
- if flags&flagNoZero == 0 {
+ if needzero {
v.ptr().next = 0
if size > 2*sys.PtrSize && ((*[2]uintptr)(x))[1] != 0 {
memclr(unsafe.Pointer(v), size)
var s *mspan
shouldhelpgc = true
systemstack(func() {
- s = largeAlloc(size, flags)
+ s = largeAlloc(size, needzero)
})
x = unsafe.Pointer(uintptr(s.start << pageShift))
size = s.elemsize
}
- if flags&flagNoScan != 0 {
+ if noscan {
// All objects are pre-marked as noscan. Nothing to do.
} else {
// If allocating a defer+arg block, now that we've picked a malloc size
return x
}
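The old flag combinations collapse onto the two remaining inputs. A sketch of the mapping applied in the hunks below (illustrative lines, not code from the tree):

	mallocgc(size, typ, 0)                     -> mallocgc(size, typ, true)  // zeroed; noscan now derived from typ inside mallocgc
	mallocgc(size, nil, flagNoScan)            -> mallocgc(size, nil, true)  // zeroed, noscan via typ == nil
	mallocgc(size, nil, flagNoScan|flagNoZero) -> mallocgc(size, nil, false) // noscan and not zeroed; the caller must initialize the memory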
-func largeAlloc(size uintptr, flag uint32) *mspan {
+func largeAlloc(size uintptr, needzero bool) *mspan {
// print("largeAlloc size=", size, "\n")
if size+_PageSize < size {
// pays the debt down to npage pages.
deductSweepCredit(npages*_PageSize, npages)
- s := mheap_.alloc(npages, 0, true, flag&_FlagNoZero == 0)
+ s := mheap_.alloc(npages, 0, true, needzero)
if s == nil {
throw("out of memory")
}
// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
- flags := uint32(0)
- if typ.kind&kindNoPointers != 0 {
- flags |= flagNoScan
- }
- return mallocgc(typ.size, typ, flags)
+ return mallocgc(typ.size, typ, true)
}
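newobject keeps passing needzero=true because new(T) must return a zeroed value; only the scan decision, formerly flagNoScan, is now derived from typ inside mallocgc. A minimal sketch of the caller-visible guarantee (hypothetical example, not part of the change):

package main

type point struct{ x, y int }

func main() {
	p := new(point)   // lowered to runtime.newobject, now mallocgc(size, typ, true)
	println(p.x, p.y) // 0 0: the needzero=true path always hands back zeroed memory
}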
//go:linkname reflect_unsafe_New reflect.unsafe_New
// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n uintptr) unsafe.Pointer {
- flags := uint32(0)
- if typ.kind&kindNoPointers != 0 {
- flags |= flagNoScan
- }
if int(n) < 0 || n > maxSliceCap(typ.size) {
panic(plainError("runtime: allocation size out of range"))
}
- return mallocgc(typ.size*n, typ, flags)
+ return mallocgc(typ.size*n, typ, true)
}
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
return newarray(typ, n)
}
-// rawmem returns a chunk of pointerless memory. It is
-// not zeroed.
-func rawmem(size uintptr) unsafe.Pointer {
- return mallocgc(size, nil, flagNoScan|flagNoZero)
-}
-
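Former rawmem callers now spell the same request directly, as the string.go and slice.go hunks below show (sketch):

	p := mallocgc(size, nil, false) // pointer-free (typ == nil) and not zeroed; the caller must fill it before it is read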
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
mp.mcache.next_sample = nextSample()
mProf_Malloc(x, size)
// all not yet finalized objects are stored in finq.
- // If we do not mark it as FlagNoScan,
- // the last finalized object is not collected.
+ // It must be allocated noscan (typ == nil below); if the frame were
+ // scanned, the reference it holds would keep the last finalized
+ // object from ever being collected.
- frame = mallocgc(framesz, nil, flagNoScan)
+ frame = mallocgc(framesz, nil, true)
framecap = framesz
}
// Initialize stack and goroutine for note handling.
mp.gsignal = malg(32 * 1024)
mp.gsignal.m = mp
- mp.notesig = (*int8)(mallocgc(_ERRMAX, nil, _FlagNoScan))
+ mp.notesig = (*int8)(mallocgc(_ERRMAX, nil, true))
// Initialize stack for handling strings from the
// errstr system call, as used in package syscall.
- mp.errstr = (*byte)(mallocgc(_ERRMAX, nil, _FlagNoScan))
+ mp.errstr = (*byte)(mallocgc(_ERRMAX, nil, true))
}
func msigsave(mp *m) {
if d == nil {
// Allocate new defer+args.
total := roundupsize(totaldefersize(uintptr(siz)))
- d = (*_defer)(mallocgc(total, deferType, 0))
+ d = (*_defer)(mallocgc(total, deferType, true))
}
d.siz = siz
gp := mp.curg
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
- // flagNoScan is safe here, because all objects are also referenced from cases.
+ // A noscan allocation (typ == nil) is safe here, because all objects are also referenced from cases.
size := selectsize(uintptr(len(cases)))
- sel := (*hselect)(mallocgc(size, nil, flagNoScan))
+ sel := (*hselect)(mallocgc(size, nil, true))
newselect(sel, int64(size), int32(len(cases)))
r := new(bool)
for i := range cases {
// when someone does make([]T, bignumber). 'cap out of range' is true too,
// but since the cap is only being supplied implicitly, saying len is clearer.
// See issue 4085.
-
maxElements := maxSliceCap(et.size)
len := int(len64)
if len64 < 0 || int64(len) != len64 || uintptr(len) > maxElements {
panic(errorString("makeslice: len out of range"))
}
- var flags uint32
- if et.kind&kindNoPointers != 0 {
- flags = flagNoScan
- }
- p := mallocgc(et.size*uintptr(cap), et, flags)
+ p := mallocgc(et.size*uintptr(cap), et, true)
return slice{p, len, cap}
}
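makeslice stays on needzero=true because make([]T, len, cap) must return zeroed elements, e.g. make([]int, 3) has to observably be []int{0, 0, 0}. growslice, in the next hunk, is able to pass false on its pointer-free path because it copies the old contents into place and then explicitly clears the rest of the new capacity.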
var p unsafe.Pointer
if et.kind&kindNoPointers != 0 {
- p = rawmem(capmem)
+ p = mallocgc(capmem, nil, false)
memmove(p, old.array, lenmem)
memclr(add(p, lenmem), capmem-lenmem)
} else {
- // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
+ // Note: the allocation must be zeroed (needzero=true) here, because otherwise the GC could scan uninitialized memory.
- p = mallocgc(capmem, et, 0)
+ p = mallocgc(capmem, et, true)
if !writeBarrier.enabled {
memmove(p, old.array, lenmem)
} else {
// The storage is not zeroed. Callers should use
// b to set the string contents and then drop b.
func rawstring(size int) (s string, b []byte) {
- p := mallocgc(uintptr(size), nil, flagNoScan|flagNoZero)
+ p := mallocgc(uintptr(size), nil, false)
stringStructOf(&s).str = p
stringStructOf(&s).len = size
// rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
func rawbyteslice(size int) (b []byte) {
cap := roundupsize(uintptr(size))
- p := mallocgc(cap, nil, flagNoScan|flagNoZero)
+ p := mallocgc(cap, nil, false)
if cap != uintptr(size) {
memclr(add(p, uintptr(size)), cap-uintptr(size))
}
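// For example (a sketch): rawbyteslice(10) gets the 16-byte size class from
// roundupsize, so bytes 10..15 are cleared above while bytes 0..9 are left
// for the caller to fill.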
throw("out of memory")
}
mem := roundupsize(uintptr(size) * 4)
- p := mallocgc(mem, nil, flagNoScan|flagNoZero)
+ p := mallocgc(mem, nil, false)
if mem != uintptr(size)*4 {
memclr(add(p, uintptr(size)*4), mem-uintptr(size)*4)
}