// These "redzones" are marked as unaddressable.
var asanRZ uintptr
if asanenabled {
- asanRZ = computeRZlog(size)
+ asanRZ = redZoneSize(size)
size += asanRZ
}
// Poison the space between the end of the requested size of x
// and the end of the slot. Unpoison the requested allocation.
frag := elemsize - size
- if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) {
+ if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-mallocHeaderSize {
frag -= mallocHeaderSize
}
- asanpoison(unsafe.Add(x, size-asanRZ), asanRZ+frag)
+ asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
asanunpoison(x, size-asanRZ)
}
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, size)
}
- c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+ // initHeapBits already set the pointer bits for the 8-byte sizeclass
+ // on 64-bit platforms.
+ c.scanAlloc += 8
+ } else {
+ c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
+ }
size = uintptr(class_to_size[sizeclass])
// Ensure that the stores above that initialize x to
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
-// computeRZlog computes the size of the redzone.
+// redZoneSize computes the size of the redzone for a given allocation.
// Refer to the implementation in compiler-rt.
-func computeRZlog(userSize uintptr) uintptr {
+func redZoneSize(userSize uintptr) uintptr {
switch {
case userSize <= (64 - 16):
return 16 << 0
//
//go:nosplit
func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
- if goarch.PtrSize == 8 && dataSize == goarch.PtrSize {
- // Already set by initHeapBits.
- return
- }
-
// The objects here are always really small, so a single load is sufficient.
src0 := readUintptr(typ.GCData)
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
} else {
+ // N.B. We rely on dataSize being an exact multiple of the type size.
+ // The alternative would be to defensively mask src down to the length
+ // of dataSize; skipping that saves one additional masking operation.
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
+ if asanenabled {
+ // Mask src down to dataSize. dataSize is going to be a strange size because of
+ // the redzone required for allocations when asan is enabled.
+ src &= (1 << (dataSize / goarch.PtrSize)) - 1
+ }
}
// Since we're never writing more than one uintptr's worth of bits, we're either going