return t.TFlag&TFlagNamed != 0
}
+// Pointers reports whether t contains pointers.
func (t *Type) Pointers() bool { return t.PtrBytes != 0 }
// IfaceIndir reports whether t is stored indirectly in an interface value.
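
For readers of the converted call sites below: PtrBytes is the number of prefix bytes of a value of type t that can contain pointers, so the new helper is just a readable wrapper around the PtrBytes != 0 test. A minimal sketch of the typical call-site shape, in runtime-internal style (hypothetical code, not part of this change):

// Hypothetical sketch, not part of this change: choose a clearing routine
// based on whether the type contains any pointer words, the pattern the
// converted call sites below all follow.
func clearTyped(t *abi.Type, p unsafe.Pointer) {
	if t.Pointers() {
		memclrHasPointers(p, t.Size_) // clears with the pre-write barriers the GC needs
	} else {
		memclrNoHeapPointers(p, t.Size_) // no pointer words, so no barriers needed
	}
}
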
typ := v.Type().Elem().common()
size := typ.Size()
- hasPtr := typ.PtrBytes != 0
+ hasPtr := typ.Pointers()
// Some common & small cases, without using memmove:
if hasPtr {
hard := func(v1, v2 Value) bool {
switch v1.Kind() {
case Pointer:
- if v1.typ().PtrBytes == 0 {
+ if !v1.typ().Pointers() {
// not-in-heap pointers can't be cyclic.
// At least, all of our current uses of runtime/internal/sys.NotInHeap
// have that property. The runtime ones aren't cyclic (and we don't use
}
// Expand frame type's GC bitmap into byte-map.
- ptrs = ft.PtrBytes != 0
+ ptrs = ft.Pointers()
if ptrs {
nptrs := ft.PtrBytes / goarch.PtrSize
gcdata := ft.GcSlice(0, (nptrs+7)/8)
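
The hunk above reads ft.PtrBytes/goarch.PtrSize pointer-word bits out of the frame type's packed GC bitmap. As a standalone illustration of the "bitmap into byte-map" expansion the comment mentions (a sketch assuming the usual LSB-first bit layout, not the reflect implementation):

// Standalone sketch, not the reflect code: expand a packed one-bit-per-word
// GC bitmap into one byte per pointer word so entries can be indexed directly.
// gcdata holds (nptrs+7)/8 bytes, matching the GcSlice call above.
func expandGCMask(gcdata []byte, nptrs uintptr) []byte {
	out := make([]byte, nptrs)
	for i := uintptr(0); i < nptrs; i++ {
		out[i] = (gcdata[i/8] >> (i % 8)) & 1 // 1 means "this word may hold a pointer"
	}
	return out
}
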
typ := v.Type().Elem().common()
size := typ.Size()
- hasPtr := typ.PtrBytes != 0
+ hasPtr := typ.Pointers()
// Some common & small cases, without using memmove:
if hasPtr {
panic("reflect: bad size computation in MapOf")
}
- if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
+ if ktyp.Pointers() || etyp.Pointers() {
nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
n := (nptr + 7) / 8
mask := make([]byte, n)
base := uintptr(abi.MapBucketCount / goarch.PtrSize)
- if ktyp.PtrBytes != 0 {
+ if ktyp.Pointers() {
emitGCMask(mask, base, ktyp, abi.MapBucketCount)
}
base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
- if etyp.PtrBytes != 0 {
+ if etyp.Pointers() {
emitGCMask(mask, base, etyp, abi.MapBucketCount)
}
base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
}
}
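
A rough worked example of the bucket-mask arithmetic above (illustrative values, not from this change): on 64-bit (goarch.PtrSize = 8) with abi.MapBucketCount = 8, a key type *int (Size_ = 8, one pointer word) and an element type string (Size_ = 16, pointer word first) give:

// nptr = (8*(1+8+16) + 8) / 8 = 26  // tophash bytes + keys + elems + overflow pointer, in words
// n    = (26 + 7) / 8         = 4   // mask bytes
// base = 8 / 8                = 1   // skip the 8 tophash bytes (1 word)
// Keys then cover words 1..8, elements words 9..24, and word 25 is the
// trailing overflow pointer, accounting for all 26 words.
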
array.Size_ = typ.Size_ * uintptr(length)
- if length > 0 && typ.PtrBytes != 0 {
+ if length > 0 && typ.Pointers() {
array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
}
array.Align_ = typ.Align_
array.Slice = &(SliceOf(elem).(*rtype).t)
switch {
- case typ.PtrBytes == 0 || array.Size_ == 0:
+ case !typ.Pointers() || array.Size_ == 0:
// No pointers.
array.GCData = nil
array.PtrBytes = 0
}
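
A short worked example of the array PtrBytes formula above (illustrative values, not from this change):

// On 64-bit, take elem = struct{ p *int; n int }: typ.Size_ = 16, typ.PtrBytes = 8.
// For an array of length 4:
//   array.PtrBytes = 16*(4-1) + 8 = 56
// The GC never needs to scan past the last pointer word, so the 8 pointer-free
// trailing bytes of the final element are excluded from PtrBytes.
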
func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
- if t.PtrBytes == 0 {
+ if !t.Pointers() {
return
}
k := v.kind()
switch k {
case Pointer:
- if v.typ().PtrBytes == 0 {
+ if !v.typ().Pointers() {
val := *(*uintptr)(v.ptr)
// Since it is a not-in-heap pointer, all pointers to the heap are
// forbidden! See comment in Value.Elem and issue #48399.
k := v.kind()
switch k {
case Pointer:
- if v.typ().PtrBytes == 0 {
+ if !v.typ().Pointers() {
// Since it is a not-in-heap pointer, all pointers to the heap are
// forbidden! See comment in Value.Elem and issue #48399.
if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) {
mp.mallocing = 1
var ptr unsafe.Pointer
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
// Allocate pointer-less objects from the tail end of the chunk.
v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_)
if ok {
throw("arena chunk needs zeroing, but should already be zeroed")
}
// Set up heap bitmap and do extra accounting.
- if typ.PtrBytes != 0 {
+ if typ.Pointers() {
if cap >= 0 {
userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
} else {
// level, where Go pointers are allowed. Go pointers to pinned objects are
// allowed as long as they don't reference other unpinned pointers.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
- if t.PtrBytes == 0 || p == nil {
+ if !t.Pointers() || p == nil {
// If the type has no pointers there is nothing to do.
return
}
if !top && !isPinned(p) {
panic(errorString(msg))
}
- if st.Elem.PtrBytes == 0 {
+ if !st.Elem.Pointers() {
return
}
for i := 0; i < s.cap; i++ {
return
}
for _, f := range st.Fields {
- if f.Typ.PtrBytes == 0 {
+ if !f.Typ.Pointers() {
continue
}
cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg)
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
return
}
if !cgoIsGoPointer(src) {
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
return
}
if !cgoIsGoPointer(src) {
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
- if typ.PtrBytes == 0 {
+ if !typ.Pointers() {
return
}
c = (*hchan)(mallocgc(hchanSize, nil, true))
// Race detector uses this location for synchronization.
c.buf = c.raceaddr()
- case elem.PtrBytes == 0:
+ case !elem.Pointers():
// Elements do not contain pointers.
// Allocate hchan and buf in one call.
c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
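
A minimal sketch of what the single-allocation case above amounts to (hedged; the code following this hunk is not shown): with a pointer-free element type, the buffer can simply start right after the hchan header inside the same allocation.

// Sketch, not necessarily the exact code that follows this hunk:
c.buf = add(unsafe.Pointer(c), hchanSize) // buf points into the same allocation
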
// Note that we allow unaligned pointers if the types they point to contain
// no pointers themselves. See issue 37298.
// TODO(mdempsky): What about fieldAlign?
- if elem.PtrBytes != 0 && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
+ if elem.Pointers() && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
throw("checkptr: misaligned pointer conversion")
}
// Round up the size to the size class to make the benchmark a little more
// realistic. However, validate it, to make sure this is safe.
- allocSize := roundupsize(size, t.PtrBytes == 0)
+ allocSize := roundupsize(size, !t.Pointers())
if s.npages*pageSize < allocSize {
panic("backing span not large enough for benchmark")
}
dwritebyte('.')
dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
}
- dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0)
+ dumpbool(t.Kind_&kindDirectIface == 0 || t.Pointers())
}
// dump an object.
var span *mspan
var header **_type
var x unsafe.Pointer
- noscan := typ == nil || typ.PtrBytes == 0
+ noscan := typ == nil || !typ.Pointers()
// In some cases block zeroing can profitably (for latency reduction purposes)
// be delayed till preemption is possible; delayedZeroing tracks that state.
delayedZeroing := false
// Array allocation. If there are any
// pointers, GC has to scan to the last
// element.
- if typ.PtrBytes != 0 {
+ if typ.Pointers() {
scanSize = dataSize - typ.Size_ + typ.PtrBytes
}
} else {
ovf = (*bmap)(newobject(t.Bucket))
}
h.incrnoverflow()
- if t.Bucket.PtrBytes == 0 {
+ if !t.Bucket.Pointers() {
h.createOverflow()
*h.extra.overflow = append(*h.extra.overflow, ovf)
}
// used with this value of b.
nbuckets += bucketShift(b - 4)
sz := t.Bucket.Size_ * nbuckets
- up := roundupsize(sz, t.Bucket.PtrBytes == 0)
+ up := roundupsize(sz, !t.Bucket.Pointers())
if up != sz {
nbuckets = up / t.Bucket.Size_
}
// but may not be empty.
buckets = dirtyalloc
size := t.Bucket.Size_ * nbuckets
- if t.Bucket.PtrBytes != 0 {
+ if t.Bucket.Pointers() {
memclrHasPointers(buckets, size)
} else {
memclrNoHeapPointers(buckets, size)
// Only clear key if there are pointers in it.
if t.IndirectKey() {
*(*unsafe.Pointer)(k) = nil
- } else if t.Key.PtrBytes != 0 {
+ } else if t.Key.Pointers() {
memclrHasPointers(k, t.Key.Size_)
}
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
*(*unsafe.Pointer)(e) = nil
- } else if t.Elem.PtrBytes != 0 {
+ } else if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
- if t.Bucket.PtrBytes == 0 {
+ if !t.Bucket.Pointers() {
// Allocate the current slice and remember pointers to both current and old.
// This preserves all relevant overflow buckets alive even if
// the table grows and/or overflow buckets are added to the table
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
- if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
+ if goarch.PtrSize == 4 && t.Key.Pointers() {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
}
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
- if t.Elem.PtrBytes != 0 {
+ if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
continue
}
// Only clear key if there are pointers in it.
- if t.Key.PtrBytes != 0 {
+ if t.Key.Pointers() {
if goarch.PtrSize == 8 {
*(*unsafe.Pointer)(k) = nil
} else {
memclrHasPointers(k, 8)
}
}
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
- if t.Elem.PtrBytes != 0 {
+ if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if t.Key.PtrBytes != 0 && writeBarrier.enabled {
+ if t.Key.Pointers() && writeBarrier.enabled {
if goarch.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
}
// Clear key's pointer.
k.str = nil
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
- if t.Elem.PtrBytes != 0 {
+ if t.Elem.Pointers() {
memclrHasPointers(e, t.Elem.Size_)
} else {
memclrNoHeapPointers(e, t.Elem.Size_)
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
if dst == src {
return
}
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// This always copies a full value of type typ so it's safe
// to pass typ along as an optimization. See the comment on
// bulkBarrierPreWrite.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
- if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
+ if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize {
// Pass nil for the type. dst does not point to value of type typ,
// but rather points into one, so applying the optimization is not
// safe. See the comment on this function.
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
- if elemType.PtrBytes == 0 {
+ if !elemType.Pointers() {
return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
}
return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// This always clears a whole value of type typ, so it's
// safe to pass a type here and apply the optimization.
// See the comment on bulkBarrierPreWrite.
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// Pass nil for the type. ptr does not point to value of type typ,
// but rather points into one so it's not safe to apply the optimization.
// See the comment on this function in the reflect package and the
//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
size := typ.Size_ * uintptr(len)
- if writeBarrier.enabled && typ.PtrBytes != 0 {
+ if writeBarrier.enabled && typ.Pointers() {
// This always clears whole elements of an array, so it's
// safe to pass a type here. See the comment on bulkBarrierPreWrite.
bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
if uintptr(e.data) != base {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
- if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize {
+ if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize {
throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
}
}
}
var to unsafe.Pointer
- if et.PtrBytes == 0 {
+ if !et.Pointers() {
to = mallocgc(tomem, nil, false)
if copymem < tomem {
memclrNoHeapPointers(add(to, copymem), tomem-copymem)
// For 1 we don't need any division/multiplication.
// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
// For powers of 2, use a variable shift.
- noscan := et.PtrBytes == 0
+ noscan := !et.Pointers()
switch {
case et.Size_ == 1:
lenmem = uintptr(oldLen)
}
var p unsafe.Pointer
- if et.PtrBytes == 0 {
+ if !et.Pointers() {
p = mallocgc(capmem, nil, false)
// The append() that calls growslice is going to overwrite from oldLen to newLen.
// Only clear the part that will not be overwritten.
// the memory will be overwritten by an append() that called growslice.
// Since the caller of reflect_growslice is not append(),
// zero out this region before returning the slice to the reflect package.
- if et.PtrBytes == 0 {
+ if !et.Pointers() {
oldcapmem := uintptr(old.cap) * et.Size_
newlenmem := uintptr(new.len) * et.Size_
memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)