//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
- whichByte := allocBitIndex / 8
- whichBit := allocBitIndex % 8
- bytePtr := addb(s.allocBits, whichByte)
- return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
+ bytep, mask := s.allocBits.bitp(allocBitIndex)
+ return markBits{bytep, mask, allocBitIndex}
}
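// For example, allocBitIndex 13 resolves to the byte at s.allocBits+1
// (13/8 = 1) with mask 1<<5 = 0x20 (13%8 = 5).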
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
- bytes := (*[8]uint8)(unsafe.Pointer(addb(s.allocBits, whichByte)))
+ bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
aCache := uint64(0)
aCache |= uint64(bytes[0])
aCache |= uint64(bytes[1]) << (1 * 8)
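// bytes[2] through bytes[7] are accumulated the same way; per the
// comment above, the assembled value is then stored negated,
// s.allocCache = ^aCache, so ctz can locate the next free object.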
// isFree reports whether the index'th object in s is unallocated.
func (s *mspan) isFree(index uintptr) bool {
if index < s.freeindex {
return false
}
- whichByte := index / 8
- whichBit := index % 8
- byteVal := *addb(s.allocBits, whichByte)
- return byteVal&uint8(1<<whichBit) == 0
+ bytep, mask := s.allocBits.bitp(index)
+ return *bytep&mask == 0
}
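// isFree can short-circuit the bitmap lookup for index < s.freeindex
// because every object below freeindex is known to be allocated.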
func (s *mspan) objIndex(p uintptr) uintptr {
}
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
- whichByte := objIndex / 8
- bitMask := uint8(1 << (objIndex % 8)) // low 3 bits hold the bit index
- bytePtr := addb(s.gcmarkBits, whichByte)
- return markBits{bytePtr, bitMask, objIndex}
+ bytep, mask := s.gcmarkBits.bitp(objIndex)
+ return markBits{bytep, mask, objIndex}
}
func (s *mspan) markBitsForBase() markBits {
- return markBits{s.gcmarkBits, uint8(1), 0}
+ return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}
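// The (*uint8) conversion is needed because markBits.bytep is a plain
// *uint8 while s.gcmarkBits is now a *gcBits; mask 1 selects bit 0,
// which corresponds to the object at the span's base address.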
// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
return m.mask&*m.bytep != 0
}
// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
count := 0
maxIndex := s.nelems / 8
for i := uintptr(0); i < maxIndex; i++ {
- mrkBits := *addb(s.gcmarkBits, i)
+ mrkBits := *s.gcmarkBits.bytep(i)
count += int(oneBitCount[mrkBits])
}
if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
- mrkBits := *addb(s.gcmarkBits, maxIndex)
+ mrkBits := *s.gcmarkBits.bytep(maxIndex)
mask := uint8((1 << bitsInLastByte) - 1)
bits := mrkBits & mask
count += int(oneBitCount[bits])
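// For example, with s.nelems = 203 the loop counts 25 full bytes and
// the tail masks the last byte with (1<<3)-1 = 0x07, so the unused
// high bits beyond object 202 are not counted.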
// The sweep will free the old allocBits and set allocBits to the
// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
// out memory.
- allocBits *uint8
- gcmarkBits *uint8
+ allocBits *gcBits
+ gcmarkBits *gcBits
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
// if sweepgen == h->sweepgen - 1, the span is currently being swept
// if sweepgen == h->sweepgen, the span is swept and ready to use
// h->sweepgen is incremented by 2 after every GC
}
}
+// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
+//
+//go:notinheap
+type gcBits uint8
+
+// bytep returns a pointer to the n'th byte of b.
+func (b *gcBits) bytep(n uintptr) *uint8 {
+ return addb((*uint8)(b), n)
+}
+
+// bitp returns a pointer to the byte containing bit n and a mask for
+// selecting that bit from *bytep.
+func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
+ return b.bytep(n / 8), 1 << (n % 8)
+}
+
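// Illustrative sketch (not part of this change): the same byte/mask
// arithmetic written against a plain []uint8 so it can run outside the
// runtime; the names bitmap, setBit, and isSet are hypothetical.
//
//	func setBit(bitmap []uint8, n uintptr) { bitmap[n/8] |= 1 << (n % 8) }
//	func isSet(bitmap []uint8, n uintptr) bool { return bitmap[n/8]&(1<<(n%8)) != 0 }
//
// For n = 19 both index byte 19/8 = 2 and use mask 1<<(19%8) = 0x08.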
const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
free uintptr // free is the index into bits of the next free byte; read/write atomically
next *gcBitsArena
- bits [gcBitsChunkBytes - gcBitsHeaderBytes]uint8
+ bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}
var gcBitsArenas struct {
// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
-func (b *gcBitsArena) tryAlloc(bytes uintptr) *uint8 {
+func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
return nil
}
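// In the full function, a successful allocation atomically bumps b.free
// (atomic.Xadduintptr), re-checks that the new end still fits within
// b.bits, and returns a pointer to the start of the reserved bytes.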
// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
-func newMarkBits(nelems uintptr) *uint8 {
+func newMarkBits(nelems uintptr) *gcBits {
blocksNeeded := uintptr((nelems + 63) / 64)
bytesNeeded := blocksNeeded * 8
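// For example, nelems = 513 needs ceil(513/8) = 65 bytes of bits but
// gets blocksNeeded = (513+63)/64 = 9 blocks, i.e. bytesNeeded = 72,
// so the bitmap can always be read as whole uint64s (see
// refillAllocCache).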
// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans
// allocation bits. For spans not being initialized the
// mark bits are repurposed as allocation bits when
// the span is swept.
-func newAllocBits(nelems uintptr) *uint8 {
+func newAllocBits(nelems uintptr) *gcBits {
return newMarkBits(nelems)
}
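// A minimal sketch (assumed shape, simplified) of the swap described in
// the mspan comment above: at the end of sweeping a span, the mark bits
// become the new alloc bits and fresh zeroed mark bits are installed.
//
//	s.allocBits = s.gcmarkBits
//	s.gcmarkBits = newMarkBits(s.nelems)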