// 64 bit and 32 bit platforms, allowing the tests to share code
// between the two.
//
-// On AIX, the arenaBaseOffset is 0x0a00000000000000. However, this
-// constant can't be used here because it is negative and will cause
-// a constant overflow.
-//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
-var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + 0x0a00000000000000*sys.GoosAix))
+var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*sys.GoosAix))
// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
//
// On other platforms, the user address space is contiguous
// and starts at 0, so no offset is necessary.
- arenaBaseOffset = sys.GoarchAmd64*(1<<47) + (^0x0a00000000000000+1)&uintptrMask*sys.GoosAix
+ arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
// Max number of threads to run garbage collection.
// 2, 3, and 4 are all plausible maximums depending
//
//go:nosplit
func arenaIndex(p uintptr) arenaIdx {
- return arenaIdx((p + arenaBaseOffset) / heapArenaBytes)
+ return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
}
// arenaBase returns the low address of the region covered by heap
// arena i.
func arenaBase(i arenaIdx) uintptr {
- return uintptr(i)*heapArenaBytes - arenaBaseOffset
+ return uintptr(i)*heapArenaBytes + arenaBaseOffset
}
// arenaIdx is an index into the heap arena map; see arenaIndex and
// arenaBase for the address <-> index translation.
type arenaIdx uint
// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
- return chunkIdx((p + arenaBaseOffset) / pallocChunkBytes)
+ return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
}
// chunkIndex returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr {
- return uintptr(ci)*pallocChunkBytes - arenaBaseOffset
+ return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
}
// chunkPageIndex computes the index of the page that contains p,
// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
- return int((addr.a + arenaBaseOffset) >> levelShift[level])
+ return int((addr.a - arenaBaseOffset) >> levelShift[level])
}
// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
- return offAddr{(uintptr(idx) << levelShift[level]) - arenaBaseOffset}
+ return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
}
// addrsToSummaryRange converts base and limit pointers into a range
// of a summary's max page count boundary for this level
// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
// then shift, then add 1, so we get an exclusive upper bound at the end.
- lo = int((base + arenaBaseOffset) >> levelShift[level])
- hi = int(((limit-1)+arenaBaseOffset)>>levelShift[level]) + 1
+ lo = int((base - arenaBaseOffset) >> levelShift[level])
+ hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
return
}
// Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base, limit uintptr) addrRange {
r := addrRange{offAddr{base}, offAddr{limit}}
- if (base+arenaBaseOffset >= arenaBaseOffset) != (limit+arenaBaseOffset >= arenaBaseOffset) {
+ if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
throw("addr range base and limit are not in the same memory segment")
}
return r
var (
// minOffAddr is the minimum address in the offset space, and
- // it corresponds to the virtual address -arenaBaseOffset.
- //
- // We don't initialize this with offAddrFromRaw because allocation
- // may happen during bootstrapping, and we rely on this value
- // being initialized.
- //
- // As a result, creating this value in Go is tricky because of
- // overflow not being allowed in constants. In order to get
- // the value we want, we take arenaBaseOffset and do a manual
- // two's complement negation, then mask that into what can fit
- // into a uintptr.
- minOffAddr = offAddr{((^arenaBaseOffset) + 1) & uintptrMask}
+ // it corresponds to the virtual address arenaBaseOffset.
+ minOffAddr = offAddr{arenaBaseOffset}
// maxOffAddr is the maximum address in the offset address
- // space, and it corresponds to the virtual address
- // ^uintptr(0) - arenaBaseOffset.
- //
- // We don't initialize this with offAddrFromRaw because allocation
- // may happen during bootstrapping, and we rely on this value
- // being initialized.
- maxOffAddr = offAddr{^uintptr(0) - arenaBaseOffset}
+ // space. It corresponds to the highest virtual address representable
+ // by the page alloc chunk and heap arena maps.
+ maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
)
// offAddr represents an address in a contiguous view
// of the address space on systems where the address space is
// segmented. On other systems, it's just a normal address.
type offAddr struct {
	// a is just the virtual address, but should never be used
	// directly. Call addr() to get this value instead.
	a uintptr
}
// lessThan returns true if l1 is less than l2 in the offset
// address space.
func (l1 offAddr) lessThan(l2 offAddr) bool {
- return (l1.a + arenaBaseOffset) < (l2.a + arenaBaseOffset)
+ return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
}
// lessEqual returns true if l1 is less than or equal to l2 in
// the offset address space.
func (l1 offAddr) lessEqual(l2 offAddr) bool {
- return (l1.a + arenaBaseOffset) <= (l2.a + arenaBaseOffset)
+ return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
}
// equal returns true if the two offAddr values are equal.