// The OS init code failed to fetch the physical page size.
throw("failed to get system page size")
}
+ if physPageSize > maxPhysPageSize {
+ print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
+ throw("bad system page size")
+ }
if physPageSize < minPhysPageSize {
print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
throw("bad system page size")
}
if physHugePageSize&(physHugePageSize-1) != 0 {
print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
throw("bad system huge page size")
}
+ if physHugePageSize > maxPhysHugePageSize {
+ // physHugePageSize is greater than the maximum supported huge page size.
+ // Unlike the cases above, don't throw here, since a system configured
+ // this way isn't wrong; we just don't have the code to support it.
+ // Instead, silently set the huge page size to zero.
+ physHugePageSize = 0
+ }
if physHugePageSize != 0 {
// Since physHugePageSize is a power of 2, it suffices to increase
// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
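// As a sketch (assuming physHugePageShift starts at its zero value), the
// search described above amounts to:
//
//	for 1<<physHugePageShift != physHugePageSize {
//		physHugePageShift++
//	}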
// incurs an additional cost), to account for heap fragmentation and
// the ever-changing layout of the heap.
retainExtraPercent = 10
+
+ // maxPagesPerPhysPage is the maximum number of supported runtime pages per
+ // physical page, based on maxPhysPageSize.
+ maxPagesPerPhysPage = maxPhysPageSize / pageSize
)
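// For a concrete sense of scale (assuming the runtime's usual 8 KiB pageSize):
// maxPhysPageSize is 512 KiB, so maxPagesPerPhysPage = 512 KiB / 8 KiB = 64,
// which is why fillAligned below only needs to handle m values up to 64.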
// heapRetained returns an estimate of the current heap RSS.
//
// Note that if m == 1, this is a no-op.
//
-// m must be a power of 2 <= 64.
+// m must be a power of 2 <= maxPagesPerPhysPage.
func fillAligned(x uint64, m uint) uint64 {
apply := func(x uint64, c uint64) uint64 {
// The technique used here is derived from
x = apply(x, 0x7fff7fff7fff7fff)
case 32:
x = apply(x, 0x7fffffff7fffffff)
- case 64:
+ case 64: // == maxPagesPerPhysPage
x = apply(x, 0x7fffffffffffffff)
+ default:
+ throw("bad m value")
}
// Now, the top bit of each m-aligned group in x is set iff
// that group was all zero in the original x.
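// To illustrate the overall semantics with a worked example: for m = 8, the
// input 0x0100a3 has two bytes containing set bits (0x01 and 0xa3) separated
// by an all-zero byte, so
//
//	fillAligned(0x0100a3, 8) == 0xff00ff
//
// that is, every byte containing a set bit is filled with ones and the
// all-zero byte is left as zero.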
// min pages of free-and-unscavenged memory in the region represented by this
// pallocData.
//
-// min must be a non-zero power of 2 <= 64.
+// min must be a non-zero power of 2 <= maxPagesPerPhysPage.
func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
if min&(min-1) != 0 || min == 0 {
print("runtime: min = ", min, "\n")
throw("min must be a non-zero power of 2")
- } else if min > 64 {
+ } else if min > maxPagesPerPhysPage {
print("runtime: min = ", min, "\n")
- throw("physical page sizes > 512 KiB are not supported")
+ throw("min too large")
}
// The goal of this search is to see if the chunk contains any free and unscavenged memory.
// min indicates a hard minimum size and alignment for runs of pages. That is,
// findScavengeCandidate will not return a region smaller than min pages in size,
// or that is min pages or greater in size but not aligned to min. min must be
-// a non-zero power of 2 <= 64.
+// a non-zero power of 2 <= maxPagesPerPhysPage.
//
// max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
// findScavengeCandidate effectively returns entire free and unscavenged regions.
if min&(min-1) != 0 || min == 0 {
print("runtime: min = ", min, "\n")
throw("min must be a non-zero power of 2")
- } else if min > 64 {
+ } else if min > maxPagesPerPhysPage {
print("runtime: min = ", min, "\n")
- throw("physical page sizes > 512 KiB are not supported")
+ throw("min too large")
}
// max is allowed to be less than min, but we need to ensure
// we never truncate further than min.
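// For example (illustrative only): with min = 8 and max = 2, the returned
// region is still at least 8 pages long rather than being truncated down to 2.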
}
start := end - size
+ // Each huge page is guaranteed to fit in a single palloc chunk.
+ //
+ // TODO(mknyszek): Support larger huge page sizes.
+ // TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
+ // so we can write tests for this.
if physHugePageSize > pageSize && physHugePageSize > physPageSize {
// We have huge pages, so let's ensure we don't break one by scavenging
// over a huge page boundary. If the range [start, start+size) overlaps with
"unsafe"
)
-// minPhysPageSize is a lower-bound on the physical page size. The
-// true physical page size may be larger than this. In contrast,
-// sys.PhysPageSize is an upper-bound on the physical page size.
-const minPhysPageSize = 4096
+const (
+ // minPhysPageSize is a lower-bound on the physical page size. The
+ // true physical page size may be larger than this. In contrast,
+ // sys.PhysPageSize is an upper-bound on the physical page size.
+ minPhysPageSize = 4096
+
+ // maxPhysPageSize is the maximum page size the runtime supports.
+ maxPhysPageSize = 512 << 10
+
+ // maxPhysHugePageSize sets an upper-bound on the maximum huge page size
+ // that the runtime supports.
+ maxPhysHugePageSize = pallocChunkBytes
+)
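// For reference, assuming the usual 8 KiB runtime page size and 512-page
// palloc chunks: maxPhysPageSize = 512 << 10 = 512 KiB, and
// maxPhysHugePageSize = pallocChunkBytes = 512 * 8 KiB = 4 MiB, which
// comfortably covers the common 2 MiB x86-64 huge page size.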
// Main malloc heap.
// The heap itself is the "free" and "scav" treaps,