func sysUsed(v unsafe.Pointer, n uintptr) {
}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
func sysUsed(v unsafe.Pointer, n uintptr) {
}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
madvise(v, n, _MADV_FREE_REUSE)
}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
func sysUsed(v unsafe.Pointer, n uintptr) {
}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
}
func sysUsed(v unsafe.Pointer, n uintptr) {
- if physHugePageSize != 0 {
- // Partially undo the NOHUGEPAGE marks from sysUnused
- // for whole huge pages between v and v+n. This may
- // leave huge pages off at the end points v and v+n
- // even though allocations may cover these entire huge
- // pages. We could detect this and undo NOHUGEPAGE on
- // the end points as well, but it's probably not worth
- // the cost because when neighboring allocations are
- // freed sysUnused will just set NOHUGEPAGE again.
+ // Partially undo the NOHUGEPAGE marks from sysUnused
+ // for whole huge pages between v and v+n. This may
+ // leave huge pages off at the end points v and v+n
+ // even though allocations may cover these entire huge
+ // pages. We could detect this and undo NOHUGEPAGE on
+ // the end points as well, but it's probably not worth
+ // the cost because when neighboring allocations are
+ // freed sysUnused will just set NOHUGEPAGE again.
+ sysHugePage(v, n)
+}
+
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+ if physHugePageSize != 0 {
// Round v up to a huge page boundary.
beg := (uintptr(v) + (physHugePageSize - 1)) &^ (physHugePageSize - 1)
// Round v+n down to a huge page boundary.
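For readers unfamiliar with the bit trick above, here is a minimal, self-contained sketch (not part of this change) of the boundary arithmetic sysHugePage relies on: round v up and v+n down to a huge-page boundary so that only whole huge pages inside [v, v+n) are touched. It assumes the huge page size is a power of two, as physHugePageSize is in the runtime; hugePageBounds is a hypothetical name used only for illustration.

```go
package main

import "fmt"

// hugePageBounds mirrors the rounding in sysHugePage: round start up and
// end down to hugePageSize, which must be a power of two. Only whole huge
// pages contained in [v, v+n) fall between the returned bounds.
func hugePageBounds(v, n, hugePageSize uintptr) (beg, end uintptr, ok bool) {
	beg = (v + (hugePageSize - 1)) &^ (hugePageSize - 1) // round v up
	end = (v + n) &^ (hugePageSize - 1)                  // round v+n down
	return beg, end, beg < end
}

func main() {
	const hugePage = 2 << 20 // assume 2 MiB huge pages
	// A 5 MiB range starting 1 MiB past a huge-page boundary contains
	// exactly two whole huge pages.
	beg, end, ok := hugePageBounds(1<<20, 5<<20, hugePage)
	fmt.Println(ok, (end-beg)/hugePage) // true 2
}
```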
func sysUsed(v unsafe.Pointer, n uintptr) {
}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
// sysReserve has already allocated all heap memory,
// but has not adjusted stats.
}
}
+func sysHugePage(v unsafe.Pointer, n uintptr) {
+}
+
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//go:nosplit
h.free.insert(other)
}
+ hpBefore := s.hugePages()
+
// Coalesce with earlier, later spans.
if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
if s.scavenged == before.scavenged {
realign(s, after, after)
}
}
+
+ if !s.scavenged && s.hugePages() > hpBefore {
+ // If s has grown such that it now may contain more huge pages than it
+ // did before, then mark the whole region as huge-page-backable.
+ //
+ // Otherwise, on systems where we break up huge pages (like Linux)
+ // s may not be backed by huge pages because it could be made up of
+ // pieces which are broken up in the underlying VMA. The primary issue
+ // with this is that it can lead to a poor estimate of the amount of
+ // free memory backed by huge pages for determining the scavenging rate.
+ sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
+ }
}
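The hpBefore/hugePages comparison above works because coalescing two spans can create whole, aligned huge pages that neither span contained by itself. Below is a standalone sketch of that counting under an assumed 2 MiB huge page size; alignedHugePages is a hypothetical stand-in for the mspan.hugePages method whose doc comment begins below.

```go
package main

import "fmt"

const hugePageSize = 2 << 20 // assume 2 MiB transparent huge pages

// alignedHugePages counts whole, huge-page-aligned pages contained in
// [base, base+length), by rounding base up and base+length down.
func alignedHugePages(base, length uintptr) uintptr {
	start := (base + hugePageSize - 1) &^ (hugePageSize - 1) // round up
	end := (base + length) &^ (hugePageSize - 1)             // round down
	if start >= end {
		return 0
	}
	return (end - start) / hugePageSize
}

func main() {
	// Two adjacent 1.5 MiB spans, each straddling a huge-page boundary,
	// contain no whole aligned huge pages on their own...
	a := alignedHugePages(1<<20, 3<<19)       // base 1 MiB, len 1.5 MiB
	b := alignedHugePages(1<<20+3<<19, 3<<19) // base 2.5 MiB, len 1.5 MiB
	// ...but the coalesced 3 MiB span contains one, so the count grew and
	// the region would be re-marked huge-page-backable via sysHugePage.
	merged := alignedHugePages(1<<20, 3<<20)
	fmt.Println(a, b, merged) // 0 0 1
}
```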
// hugePages returns the number of aligned physical huge pages in the memory