}
func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
- if n > uintptr(h.arena_end)-uintptr(h.arena_used) {
+ if n > h.arena_end-h.arena_used {
// We are in 32-bit mode, maybe we didn't use all possible address space yet.
// Reserve some more space.
p_size := round(n+_PageSize, 256<<20)
}
}
- if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
+ if n <= h.arena_end-h.arena_used {
// Keep taking from our reservation.
p := h.arena_used
sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
}
// If using 64-bit, our reservation is all we have.
- if uintptr(h.arena_end)-uintptr(h.arena_start) >= _MaxArena32 {
+ if h.arena_end-h.arena_start >= _MaxArena32 {
return nil
}
return nil
}
- if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
+ if p < h.arena_start || uintptr(p)+p_size-h.arena_start >= _MaxArena32 {
print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
return nil
}
p_end := p + p_size
p += -p & (_PageSize - 1)
- if uintptr(p)+n > uintptr(h.arena_used) {
+ if uintptr(p)+n > h.arena_used {
mHeap_MapBits(h, p+n)
mHeap_MapSpans(h, p+n)
h.arena_used = p + n
}
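
// --- Illustrative sketch (standalone, not runtime code) ---
// The reservation growth and page rounding above are ordinary power-of-two
// alignment arithmetic. This sketch shows both tricks; the 8 KB page size and
// the local round helper are assumptions for illustration, not the runtime's
// declarations.
package main

import "fmt"

const pageSize = 8192 // assumed page size

// round rounds n up to a multiple of a, where a is a power of two.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	// Growing the reservation rounds the request up to 256 MB granularity.
	n := uintptr(100<<20 + 123)
	fmt.Println(round(n+pageSize, 256<<20) == 256<<20) // true: one 256 MB step

	// p += -p & (pageSize-1) advances p to the next page boundary.
	p := uintptr(0x1234567)
	p += -p & (pageSize - 1)
	fmt.Println(p%pageSize == 0) // true
}
// --- end sketch ---
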
// find span
- arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
- arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
+ arena_start := mheap_.arena_start
+ arena_used := mheap_.arena_used
if uintptr(v) < arena_start || uintptr(v) >= arena_used {
return
}
mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
s.npages = npage
p := uintptr(t.start)
- p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+ p -= (h.arena_start >> _PageShift)
if p > 0 {
h_spans[p-1] = s
}
s.unusedsince = 0
p := uintptr(s.start)
- p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+ p -= (h.arena_start >> _PageShift)
for n := uintptr(0); n < npage; n++ {
h_spans[p+n] = s
}
s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
p := uintptr(s.start)
- p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+ p -= (h.arena_start >> _PageShift)
for i := p; i < p+s.npages; i++ {
h_spans[i] = s
}
// and is guaranteed to be start or end of span.
func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
p := uintptr(v)
- p -= uintptr(unsafe.Pointer(h.arena_start))
+ p -= h.arena_start
return h_spans[p>>_PageShift]
}
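
// --- Illustrative sketch (standalone, not runtime code) ---
// The lookups above are direct indexing: every heap page has one slot in
// h_spans, and an address finds its slot by subtracting the arena base and
// shifting by the page-size exponent. The names and the 8 KB page size below
// are assumptions for illustration, not the runtime's API.
package main

import "fmt"

const pageShift = 13 // assumed: 8 KB pages

type span struct{ firstPage, npages uintptr }

// lookup mirrors the indexing in mHeap_Lookup: index the per-page table by
// (addr - arenaStart) >> pageShift.
func lookup(spans []*span, arenaStart, addr uintptr) *span {
	return spans[(addr-arenaStart)>>pageShift]
}

func main() {
	arenaStart := uintptr(0x10000000)
	spans := make([]*span, 16)

	// A 3-page span starting at arena page 4 owns slots 4, 5 and 6, the same
	// way the loops above fill h_spans for every page of a span.
	s := &span{firstPage: 4, npages: 3}
	for i := s.firstPage; i < s.firstPage+s.npages; i++ {
		spans[i] = s
	}

	addr := arenaStart + 5<<pageShift + 100 // inside the span's second page
	fmt.Println(lookup(spans, arenaStart, addr) == s) // true
}
// --- end sketch ---
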
// other garbage in their middles, so we have to
// check for that.
func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
- if uintptr(v) < uintptr(unsafe.Pointer(h.arena_start)) || uintptr(v) >= uintptr(unsafe.Pointer(h.arena_used)) {
+ if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
return nil
}
p := uintptr(v) >> _PageShift
q := p
- q -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
+ q -= h.arena_start >> _PageShift
s := h_spans[q]
if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
return nil
// Coalesce with earlier, later spans.
p := uintptr(s.start)
- p -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
+ p -= h.arena_start >> _PageShift
if p > 0 {
t := h_spans[p-1]
if t != nil && t.state == _MSpanFree {