This isn't C anymore. No binary change to pkg/linux_amd64/runtime.a.
Change-Id: I24d66b0f5ac888f432b874aac684b1395e7c8345
Reviewed-on: https://go-review.googlesource.com/15903
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
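
For readers unfamiliar with the distinction the commit message alludes to: (unsafe.Pointer)(x) is C cast syntax carried into Go, where it is legal but unidiomatic, while unsafe.Pointer(x) is the ordinary Go conversion T(v). A minimal standalone sketch (not part of this CL) showing that the two spellings denote the same conversion:

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		x := uint64(42)

		// C-style spelling: the type wrapped in parentheses, as in a cast.
		// Legal Go, but the parentheses are redundant for a plain type name.
		p1 := (unsafe.Pointer)(&x)

		// Idiomatic Go conversion syntax: T(v).
		p2 := unsafe.Pointer(&x)

		// Both spell the same conversion, so the compiler emits identical
		// code -- which is why the CL reports no binary change.
		fmt.Println(p1 == p2) // true
	}

Note that parentheses remain necessary when the type literal would otherwise be misparsed: in the hunk below, (*m)(unsafe.Pointer(v &^ locked)) keeps (*m) parenthesized because *m(x) would parse as a dereference.
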
return
}
- write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
+ write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
if len >= bufSize {
write(dumpfd, data, int32(len))
nbuf = 0
}
func flush() {
- write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
+ write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
nbuf = 0
}
} else {
// Other M's are waiting for the lock.
// Dequeue an M.
- mp = (*m)((unsafe.Pointer)(v &^ locked))
+ mp = (*m)(unsafe.Pointer(v &^ locked))
if casuintptr(&l.key, v, mp.nextwaitm) {
// Dequeued an M. Wake it.
semawakeup(mp)
// TODO: It would be bad if part of the arena
// is reserved and part is not.
var reserved bool
- p := uintptr(sysReserve((unsafe.Pointer)(h.arena_end), p_size, &reserved))
+ p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
if p == 0 {
return nil
}
h.arena_reserved = reserved
} else {
var stat uint64
- sysFree((unsafe.Pointer)(p), p_size, &stat)
+ sysFree(unsafe.Pointer(p), p_size, &stat)
}
}
}
if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
// Keep taking from our reservation.
p := h.arena_used
- sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
+ sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
mHeap_MapBits(h, p+n)
mHeap_MapSpans(h, p+n)
h.arena_used = p + n
if raceenabled {
- racemapshadow((unsafe.Pointer)(p), n)
+ racemapshadow(unsafe.Pointer(p), n)
}
if uintptr(p)&(_PageSize-1) != 0 {
throw("misrounded allocation in MHeap_SysAlloc")
}
- return (unsafe.Pointer)(p)
+ return unsafe.Pointer(p)
}
// If using 64-bit, our reservation is all we have.
if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
- sysFree((unsafe.Pointer)(p), p_size, &memstats.heap_sys)
+ sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
return nil
}
h.arena_end = p_end
}
if raceenabled {
- racemapshadow((unsafe.Pointer)(p), n)
+ racemapshadow(unsafe.Pointer(p), n)
}
}
if uintptr(p)&(_PageSize-1) != 0 {
throw("misrounded allocation in MHeap_SysAlloc")
}
- return (unsafe.Pointer)(p)
+ return unsafe.Pointer(p)
}
// base address for all 0-byte allocations
// Don't split the stack as this method may be invoked
// without a valid G, which prevents us from allocating more stack.
//go:nosplit
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
- v := (unsafe.Pointer)(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
if uintptr(v) < 4096 {
return nil
}
func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
*reserved = true
- p := (unsafe.Pointer)(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ p := unsafe.Pointer(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
if uintptr(p) < 4096 {
return nil
}
func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
mSysStatInc(sysStat, n)
- p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
+ p := unsafe.Pointer(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
if uintptr(p) == _ENOMEM {
throw("runtime: out of memory")
}
f.nchunk = _FixAllocChunk
}
- v := (unsafe.Pointer)(f.chunk)
+ v := unsafe.Pointer(f.chunk)
if f.first != nil {
fn := *(*func(unsafe.Pointer, unsafe.Pointer))(unsafe.Pointer(&f.first))
fn(f.arg, v)
throw("still in list")
}
if s.npreleased > 0 {
- sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
+ sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
memstats.heap_released -= uint64(s.npreleased << _PageShift)
s.npreleased = 0
}
h_spans[p] = s
mSpanList_Remove(t)
t.state = _MSpanDead
- fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
+ fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
}
}
if (p+s.npages)*ptrSize < h.spans_mapped {
h_spans[p+s.npages-1] = s
mSpanList_Remove(t)
t.state = _MSpanDead
- fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
+ fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
}
}
memstats.heap_released += uint64(released)
sumreleased += released
s.npreleased = s.npages
- sysUnused((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
+ sysUnused(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
}
}
return sumreleased
// There was an old finalizer
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
+ fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
unlock(&mheap_.speciallock)
return false
}
return // there wasn't a finalizer to remove
}
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
+ fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
unlock(&mheap_.speciallock)
}
sf := (*specialfinalizer)(unsafe.Pointer(s))
queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(sf))
+ fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(sf))
unlock(&mheap_.speciallock)
return false // don't free p until finalizer is done
case _KindSpecialProfile:
sp := (*specialprofile)(unsafe.Pointer(s))
mProf_Free(sp.b, size, freed)
lock(&mheap_.speciallock)
- fixAlloc_Free(&mheap_.specialprofilealloc, (unsafe.Pointer)(sp))
+ fixAlloc_Free(&mheap_.specialprofilealloc, unsafe.Pointer(sp))
unlock(&mheap_.speciallock)
return true
default:
return -1
}
len := findnull(&msg[0])
- if write(uintptr(fd), (unsafe.Pointer)(&msg[0]), int32(len)) != int64(len) {
+ if write(uintptr(fd), unsafe.Pointer(&msg[0]), int32(len)) != int64(len) {
closefd(fd)
return -1
}
// Record the panic that is running the defer.
// If there is a new panic during the deferred call, that panic
// will find d in the list and will mark d._panic (this panic) aborted.
- d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))
+ d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
p.argp = unsafe.Pointer(getargp(0))
reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
- s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
+ s := mHeap_Lookup(&mheap_, unsafe.Pointer(x))
if s.state != _MSpanStack {
throw("freeing stack not in a stack span")
}
c.stackcache[order].list = x.ptr().next
c.stackcache[order].size -= uintptr(n)
}
- v = (unsafe.Pointer)(x)
+ v = unsafe.Pointer(x)
} else {
s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
if s == nil {
throw("out of memory")
}
- v = (unsafe.Pointer)(s.start << _PageShift)
+ v = unsafe.Pointer(s.start << _PageShift)
}
if raceenabled {
func stackfree(stk stack, n uintptr) {
gp := getg()
- v := (unsafe.Pointer)(stk.lo)
+ v := unsafe.Pointer(stk.lo)
if n&(n-1) != 0 {
throw("stack not a power of 2")
}
}
func adjustctxt(gp *g, adjinfo *adjustinfo) {
- adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
+ adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}
func adjustdefers(gp *g, adjinfo *adjustinfo) {
// Adjust pointers in the Defer structs.
// Defer structs themselves are never on the stack.
for d := gp._defer; d != nil; d = d.link {
- adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
- adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
- adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
+ adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
}
}
func adjustpanics(gp *g, adjinfo *adjustinfo) {
// Panics are on stack and already adjusted.
// Update pointer to head of list in G.
- adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
+ adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
func adjustsudogs(gp *g, adjinfo *adjustinfo) {
// the data elements pointed to by a SudoG structure
// might be in the stack.
for s := gp.waiting; s != nil; s = s.waitlink {
- adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
- adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
+ adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
+ adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
}
}
func adjuststkbar(gp *g, adjinfo *adjustinfo) {
for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
- adjustpointer(adjinfo, (unsafe.Pointer)(&gp.stkbar[i].savedLRPtr))
+ adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
}
}
func gostartcallfn(gobuf *gobuf, fv *funcval) {
var fn unsafe.Pointer
if fv != nil {
- fn = (unsafe.Pointer)(fv.fn)
+ fn = unsafe.Pointer(fv.fn)
} else {
fn = unsafe.Pointer(funcPC(nilfunc))
}
- gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
+ gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
// Maybe shrink the stack being used by gp.