}
tmerge(t, s);
s->type = t;
+ if(c == CTYPEDEF && (typechlv[t->etype] || typefd[t->etype])) {
+ s->type = copytyp(t);
+ s->type->tag = s;
+ }
s->class = c;
s->block = 0;
s->offset = o;
switch(t->etype) {
case TINT:
- Bprint(&outbuf, "int32");
- break;
case TUINT:
- Bprint(&outbuf, "uint32");
- break;
case TCHAR:
- Bprint(&outbuf, "int8");
- break;
case TUCHAR:
- Bprint(&outbuf, "uint8");
- break;
case TSHORT:
- Bprint(&outbuf, "int16");
- break;
case TUSHORT:
- Bprint(&outbuf, "uint16");
- break;
case TLONG:
- // The 32/64-bit ambiguous types (int,uint,uintptr)
- // are assigned a TLONG/TULONG to distinguish them
- // from always 32-bit types which get a TINT/TUINT.
- // (See int_x/uint_x in pkg/runtime/runtime.h.)
- // For LONG and VLONG types, we generate the
- // unqualified Go type when appropriate.
- // This makes it easier to write Go code that
- // modifies objects with autogenerated-from-C types.
- if(ewidth[TIND] == 4)
- Bprint(&outbuf, "int");
- else
- Bprint(&outbuf, "int32");
- break;
case TULONG:
- if(ewidth[TIND] == 4)
- Bprint(&outbuf, "uint");
- else
- Bprint(&outbuf, "uint32");
- break;
case TVLONG:
- if(ewidth[TIND] == 8)
- Bprint(&outbuf, "int");
- else
- Bprint(&outbuf, "int64");
- break;
case TUVLONG:
- if(ewidth[TIND] == 8)
- Bprint(&outbuf, "uint");
- else
- Bprint(&outbuf, "uint64");
- break;
case TFLOAT:
- Bprint(&outbuf, "float32");
- break;
case TDOUBLE:
- Bprint(&outbuf, "float64");
+ // All names used in the runtime code should be typedefs.
+ if(t->tag != nil) {
+ if(strcmp(t->tag->name, "intgo") == 0)
+ Bprint(&outbuf, "int");
+ else if(strcmp(t->tag->name, "uintgo") == 0)
+ Bprint(&outbuf, "uint");
+ else
+ Bprint(&outbuf, "%s", t->tag->name);
+ } else
+ Bprint(&outbuf, "C.%T", t);
break;
case TUNION:
case TSTRUCT:
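For reference, a rough Go sketch of the name mapping that the scalar cases above now fall through to (editor's illustration, not part of the change): typedef-tagged types are emitted under their tag, the Go-facing intgo/uintgo typedefs become plain int/uint, and untagged types fall back to a qualified C type.

// goTypeName mirrors the Bprint logic above for typedef-tagged scalar types.
func goTypeName(tag string) string {
	switch tag {
	case "intgo":
		return "int"
	case "uintgo":
		return "uint"
	case "":
		return "C.<type>" // no typedef tag: emit the qualified C type instead
	default:
		return tag
	}
}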
const (
cacheLineSize = 64
)
+
+type uintreg uint32
+type intptr int32 // TODO(rsc): remove
const (
cacheLineSize = 64
)
+
+type uintreg uint64
+type intptr int64 // TODO(rsc): remove
const (
cacheLineSize = 64
)
+
+type uintreg uint64
+type intptr int32 // TODO(rsc): remove
const (
cacheLineSize = 32
)
+
+type uintreg uint32
+type intptr int32 // TODO(rsc): remove
MOVB AX, ret+12(FP)
RET
+TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
+ JMP runtime·cas(SB)
+
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == old){
MOVL $0, AX
MOVB AX, ret+24(FP)
RET
+
+TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
+ JMP runtime·cas64(SB)
// bool casp(void **val, void *old, void *new)
// Atomically:
// restore when returning from f.
MOVL 0(SP), AX // our caller's PC
MOVL AX, (m_morebuf+gobuf_pc)(BX)
- LEAL addr+4(FP), AX // our caller's SP
+ LEAL fn+0(FP), AX // our caller's SP
MOVL AX, (m_morebuf+gobuf_sp)(BX)
MOVL g(CX), AX
MOVL AX, (m_morebuf+gobuf_g)(BX)
MOVB AX, ret+16(FP)
RET
+TEXT runtime·casuintptr(SB), NOSPLIT, $0-17
+ JMP runtime·cas(SB)
+
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == old){
// TEXT runtime·cas(SB),NOSPLIT,$0
// B runtime·armcas(SB)
//
-TEXT runtime·armcas(SB),NOSPLIT,$0-12
+TEXT runtime·armcas(SB),NOSPLIT,$0-13
MOVW valptr+0(FP), R1
MOVW old+4(FP), R2
MOVW new+8(FP), R3
CMP $0, R0
BNE casl
MOVW $1, R0
+ MOVB R0, ret+12(FP)
RET
casfail:
MOVW $0, R0
+ MOVB R0, ret+12(FP)
RET
+TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
+ JMP runtime·cas(SB)
+
TEXT runtime·stackguard(SB),NOSPLIT,$0-8
MOVW R13, R1
MOVW g_stackguard(g), R2
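The casuintptr stubs added above are thin aliases: on targets where uintptr is 32 bits they jump to runtime·cas, and where it is 64 bits to runtime·cas64, so Go code can compare-and-swap a uintptr without caring about word size. A minimal usage sketch (hypothetical helper, mirroring the maxstring update later in this change):

// bumpMax raises *p to at least v, retrying until the CAS wins or v is stale.
func bumpMax(p *uintptr, v uintptr) {
	for {
		old := *p
		if v <= old || casuintptr(p, old, v) {
			return
		}
	}
}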
// entry point for c <- x from compiled code
//go:nosplit
func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
- chansend(t, c, elem, true, gogetcallerpc(unsafe.Pointer(&t)))
+ chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
}
/*
var t0 int64
if blockprofilerate > 0 {
- t0 = gocputicks()
+ t0 = cputicks()
}
golock(&c.lock)
// to assign to both types in Go. At some point we'll
// write the Go types directly instead of generating them
// via the C types. At that point, this nastiness goes away.
- *(*int64)(unsafe.Pointer(&sg.releasetime)) = gocputicks()
+ *(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks()
}
goready(recvg)
return true
panic("send on closed channel")
}
if mysg.releasetime > 0 {
- goblockevent(int64(mysg.releasetime)-t0, 3)
+ blockevent(int64(mysg.releasetime)-t0, 2)
}
if mysg != gp.waiting {
gothrow("G waiting list is corrupted!")
recvg := sg.g
gounlock(&c.lock)
if sg.releasetime != 0 {
- *(*int64)(unsafe.Pointer(&sg.releasetime)) = gocputicks()
+ *(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks()
}
goready(recvg)
} else {
gounlock(&c.lock)
}
if t1 > 0 {
- goblockevent(t1-t0, 3)
+ blockevent(t1-t0, 2)
}
return true
}
// if sgp participates in a select and is already signaled, ignore it
if sgp.selectdone != nil {
// claim the right to signal
- if *sgp.selectdone != 0 || !gocas(sgp.selectdone, 0, 1) {
+ if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) {
continue
}
}
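Note that the skip argument to blockevent drops from 3 to 2 above (and at the later call sites in sema.go): with the goblockevent/onM indirection gone, presumably one fewer wrapper frame sits between the blocked operation and the profiler. A condensed sketch of the timing bracket, assuming the cputicks and blockevent stubs declared later in this change (not the actual control flow, which parks between the two readings):

// timedBlock is a hypothetical helper showing the block-profile bracket.
func timedBlock(block func()) {
	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}
	block()
	if t0 != 0 {
		blockevent(cputicks()-t0, 2) // 2 = stack frames to skip in the profile
	}
}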
func NewParFor(nthrmax uint32) *ParFor {
mp := acquirem()
- mp.scalararg[0] = uint(nthrmax)
+ mp.scalararg[0] = uintptr(nthrmax)
onM(&newparfor_m)
desc := (*ParFor)(mp.ptrarg[0])
mp.ptrarg[0] = nil
mp.ptrarg[0] = unsafe.Pointer(desc)
mp.ptrarg[1] = unsafe.Pointer(ctx)
mp.ptrarg[2] = **(**unsafe.Pointer)(unsafe.Pointer(&body))
- mp.scalararg[0] = uint(nthr)
- mp.scalararg[1] = uint(n)
+ mp.scalararg[0] = uintptr(nthr)
+ mp.scalararg[1] = uintptr(n)
mp.scalararg[2] = 0
if wait {
mp.scalararg[2] = 1
func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(desc)
- mp.scalararg[0] = uint(tid)
+ mp.scalararg[0] = uintptr(tid)
onM(&parforiters_m)
begin := uint32(mp.scalararg[0])
end := uint32(mp.scalararg[1])
}
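The parfor changes follow a pattern that recurs throughout this change: arguments crossing onM (from the goroutine stack to the M/g0 stack) travel through mp.scalararg as uintptr and mp.ptrarg as unsafe.Pointer, which is why the uint casts become uintptr casts. A condensed sketch of the convention, assuming onM takes a *mFunction as the calls above suggest:

// callOnM packs one scalar argument, runs fn on the M stack, and reads back
// one scalar result; illustrative only.
func callOnM(fn *mFunction, arg uintptr) uintptr {
	mp := acquirem()
	mp.scalararg[0] = arg
	onM(fn)
	r := mp.scalararg[0]
	releasem(mp)
	return r
}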
// check compiler's and reflect's math
- if t.key.size > maxKeySize && (t.indirectkey == 0 || t.keysize != uint8(ptrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey == 1 || t.keysize != uint8(t.key.size)) {
+ if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
+ t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
gothrow("key size wrong")
}
- if t.elem.size > maxValueSize && (t.indirectvalue == 0 || t.valuesize != uint8(ptrSize)) ||
- t.elem.size <= maxValueSize && (t.indirectvalue == 1 || t.valuesize != uint8(t.elem.size)) {
+ if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
+ t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
gothrow("value size wrong")
}
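The maptype flags indirectkey and indirectvalue are now Go bools rather than 0/1 bytes, which turns the former "!= 0" tests throughout the file into plain boolean tests. The invariant that the checks above enforce, as an editor's sketch:

// expectedKeySlot returns how many bytes a bucket slot devotes to a key:
// keys larger than maxKeySize are stored out of line behind a pointer.
func expectedKeySlot(t *maptype) uint8 {
	if t.key.size > maxKeySize {
		if !t.indirectkey {
			gothrow("key size wrong") // compiler should have set indirectkey
		}
		return uint8(ptrSize)
	}
	return uint8(t.key.size)
}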
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess1
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey != 0 {
+ if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k, uintptr(t.key.size)) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
return v
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess2
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey != 0 {
+ if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k, uintptr(t.key.size)) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
return v, true
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey != 0 {
+ if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k, uintptr(t.key.size)) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
return k, v
panic("assignment to entry in nil map")
}
if raceenabled {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapassign1
pc := **(**uintptr)(unsafe.Pointer(&fn))
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
- if t.indirectkey != 0 {
+ if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2, uintptr(t.key.size)) {
memmove(k2, key, uintptr(t.key.size))
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
v2 := v
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
v2 = *((*unsafe.Pointer)(v2))
}
memmove(v2, val, uintptr(t.elem.size))
}
// store new key/value at insert position
- if t.indirectkey != 0 {
+ if t.indirectkey {
if checkgc {
memstats.next_gc = memstats.heap_alloc
}
*(*unsafe.Pointer)(insertk) = kmem
insertk = kmem
}
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
if checkgc {
memstats.next_gc = memstats.heap_alloc
}
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapdelete
pc := **(**uintptr)(unsafe.Pointer(&fn))
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
- if t.indirectkey != 0 {
+ if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2, uintptr(t.key.size)) {
it.bptr = nil
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapiterinit
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
if old == old|iterator|oldIterator {
break
}
- if gocas(&h.flags, old, old|iterator|oldIterator) {
+ if cas(&h.flags, old, old|iterator|oldIterator) {
break
}
}
func mapiternext(it *hiter) {
h := it.h
if raceenabled {
- callerpc := gogetcallerpc(unsafe.Pointer(&it))
+ callerpc := getcallerpc(unsafe.Pointer(&it))
fn := mapiternext
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
k2 := k
- if t.indirectkey != 0 {
+ if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if alg.equal(k2, k2, uintptr(t.key.size)) {
}
if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
// this is the golden data, we can return it.
- if t.indirectkey != 0 {
+ if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
it.key = k
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
it.value = v
// The hash table has grown since the iterator was started.
// The golden data for this key is now somewhere else.
k2 := k
- if t.indirectkey != 0 {
+ if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if alg.equal(k2, k2, uintptr(t.key.size)) {
// us because when key!=key we can't look it up
// successfully in the current table.
it.key = k2
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
it.value = v
gothrow("bad map state")
}
k2 := k
- if t.indirectkey != 0 {
+ if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
// Compute hash to make our evacuation decision (whether we need
xv = add(xk, bucketCnt*uintptr(t.keysize))
}
x.tophash[xi] = top
- if t.indirectkey != 0 {
+ if t.indirectkey {
*(*unsafe.Pointer)(xk) = k2 // copy pointer
} else {
memmove(xk, k, uintptr(t.key.size)) // copy value
}
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
} else {
memmove(xv, v, uintptr(t.elem.size))
yv = add(yk, bucketCnt*uintptr(t.keysize))
}
y.tophash[yi] = top
- if t.indirectkey != 0 {
+ if t.indirectkey {
*(*unsafe.Pointer)(yk) = k2
} else {
memmove(yk, k, uintptr(t.key.size))
}
- if t.indirectvalue != 0 {
+ if t.indirectvalue {
*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
} else {
memmove(yv, v, uintptr(t.elem.size))
return 0
}
if raceenabled {
- callerpc := gogetcallerpc(unsafe.Pointer(&h))
+ callerpc := getcallerpc(unsafe.Pointer(&h))
fn := reflect_maplen
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess1_fast32
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess2_fast32
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess1_fast64
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess2_fast64
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess1_faststr
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := mapaccess2_faststr
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadpc(unsafe.Pointer(h), callerpc, pc)
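The race-detector preamble repeated above follows one pattern: getcallerpc(unsafe.Pointer(&t)) recovers the PC of the user call site from the address of the first argument, and the double indirection through a func value digs out the entry PC of the runtime function itself, so the race report can name both. An editor's stripped-down illustration of the shape:

// instrumentedRead is a hypothetical map entry point showing only the
// instrumentation; the actual lookup is elided.
func instrumentedRead(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t)) // PC of the user call site
		fn := instrumentedRead
		pc := **(**uintptr)(unsafe.Pointer(&fn)) // entry PC of this function
		racereadpc(unsafe.Pointer(h), callerpc, pc)
	}
}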
if locked != 0 {
golock(&ifaceLock)
}
- for m = (*itab)(goatomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
+ for m = (*itab)(atomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
if m.inter == inter && m._type == typ {
if m.bad != 0 {
m = nil
}
}
- m = (*itab)(gopersistentalloc(unsafe.Sizeof(itab{}) + uintptr(len(inter.mhdr))*ptrSize))
+ m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr))*ptrSize, 0, &memstats.other_sys))
m.inter = inter
m._type = typ
gothrow("invalid itab locking")
}
m.link = hash[h]
- goatomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
+ atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
gounlock(&ifaceLock)
if m.bad != 0 {
return nil
func typ2Itab(t *_type, inter *interfacetype, cache **itab) *itab {
tab := getitab(inter, t, false)
- goatomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
+ atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
return tab
}
}
func convT2I(t *_type, inter *interfacetype, cache **itab, elem unsafe.Pointer) (i fInterface) {
- tab := (*itab)(goatomicloadp(unsafe.Pointer(cache)))
+ tab := (*itab)(atomicloadp(unsafe.Pointer(cache)))
if tab == nil {
tab = getitab(inter, t, false)
- goatomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
+ atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
}
size := uintptr(t.size)
pi := (*iface)(unsafe.Pointer(&i))
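In the itab cache the go-prefixed wrappers give way to the underlying primitives: the chain head is read with atomicloadp so the unlocked fast path stays safe, new itabs come from persistentalloc (never freed, charged to memstats.other_sys), and publication happens last through atomicstorep so lock-free readers never see a half-initialized entry. The publish step, as a sketch with the names used above:

// publishItab links the fully built itab ahead of the current chain head and
// then makes it visible with a single atomic pointer store (ifaceLock held).
func publishItab(slot unsafe.Pointer, m, head *itab) {
	m.link = head
	atomicstorep(slot, unsafe.Pointer(m))
}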
bitMask = bitBoundary | bitMarked
)
+// Page number (address>>pageShift)
+type pageID uintptr
+
// All zero-sized allocations return a pointer to this byte.
var zeroObject byte
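pageID mirrors the C PageID typedef renamed later in this change: a span's starting page is simply its address shifted down by the page-size exponent, and the heap map is indexed by that number. A one-line sketch (the shift constant here is illustrative; the real one is PageShift from malloc.h):

const pageShiftExample = 13 // assumed 8K pages, for illustration only

func pageOf(addr uintptr) pageID { return pageID(addr >> pageShiftExample) }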
}
mp.mallocing = 1
if mp.curg != nil {
- mp.curg.stackguard0 = ^uint(0xfff) | 0xbad
+ mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
}
}
// The object fits into existing tiny block.
x = tiny
c.tiny = (*byte)(add(x, size))
- c.tinysize -= uint(size1)
+ c.tinysize -= uintptr(size1)
if debugMalloc {
mp := acquirem()
if mp.mallocing == 0 {
// based on amount of remaining free space.
if maxTinySize-size > tinysize {
c.tiny = (*byte)(add(x, size))
- c.tinysize = uint(maxTinySize - size)
+ c.tinysize = uintptr(maxTinySize - size)
}
size = maxTinySize
} else {
v := s.freelist
if v == nil {
mp := acquirem()
- mp.scalararg[0] = uint(sizeclass)
+ mp.scalararg[0] = uintptr(sizeclass)
onM(&mcacheRefill_m)
releasem(mp)
s = c.alloc[sizeclass]
}
}
}
- c.local_cachealloc += int(size)
+ c.local_cachealloc += intptr(size)
} else {
mp := acquirem()
- mp.scalararg[0] = uint(size)
- mp.scalararg[1] = uint(flags)
+ mp.scalararg[0] = uintptr(size)
+ mp.scalararg[1] = uintptr(flags)
onM(&largeAlloc_m)
s = (*mspan)(mp.ptrarg[0])
mp.ptrarg[0] = nil
mp := acquirem()
mp.ptrarg[0] = x
mp.ptrarg[1] = unsafe.Pointer(typ)
- mp.scalararg[0] = uint(size)
- mp.scalararg[1] = uint(size0)
+ mp.scalararg[0] = uintptr(size)
+ mp.scalararg[1] = uintptr(size0)
onM(&unrollgcproginplace_m)
releasem(mp)
goto marked
}
ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
// Check whether the program is already unrolled.
- if uintptr(goatomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
+ if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(typ)
onM(&unrollgcprog_m)
}
c.next_sample = next
}
- mp.scalararg[0] = uint(size)
+ mp.scalararg[0] = uintptr(size)
mp.ptrarg[0] = x
onM(&mprofMalloc_m)
}
// force = 1 - do GC regardless of current heap usage
// force = 2 - do GC and eager sweep
func gogc(force int32) {
- if memstats.enablegc == 0 {
+ if !memstats.enablegc {
return
}
if gcpercent == gcpercentUnknown {
golock(&mheap_.lock)
if gcpercent == gcpercentUnknown {
- gcpercent = goreadgogc()
+ gcpercent = readgogc()
}
gounlock(&mheap_.lock)
}
}
// Ok, we're doing it! Stop everybody else
- startTime := gonanotime()
+ startTime := nanotime()
mp = acquirem()
mp.gcing = 1
releasem(mp)
}
for i := 0; i < n; i++ {
if i > 0 {
- startTime = gonanotime()
+ startTime = nanotime()
}
// switch to g0, call gc, then switch back
- mp.scalararg[0] = uint(uint32(startTime)) // low 32 bits
- mp.scalararg[1] = uint(startTime >> 32) // high 32 bits
+ mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
+ mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits
if force >= 2 {
mp.scalararg[2] = 1 // eagersweep
} else {
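startTime is split across two scalararg slots because scalararg is uintptr-sized and must still carry an int64 on 32-bit targets; the other side reassembles the value from its halves. A sketch of the round trip (sufficient for non-negative values such as nanotime results):

func packInt64(v int64) (lo, hi uintptr) { return uintptr(uint32(v)), uintptr(v >> 32) }

func unpackInt64(lo, hi uintptr) int64 { return int64(hi)<<32 | int64(uint32(lo)) }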
PageSize = 1<<PageShift,
PageMask = PageSize - 1,
};
-typedef uintptr PageID; // address >> PageShift
+typedef uintptr pageID; // address >> PageShift
enum
{
{
MSpan *next; // in a span linked list
MSpan *prev; // in a span linked list
- PageID start; // starting page number
+ pageID start; // starting page number
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
// sweep generation:
Special *specials; // linked list of special records sorted by offset.
};
-void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
+void runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages);
void runtime·MSpan_EnsureSwept(MSpan *span);
bool runtime·MSpan_Sweep(MSpan *span, bool preserve);
Eface *eface;
Type *typ;
MSpan *s;
- PageID k;
+ pageID k;
bool keepworking;
// Cache memory arena parameters in local vars.
{
uintptr n;
MSpan *s, *t;
- PageID p;
+ pageID p;
// Try in fixed-size lists up to max.
for(n=npage; n < nelem(h->free); n++) {
uintptr ask;
void *v;
MSpan *s;
- PageID p;
+ pageID p;
// Ask for a big chunk, to reduce the number of mappings
// the operating system needs to track; also amortizes
runtime·MHeap_LookupMaybe(MHeap *h, void *v)
{
MSpan *s;
- PageID p, q;
+ pageID p, q;
if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
return nil;
MHeap_FreeSpanLocked(MHeap *h, MSpan *s, bool acctinuse, bool acctidle)
{
MSpan *t;
- PageID p;
+ pageID p;
switch(s->state) {
case MSpanStack:
// Initialize a new span with the given start and npages.
void
-runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
+runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages)
{
span->next = nil;
span->prev = nil;
r.FreeBytes = int64(b.data.mp.free_bytes)
r.AllocObjects = int64(b.data.mp.allocs)
r.FreeObjects = int64(b.data.mp.frees)
- for i := 0; uint(i) < b.nstk && i < len(r.Stack0); i++ {
+ for i := 0; uintptr(i) < b.nstk && i < len(r.Stack0); i++ {
r.Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize))
}
- for i := b.nstk; i < uint(len(r.Stack0)); i++ {
+ for i := b.nstk; i < uintptr(len(r.Stack0)); i++ {
r.Stack0[i] = 0
}
}
p[idx].Count = int64(bp.count)
p[idx].Cycles = int64(bp.cycles)
i := 0
- for uint(i) < b.nstk && i < len(p[idx].Stack0) {
+ for uintptr(i) < b.nstk && i < len(p[idx].Stack0) {
p[idx].Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize))
i++
}
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
- sp := gogetcallersp(unsafe.Pointer(&buf))
- pc := gogetcallerpc(unsafe.Pointer(&buf))
+ sp := getcallersp(unsafe.Pointer(&buf))
+ pc := getcallerpc(unsafe.Pointer(&buf))
mp := acquirem()
gp := mp.curg
if all {
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
- first := (*m)(goatomicloadp(unsafe.Pointer(&allm)))
+ first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
n++
}
runtime·unlock(&runtime·proflock);
}
-void
-runtime·blockevent_m(void)
-{
- runtime·blockevent(g->m->scalararg[0] + ((int64)g->m->scalararg[1]<<32), g->m->scalararg[2]);
-}
-
void
runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr))
{
func printstring(s string) {
mp := acquirem()
- mp.scalararg[0] = uint(len(s))
+ mp.scalararg[0] = uintptr(len(s))
mp.ptrarg[0] = (*stringStruct)(unsafe.Pointer(&s)).str
onM(&printstring_m)
releasem(mp)
func printhex(x uintptr) {
mp := acquirem()
- mp.scalararg[0] = uint(x)
+ mp.scalararg[0] = uintptr(x)
onM(&printhex_m)
releasem(mp)
}
gothrow("gopark: bad g status")
}
mp.waitlock = lock
- mp.waitunlockf = *(*func(*g, unsafe.Pointer) uint8)(unsafe.Pointer(&unlockf))
+ mp.waitunlockf = *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&unlockf))
gp.waitreason = reason
releasem(mp)
// can't do anything that might move the G between Ms here.
releasem(mp)
}
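The waitunlockf field changes from a uint8-returning to a bool-returning function type, so the unsafe cast in gopark now reinterprets unlockf under the new signature. The trick works because a func value is a single pointer, so two signatures with identical layouts can share it; an editor's sketch, assuming unlockf arrives as a raw pointer as the cast suggests:

// asUnlockFunc reinterprets a pointer-sized funcval holder as a typed
// func value; illustrative only, not a general-purpose conversion.
func asUnlockFunc(fv unsafe.Pointer) func(*g, unsafe.Pointer) bool {
	return *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&fv))
}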
-func goblockevent(cycles int64, skip int32) {
- // TODO: convert to Go when we do mprof.goc
- mp := acquirem()
- mp.scalararg[0] = uint(uint32(cycles))
- mp.scalararg[1] = uint(cycles >> 32)
- mp.scalararg[2] = uint(skip)
- onM(&blockevent_m)
- releasem(mp)
-}
-
//go:nosplit
func acquireSudog() *sudog {
c := gomcache()
func setMaxStack(in int) (out int) {
out = int(maxstacksize)
- maxstacksize = uint(in)
+ maxstacksize = uintptr(in)
return out
}
func setGCPercent(in int32) (out int32) {
mp := acquirem()
- mp.scalararg[0] = uint(int(in))
+ mp.scalararg[0] = uintptr(int(in))
onM(&setgcpercent_m)
out = int32(int(mp.scalararg[0]))
releasem(mp)
return out
}
-func setPanicOnFault(newb bool) (old bool) {
- new := uint8(0)
- if newb {
- new = 1
- }
-
+func setPanicOnFault(new bool) (old bool) {
mp := acquirem()
- old = mp.curg.paniconfault == 1
+ old = mp.curg.paniconfault
mp.curg.paniconfault = new
releasem(mp)
return old
func setMaxThreads(in int) (out int) {
mp := acquirem()
- mp.scalararg[0] = uint(in)
+ mp.scalararg[0] = uintptr(in)
onM(&setmaxthreads_m)
out = int(mp.scalararg[0])
releasem(mp)
typedef int64 intgo; // Go's int
typedef uint64 uintgo; // Go's uint
#else
-// Normally, "int" == "long int" == 32 bits.
-// However, the C compiler uses this distinction
-// to disambiguate true 32 bit ints (e.g. int32)
-// from 32/64 bit ints (e.g. uintptr) so that it
-// can generate the corresponding go type correctly.
-typedef signed long int int32_x;
-typedef unsigned long int uint32_x;
-typedef uint32_x uintptr;
-typedef int32_x intptr;
-typedef int32_x intgo; // Go's int
-typedef uint32_x uintgo; // Go's uint
+typedef uint32 uintptr;
+typedef int32 intptr;
+typedef int32 intgo; // Go's int
+typedef uint32 uintgo; // Go's uint
#endif
#ifdef _64BITREG
t0 := int64(0)
s.releasetime = 0
if profile && blockprofilerate > 0 {
- t0 = gocputicks()
+ t0 = cputicks()
s.releasetime = -1
}
for {
golock(&root.lock)
// Add ourselves to nwait to disable "easy case" in semrelease.
- goxadd(&root.nwait, 1)
+ xadd(&root.nwait, 1)
// Check cansemacquire to avoid missed wakeup.
if cansemacquire(addr) {
- goxadd(&root.nwait, ^uint32(0))
+ xadd(&root.nwait, -1)
gounlock(&root.lock)
break
}
}
}
if s.releasetime > 0 {
- goblockevent(int64(s.releasetime)-t0, 4)
+ blockevent(int64(s.releasetime)-t0, 3)
}
releaseSudog(s)
}
func semrelease(addr *uint32) {
root := semroot(addr)
- goxadd(addr, 1)
+ xadd(addr, 1)
// Easy case: no waiters?
// This check must happen after the xadd, to avoid a missed wakeup
// (see loop in semacquire).
- if goatomicload(&root.nwait) == 0 {
+ if atomicload(&root.nwait) == 0 {
return
}
// Harder case: search for a waiter and wake it.
golock(&root.lock)
- if goatomicload(&root.nwait) == 0 {
+ if atomicload(&root.nwait) == 0 {
// The count is already consumed by another goroutine,
// so no need to wake up another goroutine.
gounlock(&root.lock)
s := root.head
for ; s != nil; s = s.next {
if s.elem == unsafe.Pointer(addr) {
- goxadd(&root.nwait, ^uint32(0))
+ xadd(&root.nwait, -1)
root.dequeue(s)
break
}
gounlock(&root.lock)
if s != nil {
if s.releasetime != 0 {
- // TODO: Remove use of unsafe here.
- releasetimep := (*int64)(unsafe.Pointer(&s.releasetime))
- *releasetimep = gocputicks()
+ s.releasetime = cputicks()
}
goready(s.g)
}
func cansemacquire(addr *uint32) bool {
for {
- v := goatomicload(addr)
+ v := atomicload(addr)
if v == 0 {
return false
}
- if gocas(addr, v, v-1) {
+ if cas(addr, v, v-1) {
return true
}
}
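Two details in the semaphore conversion: xadd takes a signed delta, so the old decrement idiom goxadd(p, ^uint32(0)) becomes xadd(p, -1); and the nwait bookkeeping is the missed-wakeup handshake the comments describe, with the acquirer announcing itself before re-checking and the releaser checking nwait only after bumping the count. A condensed sketch of the acquire side (the root type name semaRoot is assumed):

// acquireSlow reports whether the caller still needs to queue and park.
func acquireSlow(root *semaRoot, addr *uint32) bool {
	golock(&root.lock)
	xadd(&root.nwait, 1)     // announce the waiter first
	if cansemacquire(addr) { // re-check: a release may have slipped in
		xadd(&root.nwait, -1) // signed delta replaces ^uint32(0)
		gounlock(&root.lock)
		return false
	}
	return true // caller enqueues its sudog and parks via goparkunlock
}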
w.releasetime = 0
t0 := int64(0)
if blockprofilerate > 0 {
- t0 = gocputicks()
+ t0 = cputicks()
w.releasetime = -1
}
if s.tail == nil {
s.tail = w
goparkunlock(&s.lock, "semacquire")
if t0 != 0 {
- goblockevent(int64(w.releasetime)-t0, 3)
+ blockevent(int64(w.releasetime)-t0, 2)
}
releaseSudog(w)
}
s.tail = nil
}
if wake.releasetime != 0 {
- // TODO: Remove use of unsafe here.
- releasetimep := (*int64)(unsafe.Pointer(&wake.releasetime))
- *releasetimep = gocputicks()
+ wake.releasetime = cputicks()
}
goready(wake.g)
n--
if ok {
return
}
- gonotetsleepg(&signote, -1)
- gonoteclear(&signote)
+ notetsleepg(&signote, -1)
+ noteclear(&signote)
}
}
func signal_enable(s uint32) {
mp := acquirem()
- mp.scalararg[0] = uint(s)
+ mp.scalararg[0] = uintptr(s)
onM(&signal_enable_m)
releasem(mp)
}
func signal_disable(s uint32) {
mp := acquirem()
- mp.scalararg[0] = uint(s)
+ mp.scalararg[0] = uintptr(s)
onM(&signal_disable_m)
releasem(mp)
}
}
if raceenabled {
- callerpc := gogetcallerpc(unsafe.Pointer(&t))
+ callerpc := getcallerpc(unsafe.Pointer(&t))
fn := growslice
pc := **(**uintptr)(unsafe.Pointer(&fn))
racereadrangepc(old.array, old.len*int(t.elem.size), callerpc, pc)
}
if raceenabled {
- callerpc := gogetcallerpc(unsafe.Pointer(&to))
+ callerpc := getcallerpc(unsafe.Pointer(&to))
fn := slicecopy
pc := **(**uintptr)(unsafe.Pointer(&fn))
racewriterangepc(to.array, n*int(width), callerpc, pc)
}
if raceenabled {
- callerpc := gogetcallerpc(unsafe.Pointer(&to))
+ callerpc := getcallerpc(unsafe.Pointer(&to))
fn := slicestringcopy
pc := **(**uintptr)(unsafe.Pointer(&fn))
racewriterangepc(unsafe.Pointer(&to[0]), n, callerpc, pc)
fn := slicebytetostring
racereadrangepc(unsafe.Pointer(&b[0]),
len(b),
- gogetcallerpc(unsafe.Pointer(&b)),
+ getcallerpc(unsafe.Pointer(&b)),
**(**uintptr)(unsafe.Pointer(&fn)))
}
s, c := rawstring(len(b))
fn := slicebytetostringtmp
racereadrangepc(unsafe.Pointer(&b[0]),
len(b),
- gogetcallerpc(unsafe.Pointer(&b)),
+ getcallerpc(unsafe.Pointer(&b)),
**(**uintptr)(unsafe.Pointer(&fn)))
}
return *(*string)(unsafe.Pointer(&b))
fn := slicerunetostring
racereadrangepc(unsafe.Pointer(&a[0]),
len(a)*int(unsafe.Sizeof(a[0])),
- gogetcallerpc(unsafe.Pointer(&a)),
+ getcallerpc(unsafe.Pointer(&a)),
**(**uintptr)(unsafe.Pointer(&fn)))
}
var dum [4]byte
for {
ms := maxstring
- if uintptr(size) <= uintptr(ms) || gocasx((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
+ if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
return
}
}
ptrSize = unsafe.Sizeof((*byte)(nil))
)
-//go:noescape
-func gogetcallerpc(p unsafe.Pointer) uintptr
-
-//go:noescape
-func gogetcallersp(p unsafe.Pointer) uintptr
-
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
setmaxthreads_m,
ready_m,
park_m,
- blockevent_m,
notewakeup_m,
notetsleepg_m mFunction
)
+func blockevent(int64, int32)
+
// memclr clears n bytes starting at ptr.
// in memclr_*.s
//go:noescape
concurrentSweep = true
)
-// Atomic operations to read/write a pointer.
-// in stubs.goc
-func goatomicload(p *uint32) uint32 // return *p
-func goatomicloadp(p unsafe.Pointer) unsafe.Pointer // return *p
-func goatomicstore(p *uint32, v uint32) // *p = v
-func goatomicstorep(p unsafe.Pointer, v unsafe.Pointer) // *p = v
-
-// in stubs.goc
-// if *p == x { *p = y; return true } else { return false }, atomically
-//go:noescape
-func gocas(p *uint32, x uint32, y uint32) bool
-
-//go:noescape
-func goxadd(p *uint32, x uint32) uint32
-
-//go:noescape
-func gocasx(p *uintptr, x uintptr, y uintptr) bool
-
-func goreadgogc() int32
-func gonanotime() int64
func gosched()
func starttheworld()
func stoptheworld()
return unsafe.Pointer(x ^ 0)
}
-// gopersistentalloc allocates a permanent (not garbage collected)
-// memory region of size n. Use wisely!
-func gopersistentalloc(n uintptr) unsafe.Pointer
-
-func gocputicks() int64
-
-func gonoteclear(n *note) {
- n.key = 0
-}
-
-func gonotewakeup(n *note) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(n)
- onM(¬ewakeup_m)
- releasem(mp)
-}
-
-func gonotetsleepg(n *note, t int64) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(n)
- mp.scalararg[0] = uint(uint32(t)) // low 32 bits
- mp.scalararg[1] = uint(t >> 32) // high 32 bits
- releasem(mp)
- mcall(¬etsleepg_m)
- exitsyscall()
-}
-
func exitsyscall()
func goroutineheader(gp *g)
func jmpdefer(fv *funcval, argp unsafe.Pointer)
func exit1(code int32)
func asminit()
-func getcallersp(argp unsafe.Pointer) uintptr
-func cas(ptr *uint32, old, new uint32) bool
-func cas64(ptr *uint64, old, new uint64) bool
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
-func xadd(ptr *uint32, delta int32) uint32
-func xadd64(ptr *uint64, delta int64) uint64
-func xchg(ptr *uint32, new uint32) uint32
-func xchg64(ptr *uint64, new uint64) uint64
-func xchgp(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
-func atomicstore(ptr *uint32, val uint32)
-func atomicstore64(ptr *uint64, val uint64)
-func atomicstorep(ptr *unsafe.Pointer, val unsafe.Pointer)
-func atomicload(ptr *uint32) uint32
-func atomicload64(ptr *uint64) uint64
-func atomicloadp(ptr *unsafe.Pointer) unsafe.Pointer
-func atomicor8(ptr *uint8, val uint8)
func setg(gg *g)
func exit(code int32)
func breakpoint()
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
-func setcallerpc(argp unsafe.Pointer, pc uintptr)
-func getcallerpc(argp unsafe.Pointer) uintptr
func newstackcall(fv *funcval, addr unsafe.Pointer, size uint32)
func procyield(cycles uint32)
func osyield()
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
func cmpstring(s1, s2 string) int
+func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer
+func readgogc() int32
+func notetsleepg(n *note, ns int64)
+func notetsleep(n *note, ns int64)
+func notewakeup(n *note)
+func notesleep(n *note)
+func noteclear(n *note)
+
+//go:noescape
+func cas(ptr *uint32, old, new uint32) bool
+
+//go:noescape
+func cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+//go:noescape
+func casuintptr(ptr *uintptr, old, new uintptr) bool
+
+//go:noescape
+func xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func xchg64(ptr *uint64, new uint64) uint64
+
+//go:noescape
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func atomicstore(ptr *uint32, val uint32)
+
+//go:noescape
+func atomicstore64(ptr *uint64, val uint64)
+
+//go:noescape
+func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:noescape
+func atomicload(ptr *uint32) uint32
+
+//go:noescape
+func atomicload64(ptr *uint64) uint64
+
+//go:noescape
+func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func atomicor8(ptr *uint8, val uint8)
+
+//go:noescape
+func setcallerpc(argp unsafe.Pointer, pc uintptr)
+
+//go:noescape
+func getcallerpc(argp unsafe.Pointer) uintptr
+
+//go:noescape
+func getcallersp(argp unsafe.Pointer) uintptr
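The atomic and caller-PC helpers are now declared directly with //go:noescape: their bodies live in assembly, and the pragma tells the compiler that pointer arguments do not escape through the call, so callers can pass addresses of stack variables without forcing heap allocation. A hypothetical caller, assuming the declarations above:

// counterSnapshot relies on the //go:noescape annotations: &n stays on the stack.
func counterSnapshot() uint32 {
	var n uint32
	atomicstore(&n, 1)
	return atomicload(&n)
}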
runtime·unlock(p);
}
-#pragma textflag NOSPLIT
-func goreadgogc() (r int32) {
- r = runtime·readgogc();
-}
-
// entry point for testing
// TODO: mcall and run on M stack
func gostringW(str Slice) (s String) {
s = runtime·gostringw((uint16*)str.array);
}
-#pragma textflag NOSPLIT
-func gonanotime() (r int64) {
- r = runtime·nanotime();
-}
-
-#pragma textflag NOSPLIT
-func goatomicload(p *uint32) (v uint32) {
- v = runtime·atomicload(p);
-}
-
-#pragma textflag NOSPLIT
-func goatomicloadp(p **byte) (v *byte) {
- v = runtime·atomicloadp(p);
-}
-
-#pragma textflag NOSPLIT
-func goatomicstore(p *uint32, v uint32) {
- runtime·atomicstore(p, v);
-}
-
-#pragma textflag NOSPLIT
-func goatomicstorep(p **byte, v *byte) {
- runtime·atomicstorep(p, v);
-}
-
-#pragma textflag NOSPLIT
-func runtime·goxadd(p *uint32, x uint32) (ret uint32) {
- ret = runtime·xadd(p, x);
-}
-
-#pragma textflag NOSPLIT
-func runtime·gocas(p *uint32, x uint32, y uint32) (ret bool) {
- ret = runtime·cas(p, x, y);
-}
-
-#pragma textflag NOSPLIT
-func runtime·gocasx(p *uintptr, x uintptr, y uintptr) (ret bool) {
- ret = runtime·casp((void**)p, (void*)x, (void*)y);
-}
-
#pragma textflag NOSPLIT
func runtime·getg() (ret *G) {
ret = g;
mask.cap = mask.len;
}
-#pragma textflag NOSPLIT
-func gopersistentalloc(size uintptr) (x *void) {
- // TODO: used only for itabs for now. Need to make &mstats.other_sys arg parameterized.
- x = runtime·persistentalloc(size, 0, &mstats.other_sys);
-}
-
#pragma textflag NOSPLIT
func reflect·typelinks() (ret Slice) {
extern Type *runtime·typelink[], *runtime·etypelink[];
}
func (c *wincallbackcontext) isCleanstack() bool {
- return c.cleanstack == 1
+ return c.cleanstack
}
func (c *wincallbackcontext) setCleanstack(cleanstack bool) {
- if cleanstack {
- c.cleanstack = 1
- } else {
- c.cleanstack = 0
- }
+ c.cleanstack = cleanstack
}
var (
if len(ft.out) != 1 {
panic("compilecallback: function must have one output parameter")
}
- uintptrSize := uint(unsafe.Sizeof(uintptr(0)))
+ uintptrSize := unsafe.Sizeof(uintptr(0))
if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize {
panic("compilecallback: output parameter size is wrong")
}
- argsize := uint(0)
+ argsize := uintptr(0)
for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] {
if (*t).size != uintptrSize {
panic("compilecallback: input parameter size is wrong")
#endif
TEXT time·runtimeNano(SB),NOSPLIT,$0-0
- JMP runtime·gonanotime(SB)
+ JMP runtime·nanotime(SB)
TEXT time·Sleep(SB),NOSPLIT,$0-0
JMP runtime·timeSleep(SB)
}
t := new(timer)
- t.when = gonanotime() + ns
+ t.when = nanotime() + ns
t.f = goroutineReady
t.arg = getg()
golock(&timers.lock)
// siftup moved to top: new earliest deadline.
if timers.sleeping {
timers.sleeping = false
- gonotewakeup(&timers.waitnote)
+ notewakeup(&timers.waitnote)
}
if timers.rescheduling {
timers.rescheduling = false
// If addtimer inserts a new earlier event, addtimer1 wakes timerproc early.
func timerproc() {
timers.gp = getg()
- timers.gp.issystem = 1
+ timers.gp.issystem = true
for {
golock(&timers.lock)
timers.sleeping = false
- now := gonanotime()
+ now := nanotime()
delta := int64(-1)
for {
if len(timers.t) == 0 {
}
// At least one timer pending. Sleep until then.
timers.sleeping = true
- gonoteclear(&timers.waitnote)
+ noteclear(&timers.waitnote)
gounlock(&timers.lock)
- gonotetsleepg(&timers.waitnote, delta)
+ notetsleepg(&timers.waitnote, delta)
}
}
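timerproc now sleeps through the plain note primitives: it marks itself sleeping and clears the note while still holding timers.lock, then waits on the note outside the lock, and addtimer wakes it via notewakeup only when it installs a new earliest deadline. A condensed sketch of the two sides, using the field names above:

func timerSleep(delta int64) {
	timers.sleeping = true
	noteclear(&timers.waitnote) // arm the note before dropping the lock
	gounlock(&timers.lock)
	notetsleepg(&timers.waitnote, delta)
}

func wakeTimerprocLocked() {
	if timers.sleeping {
		timers.sleeping = false
		notewakeup(&timers.waitnote)
	}
}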