printtypename(Type *t)
{
Sym *s;
- Type *t1;
int w;
char *n;
Bprint(&outbuf, "%U", n);
break;
case TFUNC:
- Bprint(&outbuf, "func(");
- for(t1 = t->down; t1 != T; t1 = t1->down) {
- if(t1->etype == TVOID)
- break;
- if(t1 != t->down)
- Bprint(&outbuf, ", ");
- printtypename(t1);
- }
- Bprint(&outbuf, ")");
- if(t->link && t->link->etype != TVOID) {
- Bprint(&outbuf, " ");
- printtypename(t->link);
- }
+ // There's no equivalent to a C function in the Go world.
+ Bprint(&outbuf, "unsafe.Pointer");
break;
case TDOT:
Bprint(&outbuf, "...interface{}");
t := (*method)(add(unsafe.Pointer(x), unsafe.Sizeof(uncommontype{})+uintptr(j)*unsafe.Sizeof(method{})))
if t.mtyp == itype && t.name == iname && t.pkgpath == ipkgpath {
if m != nil {
- f := (*func())(add(unsafe.Pointer(m), unsafe.Sizeof(itab{})+uintptr(k)*ptrSize))
- *f = t.ifn
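+ // The itab method table holds raw code pointers, so store ifn directly
+ // instead of writing it through a *func().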
+ *(*unsafe.Pointer)(add(unsafe.Pointer(m), unsafe.Sizeof(itab{})+uintptr(k)*ptrSize)) = t.ifn
}
goto nextimethod
}
return cnew(typ, n);
}
-static void
-setFinalizer(Eface obj, Eface finalizer)
+void
+runtime·setFinalizer_m(void)
{
- byte *base;
- uintptr size;
- FuncType *ft;
- int32 i;
+ FuncVal *fn;
+ void *arg;
uintptr nret;
- Type *t;
Type *fint;
PtrType *ot;
- Iface iface;
- if(obj.type == nil) {
- runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
- goto throw;
- }
- if((obj.type->kind&KindMask) != KindPtr) {
- runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
- goto throw;
- }
- ot = (PtrType*)obj.type;
- // As an implementation detail we do not run finalizers for zero-sized objects,
- // because we use &runtime·zerobase for all such allocations.
- if(ot->elem != nil && ot->elem->size == 0)
- return;
- // The following check is required for cases when a user passes a pointer to composite literal,
- // but compiler makes it a pointer to global. For example:
- // var Foo = &Object{}
- // func main() {
- // runtime.SetFinalizer(Foo, nil)
- // }
- // See issue 7656.
- if((byte*)obj.data < runtime·mheap.arena_start || runtime·mheap.arena_used <= (byte*)obj.data)
- return;
- if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
- // As an implementation detail we allow to set finalizers for an inner byte
- // of an object if it could come from tiny alloc (see mallocgc for details).
- if(ot->elem == nil || (ot->elem->kind&KindNoPointers) == 0 || ot->elem->size >= TinySize) {
- runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.data);
- goto throw;
- }
- }
- if(finalizer.type != nil) {
- runtime·createfing();
- if((finalizer.type->kind&KindMask) != KindFunc)
- goto badfunc;
- ft = (FuncType*)finalizer.type;
- if(ft->dotdotdot || ft->in.len != 1)
- goto badfunc;
- fint = *(Type**)ft->in.array;
- if(fint == obj.type) {
- // ok - same type
- } else if((fint->kind&KindMask) == KindPtr && (fint->x == nil || fint->x->name == nil || obj.type->x == nil || obj.type->x->name == nil) && ((PtrType*)fint)->elem == ((PtrType*)obj.type)->elem) {
- // ok - not same type, but both pointers,
- // one or the other is unnamed, and same element type, so assignable.
- } else if((fint->kind&KindMask) == KindInterface && ((InterfaceType*)fint)->mhdr.len == 0) {
- // ok - satisfies empty interface
- } else if((fint->kind&KindMask) == KindInterface && runtime·ifaceE2I2((InterfaceType*)fint, obj, &iface)) {
- // ok - satisfies non-empty interface
- } else
- goto badfunc;
-
- // compute size needed for return parameters
- nret = 0;
- for(i=0; i<ft->out.len; i++) {
- t = ((Type**)ft->out.array)[i];
- nret = ROUND(nret, t->align) + t->size;
- }
- nret = ROUND(nret, sizeof(void*));
- ot = (PtrType*)obj.type;
- if(!runtime·addfinalizer(obj.data, finalizer.data, nret, fint, ot)) {
- runtime·printf("runtime.SetFinalizer: finalizer already set\n");
- goto throw;
- }
- } else {
- // NOTE: asking to remove a finalizer when there currently isn't one set is OK.
- runtime·removefinalizer(obj.data);
- }
- return;
+ fn = g->m->ptrarg[0];
+ arg = g->m->ptrarg[1];
+ nret = g->m->scalararg[0];
+ fint = g->m->ptrarg[2];
+ ot = g->m->ptrarg[3];
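+ // Clear the argument slots so the M does not keep holding these pointers.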
+ g->m->ptrarg[0] = nil;
+ g->m->ptrarg[1] = nil;
+ g->m->ptrarg[2] = nil;
+ g->m->ptrarg[3] = nil;
-badfunc:
- runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string);
-throw:
- runtime·throw("runtime.SetFinalizer");
+ g->m->scalararg[0] = runtime·addfinalizer(arg, fn, nret, fint, ot);
}
void
-runtime·setFinalizer_m(void)
+runtime·removeFinalizer_m(void)
{
- Eface obj, finalizer;
+ void *p;
- obj.type = g->m->ptrarg[0];
- obj.data = g->m->ptrarg[1];
- finalizer.type = g->m->ptrarg[2];
- finalizer.data = g->m->ptrarg[3];
+ p = g->m->ptrarg[0];
g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
- g->m->ptrarg[2] = nil;
- g->m->ptrarg[3] = nil;
- setFinalizer(obj, finalizer);
+ runtime·removefinalizer(p);
}
// mcallable cache refill
bitBoundary = 1
bitMarked = 2
bitMask = bitBoundary | bitMarked
+
+ mSpanInUse = 0
)
// Page number (address>>pageShift)
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
func SetFinalizer(obj interface{}, finalizer interface{}) {
- // We do just enough work here to make the mcall type safe.
- // The rest is done on the M stack.
e := (*eface)(unsafe.Pointer(&obj))
- typ := e._type
- if typ == nil {
+ etyp := e._type
+ if etyp == nil {
gothrow("runtime.SetFinalizer: first argument is nil")
}
- if typ.kind&kindMask != kindPtr {
- gothrow("runtime.SetFinalizer: first argument is " + *typ._string + ", not pointer")
+ if etyp.kind&kindMask != kindPtr {
+ gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
+ }
+ ot := (*ptrtype)(unsafe.Pointer(etyp))
+ if ot.elem == nil {
+ gothrow("nil elem type!")
+ }
+
+ // As an implementation detail we do not run finalizers for zero-sized objects,
+ // because we use &runtime·zerobase for all such allocations.
+ if ot.elem.size == 0 {
+ return
+ }
+
+ // find the containing object
+ _, base, _ := findObject(e.data)
+
+ // The following check is required for cases when a user passes a pointer to composite
+ // literal, but compiler makes it a pointer to global. For example:
+ // var Foo = &Object{}
+ // func main() {
+ // runtime.SetFinalizer(Foo, nil)
+ // }
+ // See issue 7656.
+ if base == nil {
+ return
+ }
+
+ if e.data != base {
+ // As an implementation detail we allow setting finalizers on an inner byte
+ // of an object if it could have come from a tiny allocation (see mallocgc for details).
+ if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
+ gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block")
+ }
}
f := (*eface)(unsafe.Pointer(&finalizer))
ftyp := f._type
- if ftyp != nil && ftyp.kind&kindMask != kindFunc {
+ if ftyp == nil {
+ // switch to M stack and remove finalizer
+ mp := acquirem()
+ mp.ptrarg[0] = e.data
+ onM(&removeFinalizer_m)
+ releasem(mp)
+ return
+ }
+
+ if ftyp.kind&kindMask != kindFunc {
gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
}
+ ft := (*functype)(unsafe.Pointer(ftyp))
+ ins := *(*[]*_type)(unsafe.Pointer(&ft.in))
+ if ft.dotdotdot || len(ins) != 1 {
+ gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
+ }
+ fint := ins[0]
+ switch {
+ case fint == etyp:
+ // ok - same type
+ goto okarg
+ case fint.kind&kindMask == kindPtr:
+ if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
+ // ok - not same type, but both pointers,
+ // one or the other is unnamed, and same element type, so assignable.
+ goto okarg
+ }
+ case fint.kind&kindMask == kindInterface:
+ ityp := (*interfacetype)(unsafe.Pointer(fint))
+ if len(ityp.mhdr) == 0 {
+ // ok - satisfies empty interface
+ goto okarg
+ }
+ if _, ok := assertE2I2(ityp, obj); ok {
+ goto okarg
+ }
+ }
+ gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
+okarg:
+ // compute size needed for return parameters
+ nret := uintptr(0)
+ for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) {
+ nret = round(nret, uintptr(t.align)) + uintptr(t.size)
+ }
+ nret = round(nret, ptrSize)
+
+ // make sure we have a finalizer goroutine
+ createfing()
+
+ // switch to M stack to add finalizer record
mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(typ)
+ mp.ptrarg[0] = f.data
mp.ptrarg[1] = e.data
- mp.ptrarg[2] = unsafe.Pointer(ftyp)
- mp.ptrarg[3] = f.data
+ mp.scalararg[0] = nret
+ mp.ptrarg[2] = unsafe.Pointer(fint)
+ mp.ptrarg[3] = unsafe.Pointer(ot)
onM(&setFinalizer_m)
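+ // setFinalizer_m returns addfinalizer's result in scalararg[0]; 1 means the finalizer was installed.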
+ if mp.scalararg[0] != 1 {
+ gothrow("runtime.SetFinalizer: finalizer already set")
+ }
releasem(mp)
}
+
+// round n up to a multiple of a. a must be a power of 2.
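+// For example, round(10, 8) == 16 and round(16, 8) == 16.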
+func round(n, a uintptr) uintptr {
+ return (n + a - 1) &^ (a - 1)
+}
+
+// Look up pointer v in heap. Return the span containing the object,
+// the start of the object, and the size of the object. If the object
+// does not exist, return nil, nil, 0.
+func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
+ c := gomcache()
+ c.local_nlookup++
+ if ptrSize == 4 && c.local_nlookup >= 1<<30 {
+ // purge cache stats to prevent overflow
+ lock(&mheap_.lock)
+ purgecachedstats(c)
+ unlock(&mheap_.lock)
+ }
+
+ // find span
+ arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
+ arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
+ if uintptr(v) < arena_start || uintptr(v) >= arena_used {
+ return
+ }
+ p := uintptr(v) >> pageShift
+ q := p - arena_start>>pageShift
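+ // q indexes mheap_.spans, which has one entry per arena page.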
+ s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
+ if s == nil {
+ return
+ }
+ x = unsafe.Pointer(uintptr(s.start) << pageShift)
+
+ if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
+ s = nil
+ x = nil
+ return
+ }
+
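+ // x is currently the span base; for spans with a size class, round v
+ // down to the start of the object containing it.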
+ n = uintptr(s.elemsize)
+ if s.sizeclass != 0 {
+ x = add(x, (uintptr(v)-uintptr(x))/n*n)
+ }
+ return
+}
+
+var fingCreate uint32
+
+func createfing() {
+ // start the finalizer goroutine exactly once
+ if fingCreate == 0 && cas(&fingCreate, 0, 1) {
+ go runfinq()
+ }
+}
+
+// This is the goroutine that runs all of the finalizers.
+func runfinq() {
+ var (
+ frame unsafe.Pointer
+ framecap uintptr
+ )
+
+ for {
+ lock(&finlock)
+ fb := finq
+ finq = nil
+ if fb == nil {
+ gp := getg()
+ fing = gp
+ fingwait = true
+ gp.issystem = true
+ goparkunlock(&finlock, "finalizer wait")
+ gp.issystem = false
+ continue
+ }
+ unlock(&finlock)
+ if raceenabled {
+ racefingo()
+ }
+ for fb != nil {
+ for i := int32(0); i < fb.cnt; i++ {
+ f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{})))
+
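+ // The frame holds the finalizer's single argument (at most an eface)
+ // followed by f.nret bytes of return values.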
+ framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret)
+ if framecap < framesz {
+ // The frame does not contain pointers interesting for GC;
+ // all not-yet-finalized objects are stored in finq.
+ // If we do not mark it as flagNoScan,
+ // the last finalized object is not collected.
+ frame = gomallocgc(framesz, nil, flagNoScan)
+ framecap = framesz
+ }
+
+ if f.fint == nil {
+ gothrow("missing type in runfinq")
+ }
+ switch f.fint.kind & kindMask {
+ case kindPtr:
+ // direct use of pointer
+ *(*unsafe.Pointer)(frame) = f.arg
+ case kindInterface:
+ ityp := (*interfacetype)(unsafe.Pointer(f.fint))
+ // set up with empty interface
+ (*eface)(frame)._type = &f.ot.typ
+ (*eface)(frame).data = f.arg
+ if len(ityp.mhdr) != 0 {
+ // convert to interface with methods
+ // this conversion is guaranteed to succeed - we checked in SetFinalizer
+ *(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
+ }
+ default:
+ gothrow("bad kind in runfinq")
+ }
+ reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
+
+ // drop finalizer queue references to finalized object
+ f.fn = nil
+ f.arg = nil
+ f.ot = nil
+ }
+ fb.cnt = 0
+ next := fb.next
+ lock(&finlock)
+ fb.next = finc
+ finc = fb
+ unlock(&finlock)
+ fb = next
+ }
+ }
+}
void runtime·createfing(void);
G* runtime·wakefing(void);
void runtime·getgcmask(byte*, Type*, byte**, uintptr*);
+
+typedef struct Finalizer Finalizer;
+struct Finalizer
+{
+ FuncVal *fn; // function to call
+ void *arg; // ptr to object
+ uintptr nret; // bytes of return values from fn
+ Type *fint; // type of first argument of fn
+ PtrType *ot; // type of ptr to object
+};
+
+typedef struct FinBlock FinBlock;
+struct FinBlock
+{
+ FinBlock *alllink; // link on allfin, the list of all blocks
+ FinBlock *next; // next block on finq or finc
+ int32 cnt; // number of finalizers stored in fin
+ int32 cap; // capacity of fin
+ Finalizer fin[1]; // finalizers; cap entries fit in the block
+};
+extern Mutex runtime·finlock; // protects the following variables
extern G* runtime·fing;
extern bool runtime·fingwait;
extern bool runtime·fingwake;
+extern FinBlock *runtime·finq; // list of finalizers that are to be executed
+extern FinBlock *runtime·finc; // cache of free blocks
void runtime·setprofilebucket(void *p, Bucket *b);
byte* obj[(WorkbufSize-sizeof(LFNode)-sizeof(uintptr))/PtrSize];
};
-typedef struct Finalizer Finalizer;
-struct Finalizer
-{
- FuncVal *fn;
- void *arg;
- uintptr nret;
- Type *fint;
- PtrType *ot;
-};
-
-typedef struct FinBlock FinBlock;
-struct FinBlock
-{
- FinBlock *alllink;
- FinBlock *next;
- int32 cnt;
- int32 cap;
- Finalizer fin[1];
-};
-
extern byte runtime·data[];
extern byte runtime·edata[];
extern byte runtime·bss[];
extern byte runtime·gcdata[];
extern byte runtime·gcbss[];
-static Mutex finlock; // protects the following variables
-static FinBlock *finq; // list of finalizers that are to be executed
-static FinBlock *finc; // cache of free blocks
-static FinBlock *allfin; // list of all blocks
+Mutex runtime·finlock; // protects the following variables
+G* runtime·fing; // goroutine that runs finalizers
+FinBlock* runtime·finq; // list of finalizers that are to be executed
+FinBlock* runtime·finc; // cache of free blocks
bool runtime·fingwait;
bool runtime·fingwake;
+static FinBlock *allfin; // list of all blocks
+
BitVector runtime·gcdatamask;
BitVector runtime·gcbssmask;
static Mutex gclock;
-static void runfinq(void);
static void bgsweep(void);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void scanstack(G *gp);
static BitVector unrollglobgcprog(byte *prog, uintptr size);
-static FuncVal runfinqv = {runfinq};
static FuncVal bgsweepv = {bgsweep};
static struct {
FinBlock *block;
Finalizer *f;
- runtime·lock(&finlock);
- if(finq == nil || finq->cnt == finq->cap) {
- if(finc == nil) {
- finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
- finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
- finc->alllink = allfin;
- allfin = finc;
+ runtime·lock(&runtime·finlock);
+ if(runtime·finq == nil || runtime·finq->cnt == runtime·finq->cap) {
+ if(runtime·finc == nil) {
+ runtime·finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
+ runtime·finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
+ runtime·finc->alllink = allfin;
+ allfin = runtime·finc;
}
- block = finc;
- finc = block->next;
- block->next = finq;
- finq = block;
+ block = runtime·finc;
+ runtime·finc = block->next;
+ block->next = runtime·finq;
+ runtime·finq = block;
}
- f = &finq->fin[finq->cnt];
- finq->cnt++;
+ f = &runtime·finq->fin[runtime·finq->cnt];
+ runtime·finq->cnt++;
f->fn = fn;
f->nret = nret;
f->fint = fint;
f->ot = ot;
f->arg = p;
runtime·fingwake = true;
- runtime·unlock(&finlock);
+ runtime·unlock(&runtime·finlock);
}
void
runtime·throw("gchelper not running on g0 stack");
}
-static void
-runfinq(void)
-{
- Finalizer *f;
- FinBlock *fb, *next;
- byte *frame;
- uint32 framesz, framecap, i;
- Eface *ef, ef1;
-
- // This function blocks for long periods of time, and because it is written in C
- // we have no liveness information. Zero everything so that uninitialized pointers
- // do not cause memory leaks.
- f = nil;
- fb = nil;
- next = nil;
- frame = nil;
- framecap = 0;
- framesz = 0;
- i = 0;
- ef = nil;
- ef1.type = nil;
- ef1.data = nil;
-
- // force flush to memory
- USED(&f);
- USED(&fb);
- USED(&next);
- USED(&framesz);
- USED(&i);
- USED(&ef);
- USED(&ef1);
-
- for(;;) {
- runtime·lock(&finlock);
- fb = finq;
- finq = nil;
- if(fb == nil) {
- runtime·fingwait = true;
- g->issystem = true;
- runtime·parkunlock(&finlock, runtime·gostringnocopy((byte*)"finalizer wait"));
- g->issystem = false;
- continue;
- }
- runtime·unlock(&finlock);
- if(raceenabled)
- runtime·racefingo();
- for(; fb; fb=next) {
- next = fb->next;
- for(i=0; i<fb->cnt; i++) {
- f = &fb->fin[i];
- framesz = sizeof(Eface) + f->nret;
- if(framecap < framesz) {
- // The frame does not contain pointers interesting for GC,
- // all not yet finalized objects are stored in finq.
- // If we do not mark it as FlagNoScan,
- // the last finalized object is not collected.
- frame = runtime·mallocgc(framesz, 0, FlagNoScan);
- framecap = framesz;
- }
- if(f->fint == nil)
- runtime·throw("missing type in runfinq");
- if((f->fint->kind&KindMask) == KindPtr) {
- // direct use of pointer
- *(void**)frame = f->arg;
- } else if(((InterfaceType*)f->fint)->mhdr.len == 0) {
- // convert to empty interface
- ef = (Eface*)frame;
- ef->type = &f->ot->typ;
- ef->data = f->arg;
- } else {
- // convert to interface with methods, via empty interface.
- ef1.type = &f->ot->typ;
- ef1.data = f->arg;
- if(!runtime·ifaceE2I2((InterfaceType*)f->fint, ef1, (Iface*)frame))
- runtime·throw("invalid type conversion in runfinq");
- }
- reflect·call(f->fn, frame, framesz, framesz);
- f->fn = nil;
- f->arg = nil;
- f->ot = nil;
- }
- fb->cnt = 0;
- runtime·lock(&finlock);
- fb->next = finc;
- finc = fb;
- runtime·unlock(&finlock);
- }
-
- // Zero everything that's dead, to avoid memory leaks.
- // See comment at top of function.
- f = nil;
- fb = nil;
- next = nil;
- i = 0;
- ef = nil;
- ef1.type = nil;
- ef1.data = nil;
- runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible
- }
-}
-
-void
-runtime·createfing(void)
-{
- if(runtime·fing != nil)
- return;
- // Here we use gclock instead of finlock,
- // because newproc1 can allocate, which can cause on-demand span sweep,
- // which can queue finalizers, which would deadlock.
- runtime·lock(&gclock);
- if(runtime·fing == nil)
- runtime·fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
- runtime·unlock(&gclock);
-}
-
-void
-runtime·createfingM(G *gp)
-{
- runtime·createfing();
- runtime·gogo(&gp->sched);
-}
-
G*
runtime·wakefing(void)
{
G *res;
res = nil;
- runtime·lock(&finlock);
+ runtime·lock(&runtime·finlock);
if(runtime·fingwait && runtime·fingwake) {
runtime·fingwait = false;
runtime·fingwake = false;
res = runtime·fing;
}
- runtime·unlock(&finlock);
+ runtime·unlock(&runtime·finlock);
return res;
}
gothrow("gopark: bad g status")
}
mp.waitlock = lock
- mp.waitunlockf = *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&unlockf))
+ mp.waitunlockf = unlockf
gp.waitreason = reason
releasem(mp)
// can't do anything that might move the G between Ms here.
//go:noescape
func racereleaseg(gp *g, addr unsafe.Pointer)
+func racefingo()
+
// Should be a built-in for unsafe.Pointer?
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(p) + x)
mprofMalloc_m,
gc_m,
setFinalizer_m,
+ removeFinalizer_m,
markallocated_m,
unrollgcprog_m,
unrollgcproginplace_m,
func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
func newstackcall(fv *funcval, addr unsafe.Pointer, size uint32)
+func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
func procyield(cycles uint32)
func osyield()
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
func noteclear(n *note)
func lock(lk *mutex)
func unlock(lk *mutex)
+func purgecachedstats(c *mcache)
//go:noescape
func cas(ptr *uint32, old, new uint32) bool
TEXT bytes·Compare(SB),NOSPLIT,$0-0
JMP runtime·cmpbytes(SB)
+
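+// reflectcall gives runtime Go code a way to reach the existing reflect·call assembly.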
+TEXT runtime·reflectcall(SB), NOSPLIT, $0-0
+ JMP reflect·call(SB)