// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-13
+TEXT runtime·casp1(SB), NOSPLIT, $0-13
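+// The 1 suffix marks the raw compare-and-swap primitive; a Go wrapper
+// named casp (see the Go changes later in this patch) adds the GC write
+// barrier on top of it.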
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
MOVL AX, ret+8(FP)
RET
-TEXT runtime·xchgp(SB), NOSPLIT, $0-12
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
JNZ again
RET
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-8
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-25
+TEXT runtime·casp1(SB), NOSPLIT, $0-25
MOVQ ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
MOVQ AX, ret+16(FP)
RET
-TEXT runtime·xchgp(SB), NOSPLIT, $0-24
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-24
MOVQ ptr+0(FP), BX
MOVQ new+8(FP), AX
XCHGQ AX, 0(BX)
JNZ again
RET
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-16
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-17
+TEXT runtime·casp1(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
MOVQ AX, ret+16(FP)
RET
-TEXT runtime·xchgp(SB), NOSPLIT, $0-12
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
JNZ again
RET
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-8
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-25
+TEXT runtime·casp1(SB), NOSPLIT, $0-25
BR runtime·cas64(SB)
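+// Pointers are 64 bits wide on power64, so the raw pointer operations
+// simply branch to their uint64 counterparts.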
// uint32 xadd(uint32 volatile *val, int32 delta)
MOVD R3, ret+16(FP)
RETURN
-TEXT runtime·xchgp(SB), NOSPLIT, $0-24
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-24
BR runtime·xchg64(SB)
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RETURN
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-16
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
BR runtime·atomicstore64(SB)
TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
//go:noescape
func xchg64(ptr *uint64, new uint64) uint64
-//go:noescape
-func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+// Cannot use noescape here: ptr does not escape, but new does.
+// Instead, use noescape(ptr) in the wrapper below.
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ old := xchgp1(noescape(ptr), new)
+ writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
+ return old
+}
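+
+// noescape hides ptr from escape analysis so that passing it to the
+// assembly primitive does not force the pointee onto the heap. For
+// reference, a sketch of the helper (it lives in stubs.go; shown here
+// only as a comment, not added by this change):
+//
+//	//go:nosplit
+//	func noescape(p unsafe.Pointer) unsafe.Pointer {
+//		x := uintptr(p)
+//		return unsafe.Pointer(x ^ 0)
+//	}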
//go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
func atomicstore64(ptr *uint64, val uint64)
-//go:noescape
-func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
+// Cannot use noescape here: ptr does not escape, but val does.
+// Instead, use noescape(ptr) in the wrapper below.
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), val)
+ // TODO(rsc): Why does the compiler think writebarrierptr_nostore's dst argument escapes?
+ writebarrierptr_nostore((*uintptr)(noescape(ptr)), uintptr(val))
+}
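+
+// Note that ptr is hidden from escape analysis twice here: once for the
+// call to atomicstorep1 and, per the TODO above, once more because the
+// compiler concludes that writebarrierptr_nostore's dst argument escapes.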
+
+// Cannot use noescape here: ptr does not escape, but new does.
+// Instead, use noescape(ptr) in the wrapper below.
+func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+ ok := casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), old, new)
+ if !ok {
+ return false
+ }
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+ return true
+}
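+
+// Ordering note for all three wrappers above: the assembly primitive
+// performs the store, and the write barrier afterwards only records it
+// for the garbage collector. In casp the barrier runs only when the swap
+// succeeded, since a failed CAS publishes nothing.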
slot = (byte**)g->m->scalararg[0];
ptr = (byte*)g->m->scalararg[1];
- *slot = ptr;
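+// The store has already been performed by the caller (writebarrierptr
+// or one of the atomic pointer wrappers); this code only records the
+// pointer for the garbage collector.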
switch(runtime·gcphase) {
default:
runtime·throw("gcphasework in bad gcphase");
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
+ *dst = src
+ writebarrierptr_nostore(dst, src)
+}
+
+// Like writebarrierptr, but the store has already been applied.
+// Do not reapply.
+//go:nosplit
+func writebarrierptr_nostore(dst *uintptr, src uintptr) {
+ if getg() == nil { // very low-level startup
+ return
+ }
+
if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
onM(func() { gothrow("bad pointer in write barrier") })
}
mp := acquirem()
if mp.inwb {
- *dst = src
releasem(mp)
return
}
mp.scalararg[1] = oldscalar1
mp.inwb = false
releasem(mp)
- // *dst = src is done inside of the write barrier.
}
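+
+// Summary of the split above: ordinary pointer stores keep going through
+// writebarrierptr, which applies the store and then records it; callers
+// that have already performed the store, such as the atomic pointer
+// wrappers, call writebarrierptr_nostore directly.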
//go:nosplit
unlockextra(mp);
}
-#define MLOCKED ((M*)1)
+#define MLOCKED 1
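+// MLOCKED is now a plain uintptr sentinel rather than a fake M pointer:
+// casp implies a write barrier and must only see real pointer values, so
+// the extram word is manipulated with casuintptr below.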
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
static M*
lockextra(bool nilokay)
{
- M *mp;
+ uintptr mpx;
void (*yield)(void);
for(;;) {
- mp = runtime·atomicloadp(&runtime·extram);
- if(mp == MLOCKED) {
+ mpx = runtime·atomicloaduintptr((uintptr*)&runtime·extram);
+ if(mpx == MLOCKED) {
yield = runtime·osyield;
yield();
continue;
}
- if(mp == nil && !nilokay) {
+ if(mpx == 0 && !nilokay) {
runtime·usleep(1);
continue;
}
- if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
+ if(!runtime·casuintptr((uintptr*)&runtime·extram, mpx, MLOCKED)) {
yield = runtime·osyield;
yield();
continue;
}
break;
}
- return mp;
+ return (M*)mpx;
}
#pragma textflag NOSPLIT
bool runtime·cas(uint32*, uint32, uint32);
bool runtime·cas64(uint64*, uint64, uint64);
bool runtime·casp(void**, void*, void*);
+bool runtime·casuintptr(uintptr*, uintptr, uintptr);
// Don't confuse this with the x86 XADD instruction;
// this one is actually 'addx', that is, add-and-fetch.
uint32 runtime·xadd(uint32 volatile*, int32);
s.len = runtime·findnull(str);
while(true) {
ms = runtime·maxstring;
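+// maxstring is a uintptr-valued length, not a pointer, so casuintptr is
+// the right primitive; casp would imply a write barrier on a non-pointer
+// value.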
- if(s.len <= ms || runtime·casp((void**)&runtime·maxstring, (void*)ms, (void*)s.len))
+ if(s.len <= ms || runtime·casuintptr(&runtime·maxstring, ms, s.len))
return s;
}
}
//go:noescape
func cas(ptr *uint32, old, new uint32) bool
-//go:noescape
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
-
//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool