// Create an extra M for callbacks on threads not created by Go on first cgo call.
if needextram == 1 && cas(&needextram, 1, 0) {
- newextram()
+ onM(newextram)
}
/*
gp := getg()
if gp.m.needextram {
gp.m.needextram = false
- newextram()
+ onM(newextram)
}
// Add entry to defer stack in case of panic.
runtime·ncpu = out;
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
*(int32*)0x1005 = 0x1005;
}
+static void badfutexwakeup(void);
+
+#pragma textflag NOSPLIT
void
runtime·futexwakeup(uint32 *addr, uint32 cnt)
{
int32 ret;
+ void (*fn)(void);
ret = runtime·sys_umtx_wakeup(addr, cnt);
if(ret >= 0)
return;
- runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
+ g->m->ptrarg[0] = addr;
+ g->m->scalararg[0] = ret;
+ fn = badfutexwakeup;
+ if(g == g->m->gsignal)
+ fn();
+ else
+ runtime·onM(&fn);
*(int32*)0x1006 = 0x1006;
}
+static void
+badfutexwakeup(void)
+{
+ void *addr;
+ int32 ret;
+
+ addr = g->m->ptrarg[0];
+ ret = g->m->scalararg[0];
+ runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
+}
+
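The error path above shows the convention this change uses whenever work has to leave a nosplit path: arguments are parked in the per-M ptrarg/scalararg slots, a zero-argument function pointer is handed to runtime·onM (or called directly if we are already on the signal goroutine), and the callee unpacks the slots. A self-contained Go sketch of that shape follows; fakeM, badWakeup, and dispatch are illustrative names, not runtime APIs:

package main

import (
	"fmt"
	"unsafe"
)

// fakeM stands in for the per-M argument slots (m->ptrarg, m->scalararg).
type fakeM struct {
	ptrarg    [1]unsafe.Pointer
	scalararg [2]uintptr
}

var m fakeM

// badWakeup unpacks its arguments from the slots, like badfutexwakeup above.
func badWakeup() {
	addr := m.ptrarg[0]
	ret := int32(m.scalararg[0]) // truncated back to the 32-bit value stored
	fmt.Printf("wakeup failed: addr=%p ret=%d\n", addr, ret)
}

// dispatch stands in for runtime·onM: run a zero-argument function where the
// caller's constraints (nosplit, tiny stack budget) no longer apply.
func dispatch(fn func()) { fn() }

func main() {
	var word uint32
	m.ptrarg[0] = unsafe.Pointer(&word)
	m.scalararg[0] = uintptr(uint32(0xFFFFFFFF)) // e.g. a return value of -1, truncated
	dispatch(badWakeup)
}

The same pattern repeats below for the FreeBSD and Linux futexwakeup paths and for the NaCl semaphore wrappers.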
void runtime·lwp_start(void*);
void
runtime·ncpu = getncpu();
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
*(int32*)0x1005 = 0x1005;
}
+static void badfutexwakeup(void);
+
+#pragma textflag NOSPLIT
void
runtime·futexwakeup(uint32 *addr, uint32 cnt)
{
int32 ret;
+ void (*fn)(void);
ret = runtime·sys_umtx_op(addr, UMTX_OP_WAKE_PRIVATE, cnt, nil, nil);
if(ret >= 0)
return;
- runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
+ g->m->ptrarg[0] = addr;
+ g->m->scalararg[0] = ret;
+ fn = badfutexwakeup;
+ if(g == g->m->gsignal)
+ fn();
+ else
+ runtime·onM(&fn);
*(int32*)0x1006 = 0x1006;
}
+static void
+badfutexwakeup(void)
+{
+ void *addr;
+ int32 ret;
+
+ addr = g->m->ptrarg[0];
+ ret = g->m->scalararg[0];
+ runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
+}
+
void runtime·thr_start(void*);
void
runtime·ncpu = getncpu();
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
runtime·futex(addr, FUTEX_WAIT, val, &ts, nil, 0);
}
+static void badfutexwakeup(void);
+
// If any procs are sleeping on addr, wake up at most cnt.
+#pragma textflag NOSPLIT
void
runtime·futexwakeup(uint32 *addr, uint32 cnt)
{
int64 ret;
+ void (*fn)(void);
ret = runtime·futex(addr, FUTEX_WAKE, cnt, nil, nil, 0);
-
if(ret >= 0)
return;
// I don't know that futex wakeup can return
// EAGAIN or EINTR, but if it does, it would be
// safe to loop and call futex again.
- runtime·printf("futexwakeup addr=%p returned %D\n", addr, ret);
+ g->m->ptrarg[0] = addr;
+ g->m->scalararg[0] = (int32)ret; // truncated but fine
+ fn = badfutexwakeup;
+ if(g == g->m->gsignal)
+ fn();
+ else
+ runtime·onM(&fn);
*(int32*)0x1006 = 0x1006;
}
+static void
+badfutexwakeup(void)
+{
+ void *addr;
+ int64 ret;
+
+ addr = g->m->ptrarg[0];
+ ret = (int32)g->m->scalararg[0];
+ runtime·printf("futexwakeup addr=%p returned %D\n", addr, ret);
+}
+
extern int32 runtime·sched_getaffinity(uintptr pid, uintptr len, uintptr *buf);
static int32
getproccount(void)
byte* runtime·startup_random_data;
uint32 runtime·startup_random_data_len;
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
*(int32*)0 = 0;
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
}
}
-uintptr
-runtime·semacreate(void)
+static void
+semacreate(void)
{
int32 mu, cond;
runtime·throw("semacreate");
}
g->m->waitsemalock = mu;
- return cond; // assigned to m->waitsema
+ g->m->scalararg[0] = cond; // assigned to m->waitsema
}
#pragma textflag NOSPLIT
-int32
-runtime·semasleep(int64 ns)
+uint32
+runtime·semacreate(void)
+{
+ void (*fn)(void);
+ uint32 x;
+
+ fn = semacreate;
+ runtime·onM(&fn);
+ x = g->m->scalararg[0];
+ g->m->scalararg[0] = 0;
+ return x;
+}
+
+static void
+semasleep(void)
{
int32 ret;
+ int64 ns;
+
+ ns = (int64)(uint32)g->m->scalararg[0] | (int64)(uint32)g->m->scalararg[1]<<32;
+ g->m->scalararg[0] = 0;
+ g->m->scalararg[1] = 0;
ret = runtime·nacl_mutex_lock(g->m->waitsemalock);
if(ret < 0) {
if(g->m->waitsemacount > 0) {
g->m->waitsemacount = 0;
runtime·nacl_mutex_unlock(g->m->waitsemalock);
- return 0;
+ g->m->scalararg[0] = 0;
+ return;
}
while(g->m->waitsemacount == 0) {
ret = runtime·nacl_cond_timed_wait_abs(g->m->waitsema, g->m->waitsemalock, &ts);
if(ret == -ETIMEDOUT) {
runtime·nacl_mutex_unlock(g->m->waitsemalock);
- return -1;
+ g->m->scalararg[0] = -1;
+ return;
}
if(ret < 0) {
//runtime·printf("nacl_cond_timed_wait_abs: error %d\n", -ret);
g->m->waitsemacount = 0;
runtime·nacl_mutex_unlock(g->m->waitsemalock);
- return 0;
+ g->m->scalararg[0] = 0;
}
-void
-runtime·semawakeup(M *mp)
+#pragma textflag NOSPLIT
+int32
+runtime·semasleep(int64 ns)
+{
+ int32 r;
+ void (*fn)(void);
+
+ g->m->scalararg[0] = (uint32)ns;
+ g->m->scalararg[1] = (uint32)(ns>>32);
+ fn = semasleep;
+ runtime·onM(&fn);
+ r = g->m->scalararg[0];
+ g->m->scalararg[0] = 0;
+ return r;
+}
+
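Because the scalararg slots carry 32-bit values here, the 64-bit ns argument is split into low and high words by the wrapper and reassembled inside semasleep. A quick stand-alone check of that round trip, using the same expressions:

package main

import "fmt"

func main() {
	ns := int64(-1) // the "block forever" sentinel passed to semasleep
	lo := uint32(ns)
	hi := uint32(ns >> 32)
	// Mirrors: ns = (int64)(uint32)scalararg[0] | (int64)(uint32)scalararg[1]<<32
	back := int64(lo) | int64(hi)<<32
	fmt.Println(back == ns) // true
}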
+static void
+semawakeup(void)
{
int32 ret;
+ M *mp;
+ mp = g->m->ptrarg[0];
+ g->m->ptrarg[0] = nil;
+
ret = runtime·nacl_mutex_lock(mp->waitsemalock);
if(ret < 0) {
//runtime·printf("nacl_mutex_lock: error %d\n", -ret);
runtime·nacl_mutex_unlock(mp->waitsemalock);
}
+#pragma textflag NOSPLIT
+void
+runtime·semawakeup(M *mp)
+{
+ void (*fn)(void);
+
+ g->m->ptrarg[0] = mp;
+ fn = semawakeup;
+ runtime·onM(&fn);
+}
+
uintptr
runtime·memlimit(void)
{
runtime·ncpu = getncpu();
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
runtime·ncpu = getncpu();
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
*(int32*)0 = 0;
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
}
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
}
}
+#pragma textflag NOSPLIT
void
runtime·get_random_data(byte **rnd, int32 *rnd_len)
{
g->lockedm = g->m;
}
+#pragma textflag NOSPLIT
void
runtime·LockOSThread(void)
{
lockOSThread();
}
+#pragma textflag NOSPLIT
void
runtime·lockOSThread(void)
{
g->lockedm = nil;
}
+#pragma textflag NOSPLIT
void
runtime·UnlockOSThread(void)
{
unlockOSThread();
}
+static void badunlockOSThread(void);
+
+#pragma textflag NOSPLIT
void
runtime·unlockOSThread(void)
{
- if(g->m->locked < LockInternal)
- runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
+ void (*fn)(void);
+
+ if(g->m->locked < LockInternal) {
+ fn = badunlockOSThread;
+ runtime·onM(&fn);
+ }
g->m->locked -= LockInternal;
unlockOSThread();
}
+static void
+badunlockOSThread(void)
+{
+ runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
+}
+
bool
runtime·lockedOSThread(void)
{
void runtime·racecall(void(*f)(void), ...);
// checks if the address has shadow (i.e. heap or data/bss)
+#pragma textflag NOSPLIT
static bool
isvalidaddr(uintptr addr)
{
return false;
}
+#pragma textflag NOSPLIT
uintptr
runtime·raceinit(void)
{
return racectx;
}
+#pragma textflag NOSPLIT
void
runtime·racefini(void)
{
runtime·racecall(__tsan_fini);
}
+#pragma textflag NOSPLIT
void
runtime·racemapshadow(void *addr, uintptr size)
{
runtime·racecall(__tsan_malloc, p, sz);
}
+#pragma textflag NOSPLIT
uintptr
runtime·racegostart(void *pc)
{
return racectx;
}
+#pragma textflag NOSPLIT
void
runtime·racegoend(void)
{
runtime·racecall(__tsan_go_end, g->racectx);
}
+#pragma textflag NOSPLIT
void
runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc)
{
runtime·racefuncexit();
}
+#pragma textflag NOSPLIT
void
runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc)
{
runtime·racefuncexit();
}
+#pragma textflag NOSPLIT
void
runtime·racewriteobjectpc(void *addr, Type *t, void *callpc, void *pc)
{
runtime·racewritepc(addr, callpc, pc);
}
+#pragma textflag NOSPLIT
void
runtime·racereadobjectpc(void *addr, Type *t, void *callpc, void *pc)
{
runtime·racereadpc(addr, callpc, pc);
}
+#pragma textflag NOSPLIT
void
runtime·raceacquire(void *addr)
{
runtime·raceacquireg(g, addr);
}
+#pragma textflag NOSPLIT
void
runtime·raceacquireg(G *gp, void *addr)
{
runtime·racecall(__tsan_acquire, gp->racectx, addr);
}
+#pragma textflag NOSPLIT
void
runtime·racerelease(void *addr)
{
runtime·racereleaseg(g, addr);
}
+#pragma textflag NOSPLIT
void
runtime·racereleaseg(G *gp, void *addr)
{
runtime·racecall(__tsan_release, gp->racectx, addr);
}
+#pragma textflag NOSPLIT
void
runtime·racereleasemerge(void *addr)
{
runtime·racereleasemergeg(g, addr);
}
+#pragma textflag NOSPLIT
void
runtime·racereleasemergeg(G *gp, void *addr)
{
runtime·racecall(__tsan_release_merge, gp->racectx, addr);
}
+#pragma textflag NOSPLIT
void
runtime·racefingo(void)
{
}
// func RaceAcquire(addr unsafe.Pointer)
+#pragma textflag NOSPLIT
void
runtime·RaceAcquire(void *addr)
{
}
// func RaceRelease(addr unsafe.Pointer)
+#pragma textflag NOSPLIT
void
runtime·RaceRelease(void *addr)
{
}
// func RaceReleaseMerge(addr unsafe.Pointer)
+#pragma textflag NOSPLIT
void
runtime·RaceReleaseMerge(void *addr)
{
}
// func RaceDisable()
+#pragma textflag NOSPLIT
void
runtime·RaceDisable(void)
{
}
// func RaceEnable()
+#pragma textflag NOSPLIT
void
runtime·RaceEnable(void)
{
if(--g->raceignore == 0)
runtime·racecall(__tsan_go_ignore_sync_end, g->racectx);
}
-
-typedef struct SymbolizeContext SymbolizeContext;
-struct SymbolizeContext
-{
- uintptr pc;
- int8* func;
- int8* file;
- uintptr line;
- uintptr off;
- uintptr res;
-};
-
-// Callback from C into Go, runs on g0.
-void
-runtime·racesymbolize(SymbolizeContext *ctx)
-{
- Func *f;
- String file;
-
- f = runtime·findfunc(ctx->pc);
- if(f == nil) {
- ctx->func = "??";
- ctx->file = "-";
- ctx->line = 0;
- ctx->off = ctx->pc;
- ctx->res = 1;
- return;
- }
- ctx->func = runtime·funcname(f);
- ctx->line = runtime·funcline(f, ctx->pc, &file);
- ctx->file = (int8*)file.str; // assume zero-terminated
- ctx->off = ctx->pc - f->entry;
- ctx->res = 1;
-}
if kind == kindArray || kind == kindStruct {
// for composite objects we have to read every address
// because a write might happen to any subobject.
- racereadrangepc(addr, int(t.size), callerpc, pc)
+ racereadrangepc(addr, t.size, callerpc, pc)
} else {
// for non-composite objects we can read just the start
// address, as any write must write the first byte.
if kind == kindArray || kind == kindStruct {
// for composite objects we have to write every address
// because a write might happen to any subobject.
- racewriterangepc(addr, int(t.size), callerpc, pc)
+ racewriterangepc(addr, t.size, callerpc, pc)
} else {
// for non-composite objects we can write just the start
// address, as any write must write the first byte.
racewritepc(addr, callerpc, pc)
}
}
+
+//go:noescape
+func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
+
+//go:noescape
+func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
+
+//go:noescape
+func racereadrangepc(addr unsafe.Pointer, len uintptr, callpc, pc uintptr)
+
+//go:noescape
+func racewriterangepc(addr unsafe.Pointer, len uintptr, callpc, pc uintptr)
+
+//go:noescape
+func raceacquire(addr unsafe.Pointer)
+
+//go:noescape
+func racerelease(addr unsafe.Pointer)
+
+//go:noescape
+func raceacquireg(gp *g, addr unsafe.Pointer)
+
+//go:noescape
+func racereleaseg(gp *g, addr unsafe.Pointer)
+
+func racefingo()
+
+//go:noescape
+func racemalloc(p unsafe.Pointer, size uintptr)
+
+//go:noescape
+func racereleasemerge(addr unsafe.Pointer)
+
+type symbolizeContext struct {
+ pc uintptr
+ fn *byte
+ file *byte
+ line uintptr
+ off uintptr
+ res uintptr
+}
+
+var qq = [...]byte{'?', '?', 0}
+var dash = [...]byte{'-', 0}
+
+// Callback from C into Go, runs on g0.
+func racesymbolize(ctx *symbolizeContext) {
+ f := findfunc(ctx.pc)
+ if f == nil {
+ ctx.fn = &qq[0]
+ ctx.file = &dash[0]
+ ctx.line = 0
+ ctx.off = ctx.pc
+ ctx.res = 1
+ return
+ }
+
+ ctx.fn = funcname(f)
+ var file string
+ ctx.line = uintptr(funcline(f, ctx.pc, &file))
+ ctx.file = &bytes(file)[0] // assume NUL-terminated
+ ctx.off = ctx.pc - f.entry
+ ctx.res = 1
+ return
+}
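racesymbolize now lives in Go but still fills in a struct that the C/TSan side reads, so names are handed back as NUL-terminated byte pointers (&qq[0], &dash[0], &bytes(file)[0]) rather than Go string headers. A small stand-alone illustration of that convention; symCtx and fill are hypothetical stand-ins, not the runtime's types:

package main

import "fmt"

// symCtx mirrors the idea of symbolizeContext: the reader expects plain
// NUL-terminated C strings, so the field is *byte rather than a Go string.
type symCtx struct {
	fn   *byte
	line uintptr
}

var unknown = [...]byte{'?', '?', 0} // like qq above: "??" plus a terminator

func fill(ctx *symCtx, name string, line int) {
	if name == "" {
		ctx.fn = &unknown[0]
		ctx.line = 0
		return
	}
	buf := append([]byte(name), 0) // ensure a trailing NUL for the C reader
	ctx.fn = &buf[0]
	ctx.line = uintptr(line)
}

func main() {
	var ctx symCtx
	fill(&ctx, "main.main", 42)
	fmt.Println(*ctx.fn == 'm', ctx.line) // true 42
}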
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Stub implementation of the race detector API.
-// +build !race
-
-#include "runtime.h"
-
-uintptr
-runtime·raceinit(void)
-{
- return 0;
-}
-
-void
-runtime·racefini(void)
-{
-}
-
-
-void
-runtime·racemapshadow(void *addr, uintptr size)
-{
- USED(addr);
- USED(size);
-}
-
-void
-runtime·racewritepc(void *addr, void *callpc, void *pc)
-{
- USED(addr);
- USED(callpc);
- USED(pc);
-}
-
-void
-runtime·racereadpc(void *addr, void *callpc, void *pc)
-{
- USED(addr);
- USED(callpc);
- USED(pc);
-}
-
-void
-runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc)
-{
- USED(addr);
- USED(sz);
- USED(callpc);
- USED(pc);
-}
-
-void
-runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc)
-{
- USED(addr);
- USED(sz);
- USED(callpc);
- USED(pc);
-}
-
-void
-runtime·raceacquire(void *addr)
-{
- USED(addr);
-}
-
-void
-runtime·raceacquireg(G *gp, void *addr)
-{
- USED(gp);
- USED(addr);
-}
-
-void
-runtime·racerelease(void *addr)
-{
- USED(addr);
-}
-
-void
-runtime·racereleaseg(G *gp, void *addr)
-{
- USED(gp);
- USED(addr);
-}
-
-void
-runtime·racereleasemerge(void *addr)
-{
- USED(addr);
-}
-
-void
-runtime·racereleasemergeg(G *gp, void *addr)
-{
- USED(gp);
- USED(addr);
-}
-
-void
-runtime·racefingo(void)
-{
-}
-
-void
-runtime·racemalloc(void *p, uintptr sz)
-{
- USED(p);
- USED(sz);
-}
-
-uintptr
-runtime·racegostart(void *pc)
-{
- USED(pc);
- return 0;
-}
-
-void
-runtime·racegoend(void)
-{
-}
const raceenabled = false
-func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
-}
-func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
-}
+// Because raceenabled is false, none of these functions should be called.
+
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
+func raceinit() { gothrow("race") }
+func racefini() { gothrow("race") }
+func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") }
+func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
+func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
+func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
+func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
+func raceacquire(addr unsafe.Pointer) { gothrow("race") }
+func raceacquireg(gp *g, addr unsafe.Pointer) { gothrow("race") }
+func racerelease(addr unsafe.Pointer) { gothrow("race") }
+func racereleaseg(gp *g, addr unsafe.Pointer) { gothrow("race") }
+func racereleasemerge(addr unsafe.Pointer) { gothrow("race") }
+func racereleasemergeg(gp *g, addr unsafe.Pointer) { gothrow("race") }
+func racefingo() { gothrow("race") }
+func racemalloc(p unsafe.Pointer, sz uintptr) { gothrow("race") }
+func racegostart(pc uintptr) uintptr { gothrow("race"); return 0 }
+func racegoend() { gothrow("race") }
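Since every call site is guarded by an if raceenabled check, reaching one of these stubs in a !race build indicates a bug, and throwing makes that loud instead of letting the call silently succeed. A stand-alone sketch of the guard-plus-throwing-stub pattern, with a simplified signature and panic in place of gothrow:

package main

import "fmt"

const raceenabled = false

// With the detector compiled out this stub should be unreachable, so it
// fails loudly rather than silently succeeding, as in race0.go above.
func racereadpc(addr *int, callerpc, pc uintptr) { panic("race") }

func load(p *int) int {
	if raceenabled { // dead code when raceenabled == false
		racereadpc(p, 0, 0)
	}
	return *p
}

func main() {
	x := 42
	fmt.Println(load(&x)) // 42
}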
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadrangepc(old.array, old.len*int(t.elem.size), callerpc, funcPC(growslice))
+ racereadrangepc(old.array, uintptr(old.len*int(t.elem.size)), callerpc, funcPC(growslice))
}
et := t.elem
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&to))
pc := funcPC(slicecopy)
- racewriterangepc(to.array, n*int(width), callerpc, pc)
- racereadrangepc(fm.array, n*int(width), callerpc, pc)
+ racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
+ racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
}
size := uintptr(n) * width
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&to))
pc := funcPC(slicestringcopy)
- racewriterangepc(unsafe.Pointer(&to[0]), n, callerpc, pc)
+ racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc)
}
memmove(unsafe.Pointer(&to[0]), unsafe.Pointer((*stringStruct)(unsafe.Pointer(&fm)).str), uintptr(n))
func slicebytetostring(b []byte) string {
if raceenabled && len(b) > 0 {
racereadrangepc(unsafe.Pointer(&b[0]),
- len(b),
+ uintptr(len(b)),
getcallerpc(unsafe.Pointer(&b)),
funcPC(slicebytetostring))
}
if raceenabled && len(b) > 0 {
racereadrangepc(unsafe.Pointer(&b[0]),
- len(b),
+ uintptr(len(b)),
getcallerpc(unsafe.Pointer(&b)),
funcPC(slicebytetostringtmp))
}
func slicerunetostring(a []rune) string {
if raceenabled && len(a) > 0 {
racereadrangepc(unsafe.Pointer(&a[0]),
- len(a)*int(unsafe.Sizeof(a[0])),
+ uintptr(len(a))*unsafe.Sizeof(a[0]),
getcallerpc(unsafe.Pointer(&a)),
funcPC(slicerunetostring))
}
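With the range functions now taking uintptr lengths, the callers convert explicitly; for a []rune the byte length is the element count times the element size, computed in uintptr as above. A quick stand-alone check of that expression:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := []rune("héllo")
	// Same shape as uintptr(len(a)) * unsafe.Sizeof(a[0]) in slicerunetostring.
	n := uintptr(len(a)) * unsafe.Sizeof(a[0])
	fmt.Println(len(a), n) // 5 runes, 20 bytes (rune is 4 bytes)
}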
const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
const regSize = 4 << (^uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const
-//go:noescape
-func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
-
-//go:noescape
-func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
-
-//go:noescape
-func racereadrangepc(addr unsafe.Pointer, len int, callpc, pc uintptr)
-
-//go:noescape
-func racewriterangepc(addr unsafe.Pointer, len int, callpc, pc uintptr)
-
-//go:noescape
-func raceacquire(addr unsafe.Pointer)
-
-//go:noescape
-func racerelease(addr unsafe.Pointer)
-
-//go:noescape
-func racereleasemerge(addr unsafe.Pointer)
-
-//go:noescape
-func raceacquireg(gp *g, addr unsafe.Pointer)
-
-//go:noescape
-func racereleaseg(gp *g, addr unsafe.Pointer)
-
-func racefingo()
-
// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
//go:noescape
func memclr(ptr unsafe.Pointer, n uintptr)
-func racemalloc(p unsafe.Pointer, size uintptr)
-
// memmove copies n bytes from "from" to "to".
// in memmove_*.s
//go:noescape