runtime·goargs();
runtime·goenvs();
- // For debugging:
- // Allocate internal symbol table representation now,
- // so that we don't need to call malloc when we crash.
- // runtime·findfunc(0);
+ // Allocate internal symbol table representation now; we need it for GC anyway.
+ runtime·symtabinit();
runtime·sched.lastpoll = runtime·nanotime();
procs = 1;
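The new call sits in the still-single-threaded part of scheduler startup, which is what lets every later reader go lock-free. A minimal standalone sketch of that eager-initialization pattern in portable C (the identifiers here are illustrative stand-ins, not the runtime's actual code):

```c
#include <stdio.h>

/* Illustrative stand-ins for the runtime's symbol-table state. */
static int func_entries[3];
static int nfunc;

/* Built once during single-threaded startup; no lock is needed then,
 * and no atomics are needed by later concurrent readers. */
static void
symtabinit(void)
{
	func_entries[0] = 0x1000;
	func_entries[1] = 0x2000;
	func_entries[2] = 0x3000;
	nfunc = 3;
}

/* A reader in the style of findfunc: safe to call from anywhere,
 * even a signal handler, because the table is immutable by now. */
static int
findfunc(int addr)
{
	int i;

	for(i = nfunc - 1; i >= 0; i--)
		if(addr >= func_entries[i])
			return i;
	return -1;
}

int
main(void)
{
	symtabinit();  /* eager init, before any other thread exists */
	printf("addr 0x2500 -> func %d\n", findfunc(0x2500));
	return 0;
}
```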
void runtime·minit(void);
void runtime·unminit(void);
void runtime·signalstack(byte*, int32);
+void runtime·symtabinit(void);
Func* runtime·findfunc(uintptr);
int32 runtime·funcline(Func*, uintptr);
void* runtime·stackalloc(uint32);
static byte **fname;
static int32 nfname;
-static uint32 funcinit;
-static Lock funclock;
static uintptr lastvalue;
FLUSH(&retline);
}
-static void
-buildfuncs(void)
+void
+runtime·symtabinit(void)
{
extern byte etext[];
Func*
runtime·findfunc(uintptr addr)
{
Func *f;
int32 nf, n;
- // Use atomic double-checked locking,
- // because when called from pprof signal
- // handler, findfunc must run without
- // grabbing any locks.
- // (Before enabling the signal handler,
- // SetCPUProfileRate calls findfunc to trigger
- // the initialization outside the handler.)
- // Avoid deadlock on fault during malloc
- // by not calling buildfuncs if we're already in malloc.
- if(!m->mallocing && !m->gcing) {
-     if(runtime·atomicload(&funcinit) == 0) {
-         runtime·lock(&funclock);
-         if(funcinit == 0) {
-             buildfuncs();
-             runtime·atomicstore(&funcinit, 1);
-         }
-         runtime·unlock(&funclock);
-     }
- }
-
if(nfunc == 0)
    return nil;
if(addr < func[0].entry || addr >= func[nfunc].entry)
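For reference, the deleted guard was the classic atomic double-checked-locking idiom. A self-contained C11 sketch of the same shape, using `stdatomic.h` and a pthread mutex in place of the runtime's own `Lock` and atomic helpers (a sketch of the pattern, not the runtime's code):

```c
#include <stdatomic.h>
#include <pthread.h>

static atomic_uint funcinit;    /* 0 = table not built yet, 1 = built */
static pthread_mutex_t funclock = PTHREAD_MUTEX_INITIALIZER;

static void
buildfuncs(void)
{
	/* ... build the symbol table exactly once ... */
}

static void
ensure_funcs(void)
{
	/* Fast path: one atomic load, no lock taken. */
	if (atomic_load(&funcinit) == 0) {
		pthread_mutex_lock(&funclock);
		/* Re-check under the lock: another thread may have
		 * finished initialization while we waited. */
		if (atomic_load(&funcinit) == 0) {
			buildfuncs();
			atomic_store(&funcinit, 1);  /* publish after building */
		}
		pthread_mutex_unlock(&funclock);
	}
}
```

As the deleted comment explains, even this fast path was fragile: a pprof signal handler could only take the lock-free branch, so SetCPUProfileRate had to pre-trigger initialization before enabling the handler, and findfunc had to skip initialization entirely while in malloc or GC to avoid deadlocking on a fault. Building the table eagerly in runtime·symtabinit makes all of that machinery unnecessary.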