Cypherpunks repositories - gostls13.git/commitdiff
runtime: correct various Go -> C function calls
author: Russ Cox <rsc@golang.org>
Thu, 4 Sep 2014 04:54:06 +0000 (00:54 -0400)
committer: Russ Cox <rsc@golang.org>
Thu, 4 Sep 2014 04:54:06 +0000 (00:54 -0400)
Some things get converted.
Other things (too complex or too many C deps) get onM calls.
Other things (too simple) get #pragma textflag NOSPLIT.

After this CL, the offending function list is basically:
        - panic.c
        - netpoll.goc
        - mem*.c
        - race stuff
        - readgstatus
        - entersyscall/exitsyscall

LGTM=r, iant
R=golang-codereviews, r, iant
CC=dvyukov, golang-codereviews, khr
https://golang.org/cl/140930043

21 files changed:
src/pkg/runtime/cpuprof.go
src/pkg/runtime/debug.go
src/pkg/runtime/malloc.c
src/pkg/runtime/malloc.go
src/pkg/runtime/malloc.h
src/pkg/runtime/mcache.c
src/pkg/runtime/mheap.c
src/pkg/runtime/mprof.go
src/pkg/runtime/os_darwin.c
src/pkg/runtime/os_nacl.c
src/pkg/runtime/os_nacl.go
src/pkg/runtime/os_windows.c
src/pkg/runtime/os_windows.go
src/pkg/runtime/proc.c
src/pkg/runtime/runtime.c
src/pkg/runtime/runtime.go [new file with mode: 0644]
src/pkg/runtime/signal_unix.c
src/pkg/runtime/signal_unix.go [new file with mode: 0644]
src/pkg/runtime/stack_test.go
src/pkg/runtime/stubs.go
src/pkg/runtime/thunk.s

index 4325d7e1c304b64e1f90341072fafd5847cefb0e..8b1c1c632752993ecb400f5312fc99fa94067fd3 100644 (file)
@@ -101,7 +101,13 @@ var (
        eod = [3]uintptr{0, 1, 0}
 )
 
-func setcpuprofilerate(int32) // proc.c
+func setcpuprofilerate_m() // proc.c
+
+func setcpuprofilerate(hz int32) {
+       g := getg()
+       g.m.scalararg[0] = uintptr(hz)
+       onM(setcpuprofilerate_m)
+}
 
 // lostProfileData is a no-op function used in profiles
 // to mark the number of profiling stack traces that were
index ed11fb1b1a40ecff1f22ca822fa937b74bc4da58..bb4bd60ed4dad3facd0f4f5da6799f010e7a76cb 100644 (file)
@@ -24,10 +24,15 @@ func UnlockOSThread()
 // The number of logical CPUs on the local machine can be queried with NumCPU.
 // This call will go away when the scheduler improves.
 func GOMAXPROCS(n int) int {
-       return int(gomaxprocsfunc(int32(n)))
+       g := getg()
+       g.m.scalararg[0] = uintptr(n)
+       onM(gomaxprocs_m)
+       n = int(g.m.scalararg[0])
+       g.m.scalararg[0] = 0
+       return n
 }
 
-func gomaxprocsfunc(int32) int32 // proc.c
+func gomaxprocs_m() // proc.c
 
 // NumCPU returns the number of logical CPUs on the local machine.
 func NumCPU() int {
index 311cc442c20e2b7459fe86b42f64a9778d5addcf..c864bc93c08d07c60f2004c56d9b39b4f7e6d04c 100644 (file)
@@ -348,58 +348,6 @@ runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
        return p;
 }
 
-static struct
-{
-       Mutex   lock;
-       byte*   pos;
-       byte*   end;
-} persistent;
-
-enum
-{
-       PersistentAllocChunk    = 256<<10,
-       PersistentAllocMaxBlock = 64<<10,  // VM reservation granularity is 64K on windows
-};
-
-// Wrapper around sysAlloc that can allocate small chunks.
-// There is no associated free operation.
-// Intended for things like function/type/debug-related persistent data.
-// If align is 0, uses default align (currently 8).
-void*
-runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
-{
-       byte *p;
-
-       if(align != 0) {
-               if(align&(align-1))
-                       runtime·throw("persistentalloc: align is not a power of 2");
-               if(align > PageSize)
-                       runtime·throw("persistentalloc: align is too large");
-       } else
-               align = 8;
-       if(size >= PersistentAllocMaxBlock)
-               return runtime·sysAlloc(size, stat);
-       runtime·lock(&persistent.lock);
-       persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
-       if(persistent.pos + size > persistent.end) {
-               persistent.pos = runtime·sysAlloc(PersistentAllocChunk, &mstats.other_sys);
-               if(persistent.pos == nil) {
-                       runtime·unlock(&persistent.lock);
-                       runtime·throw("runtime: cannot allocate memory");
-               }
-               persistent.end = persistent.pos + PersistentAllocChunk;
-       }
-       p = persistent.pos;
-       persistent.pos += size;
-       runtime·unlock(&persistent.lock);
-       if(stat != &mstats.other_sys) {
-               // reaccount the allocation against provided stat
-               runtime·xadd64(stat, size);
-               runtime·xadd64(&mstats.other_sys, -(uint64)size);
-       }
-       return p;
-}
-
 // Runtime stubs.
 
 static void*
index dbe37c81086c8f04df6e3ea6cee234bb745efe8b..883ca0cef79dbeba2e30dd089bc2850bfedaa484 100644 (file)
@@ -431,7 +431,7 @@ func gogc(force int32) {
        mp = acquirem()
        mp.gcing = 1
        releasem(mp)
-       stoptheworld()
+       onM(stoptheworld)
        if mp != acquirem() {
                gothrow("gogc: rescheduled")
        }
@@ -465,7 +465,7 @@ func gogc(force int32) {
        // all done
        mp.gcing = 0
        semrelease(&worldsema)
-       starttheworld()
+       onM(starttheworld)
        releasem(mp)
        mp = nil
 
@@ -760,3 +760,55 @@ func runfinq() {
                }
        }
 }
+
+var persistent struct {
+       lock mutex
+       pos  unsafe.Pointer
+       end  unsafe.Pointer
+}
+
+// Wrapper around sysAlloc that can allocate small chunks.
+// There is no associated free operation.
+// Intended for things like function/type/debug-related persistent data.
+// If align is 0, uses default align (currently 8).
+func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
+       const (
+               chunk    = 256 << 10
+               maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
+       )
+
+       if align != 0 {
+               if align&(align-1) != 0 {
+                       gothrow("persistentalloc: align is not a power of 2")
+               }
+               if align > _PageSize {
+                       gothrow("persistentalloc: align is too large")
+               }
+       } else {
+               align = 8
+       }
+
+       if size >= maxBlock {
+               return sysAlloc(size, stat)
+       }
+
+       lock(&persistent.lock)
+       persistent.pos = roundup(persistent.pos, align)
+       if uintptr(persistent.pos)+size > uintptr(persistent.end) {
+               persistent.pos = sysAlloc(chunk, &memstats.other_sys)
+               if persistent.pos == nil {
+                       unlock(&persistent.lock)
+                       gothrow("runtime: cannot allocate memory")
+               }
+               persistent.end = add(persistent.pos, chunk)
+       }
+       p := persistent.pos
+       persistent.pos = add(persistent.pos, size)
+       unlock(&persistent.lock)
+
+       if stat != &memstats.other_sys {
+               xadd64(stat, int64(size))
+               xadd64(&memstats.other_sys, -int64(size))
+       }
+       return p
+}
index d9a2bf84bad4bf8129e6e9c728c91aeb30b70492..54416919403abb3b4952e3c0e4cdf1df91abaee6 100644 (file)
@@ -578,7 +578,7 @@ extern bool runtime·fingwake;
 extern FinBlock        *runtime·finq;         // list of finalizers that are to be executed
 extern FinBlock        *runtime·finc;         // cache of free blocks
 
-void   runtime·setprofilebucket(void *p, Bucket *b);
+void   runtime·setprofilebucket_m(void);
 
 bool   runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*);
 void   runtime·removefinalizer(void*);
index 8e98890e8e3f0e50b6ae72c7129461900078aaa1..bb1fc54032388f7affd493f9a44435b1674c720d 100644 (file)
@@ -52,24 +52,23 @@ freemcache(MCache *c)
 }
 
 static void
-freemcache_m(G *gp)
+freemcache_m(void)
 {
        MCache *c;
 
        c = g->m->ptrarg[0];
        g->m->ptrarg[0] = nil;
        freemcache(c);
-       runtime·gogo(&gp->sched);
 }
 
 void
 runtime·freemcache(MCache *c)
 {
-       void (*fn)(G*);
+       void (*fn)(void);
 
        g->m->ptrarg[0] = c;
        fn = freemcache_m;
-       runtime·mcall(&fn);
+       runtime·onM(&fn);
 }
 
 // Gets a span that has a free object in it and assigns it
index 0050e96556a9c4ae98267c71aaff9512d10cc1c9..902a5c71a20e5d9c3d7af360c8e85ab3809ff3ba 100644 (file)
@@ -834,9 +834,16 @@ runtime·removefinalizer(void *p)
 
 // Set the heap profile bucket associated with addr to b.
 void
-runtime·setprofilebucket(void *p, Bucket *b)
-{
+runtime·setprofilebucket_m(void)
+{      
+       void *p;
+       Bucket *b;
        SpecialProfile *s;
+       
+       p = g->m->ptrarg[0];
+       b = g->m->ptrarg[1];
+       g->m->ptrarg[0] = nil;
+       g->m->ptrarg[1] = nil;
 
        runtime·lock(&runtime·mheap.speciallock);
        s = runtime·FixAlloc_Alloc(&runtime·mheap.specialprofilealloc);
index b421c4f98fe94f2c740233011cabd63c220f08e1..7177c845923b59ae6a6270fb4a68d5a0b8eda7c4 100644 (file)
@@ -249,7 +249,14 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
        setprofilebucket(p, b)
 }
 
-func setprofilebucket(p unsafe.Pointer, b *bucket) // mheap.c
+func setprofilebucket_m() // mheap.c
+
+func setprofilebucket(p unsafe.Pointer, b *bucket) {
+       g := getg()
+       g.m.ptrarg[0] = p
+       g.m.ptrarg[1] = unsafe.Pointer(b)
+       onM(setprofilebucket_m)
+}
 
 // Called when freeing a profiled block.
 func mProf_Free(b *bucket, size uintptr, freed bool) {
@@ -288,8 +295,7 @@ func SetBlockProfileRate(rate int) {
        atomicstore64(&blockprofilerate, uint64(r))
 }
 
-func tickspersecond() int64 // runtime.c
-func fastrand1() uint32     // runtime.c
+func fastrand1() uint32     // assembly
 func readgstatus(*g) uint32 // proc.c
 
 func blockevent(cycles int64, skip int) {
@@ -531,7 +537,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
                gp := getg()
                semacquire(&worldsema, false)
                gp.m.gcing = 1
-               stoptheworld()
+               onM(stoptheworld)
 
                n = NumGoroutine()
                if n <= len(p) {
@@ -550,7 +556,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 
                gp.m.gcing = 0
                semrelease(&worldsema)
-               starttheworld()
+               onM(starttheworld)
        }
 
        return n, ok
@@ -576,7 +582,7 @@ func Stack(buf []byte, all bool) int {
                semacquire(&worldsema, false)
                mp.gcing = 1
                releasem(mp)
-               stoptheworld()
+               onM(stoptheworld)
                if mp != acquirem() {
                        gothrow("Stack: rescheduled")
                }
@@ -597,7 +603,7 @@ func Stack(buf []byte, all bool) int {
        if all {
                mp.gcing = 0
                semrelease(&worldsema)
-               starttheworld()
+               onM(starttheworld)
        }
        releasem(mp)
        return n
index bf13cdbafec78b4c325ed4caef7e8d7ef7923e86..19181d60dff1e30a453d310921d6d2008287b16a 100644 (file)
@@ -22,16 +22,31 @@ unimplemented(int8 *name)
        *(int32*)1231 = 1231;
 }
 
+#pragma textflag NOSPLIT
 void
 runtime·semawakeup(M *mp)
 {
        runtime·mach_semrelease(mp->waitsema);
 }
 
+static void
+semacreate(void)
+{
+       g->m->scalararg[0] = runtime·mach_semcreate();
+}
+
+#pragma textflag NOSPLIT
 uintptr
 runtime·semacreate(void)
 {
-       return runtime·mach_semcreate();
+       uintptr x;
+       void (*fn)(void);
+       
+       fn = semacreate;
+       runtime·onM(&fn);
+       x = g->m->scalararg[0];
+       g->m->scalararg[0] = 0;
+       return x;
 }
 
 // BSD interface for threading.
@@ -143,7 +158,6 @@ runtime·unminit(void)
 // Mach IPC, to get at semaphores
 // Definitions are in /usr/include/mach on a Mac.
 
-#pragma textflag NOSPLIT
 static void
 macherror(int32 r, int8 *fn)
 {
@@ -398,38 +412,88 @@ int32 runtime·mach_semaphore_timedwait(uint32 sema, uint32 sec, uint32 nsec);
 int32 runtime·mach_semaphore_signal(uint32 sema);
 int32 runtime·mach_semaphore_signal_all(uint32 sema);
 
-#pragma textflag NOSPLIT
-int32
-runtime·semasleep(int64 ns)
+static void
+semasleep(void)
 {
        int32 r, secs, nsecs;
+       int64 ns;
+       
+       ns = g->m->scalararg[0] | g->m->scalararg[1]<<32;
+       g->m->scalararg[0] = 0;
+       g->m->scalararg[1] = 0;
 
        if(ns >= 0) {
                secs = runtime·timediv(ns, 1000000000, &nsecs);
                r = runtime·mach_semaphore_timedwait(g->m->waitsema, secs, nsecs);
-               if(r == KERN_ABORTED || r == KERN_OPERATION_TIMED_OUT)
-                       return -1;
+               if(r == KERN_ABORTED || r == KERN_OPERATION_TIMED_OUT) {
+                       g->m->scalararg[0] = -1;
+                       return;
+               }
                if(r != 0)
                        macherror(r, "semaphore_wait");
-               return 0;
+               g->m->scalararg[0] = 0;
+               return;
        }
        while((r = runtime·mach_semaphore_wait(g->m->waitsema)) != 0) {
                if(r == KERN_ABORTED)   // interrupted
                        continue;
                macherror(r, "semaphore_wait");
        }
-       return 0;
+       g->m->scalararg[0] = 0;
+       return;
 }
 
+#pragma textflag NOSPLIT
+int32
+runtime·semasleep(int64 ns)
+{
+       int32 r;
+       void (*fn)(void);
+
+       g->m->scalararg[0] = (uint32)ns;
+       g->m->scalararg[1] = (uint32)(ns>>32);
+       fn = semasleep;
+       runtime·onM(&fn);
+       r = g->m->scalararg[0];
+       g->m->scalararg[0] = 0;
+       return r;
+}
+
+static int32 mach_semrelease_errno;
+
+static void
+mach_semrelease_fail(void)
+{
+       macherror(mach_semrelease_errno, "semaphore_signal");
+}
+
+#pragma textflag NOSPLIT
 void
 runtime·mach_semrelease(uint32 sem)
 {
        int32 r;
+       void (*fn)(void);
 
        while((r = runtime·mach_semaphore_signal(sem)) != 0) {
                if(r == KERN_ABORTED)   // interrupted
                        continue;
-               macherror(r, "semaphore_signal");
+               
+               // mach_semrelease must be completely nosplit,
+               // because it is called from Go code.
+               // If we're going to die, start that process on the m stack
+               // to avoid a Go stack split.
+               // Only do that if we're actually running on the g stack.
+               // We might be on the gsignal stack, and if so, onM will abort.
+               // We use the global variable instead of scalararg because
+               // we might be on the gsignal stack, having interrupted a
+               // normal call to onM. It doesn't quite matter, since the
+               // program is about to die, but better to be clean.
+               mach_semrelease_errno = r;
+               fn = mach_semrelease_fail;
+               if(g == g->m->curg)
+                       runtime·onM(&fn);
+               else
+                       fn();
        }
 }
 
index b3e0fc663695201f8a705f536b5251b87a30525f..f859dd0d35086f2da72a404f7ed69a254b445412 100644 (file)
@@ -196,12 +196,6 @@ runtime·semawakeup(M *mp)
        runtime·nacl_mutex_unlock(mp->waitsemalock);
 }
 
-void
-os·sigpipe(void)
-{
-       runtime·throw("too many writes on closed pipe");
-}
-
 uintptr
 runtime·memlimit(void)
 {
index 2ab51b8b1d9eb8988eaa3b0d6b72969e3bcbeb05..12a15aea0ddee950c3962203c7331ef586506cbb 100644 (file)
@@ -24,3 +24,7 @@ func nacl_thread_create(fn, stk, tls, xx unsafe.Pointer) int32
 func nacl_nanosleep(ts, extra unsafe.Pointer) int32
 
 const stackSystem = 0
+
+func os_sigpipe() {
+       gothrow("too many writes on closed pipe")
+}
index 172fd92285c2cd1210c63e12f255a1f2e3e1c787..0e53d8a31cb59d720b154d19d8a467943162dfb4 100644 (file)
@@ -588,12 +588,6 @@ runtime·resetcpuprofiler(int32 hz)
        runtime·atomicstore((uint32*)&g->m->profilehz, hz);
 }
 
-void
-os·sigpipe(void)
-{
-       runtime·throw("too many writes on closed pipe");
-}
-
 uintptr
 runtime·memlimit(void)
 {
index 57bd431f78579d66012e10baa228dac3e385b519..15957b3290137c1e95885111e54b4f40fe7fd19f 100644 (file)
@@ -23,3 +23,7 @@ func setlasterror(err uint32)
 func usleep1(usec uint32)
 
 const stackSystem = 512 * ptrSize
+
+func os_sigpipe() {
+       gothrow("too many writes on closed pipe")
+}
index dfbc6142d6af2ceebb7ea5250eacae26b1caeafa..3e7b07c2c5aeb07545014177ad07be4c3a19329a 100644 (file)
@@ -761,10 +761,7 @@ runtime·stoptheworld(void)
        // that is blocked trying to acquire the lock.
        if(g->m->locks > 0)
                runtime·throw("stoptheworld: holding locks");
-       // There is no evidence that stoptheworld on g0 does not work,
-       // we just don't do it today.
-       if(g == g->m->g0)
-               runtime·throw("stoptheworld: on g0");
+
        runtime·lock(&runtime·sched.lock);
        runtime·sched.stopwait = runtime·gomaxprocs;
        runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
@@ -2085,42 +2082,65 @@ exitsyscall0(G *gp)
        schedule();  // Never returns.
 }
 
-// Called from syscall package before fork.
-#pragma textflag NOSPLIT
-void
-syscall·runtime_BeforeFork(void)
+static void
+beforefork(void)
 {
+       G *gp;
+       
+       gp = g->m->curg;
        // Fork can hang if preempted with signals frequently enough (see issue 5517).
        // Ensure that we stay on the same M where we disable profiling.
-       g->m->locks++;
-       if(g->m->profilehz != 0)
+       gp->m->locks++;
+       if(gp->m->profilehz != 0)
                runtime·resetcpuprofiler(0);
 
        // This function is called before fork in syscall package.
        // Code between fork and exec must not allocate memory nor even try to grow stack.
        // Here we spoil g->stackguard to reliably detect any attempts to grow stack.
        // runtime_AfterFork will undo this in parent process, but not in child.
-       g->m->forkstackguard = g->stackguard;
-       g->stackguard0 = StackPreempt-1;
-       g->stackguard = StackPreempt-1;
+       gp->m->forkstackguard = gp->stackguard;
+       gp->stackguard0 = StackPreempt-1;
+       gp->stackguard = StackPreempt-1;
 }
 
-// Called from syscall package after fork in parent.
+// Called from syscall package before fork.
 #pragma textflag NOSPLIT
 void
-syscall·runtime_AfterFork(void)
+syscall·runtime_BeforeFork(void)
 {
-       int32 hz;
+       void (*fn)(void);
+       
+       fn = beforefork;
+       runtime·onM(&fn);
+}
 
+static void
+afterfork(void)
+{
+       int32 hz;
+       G *gp;
+       
+       gp = g->m->curg;
        // See the comment in runtime_BeforeFork.
-       g->stackguard0 = g->m->forkstackguard;
-       g->stackguard = g->m->forkstackguard;
-       g->m->forkstackguard = 0;
+       gp->stackguard0 = gp->m->forkstackguard;
+       gp->stackguard = gp->m->forkstackguard;
+       gp->m->forkstackguard = 0;
 
        hz = runtime·sched.profilehz;
        if(hz != 0)
                runtime·resetcpuprofiler(hz);
-       g->m->locks--;
+       gp->m->locks--;
+}
+
+// Called from syscall package after fork in parent.
+#pragma textflag NOSPLIT
+void
+syscall·runtime_AfterFork(void)
+{
+       void (*fn)(void);
+       
+       fn = afterfork;
+       runtime·onM(&fn);
 }
 
 // Hook used by runtime·malg to call runtime·stackalloc on the
@@ -2453,10 +2473,13 @@ runtime·Breakpoint(void)
 
 // Implementation of runtime.GOMAXPROCS.
 // delete when scheduler is even stronger
-int32
-runtime·gomaxprocsfunc(int32 n)
+void
+runtime·gomaxprocs_m(void)
 {
-       int32 ret;
+       int32 n, ret;
+       
+       n = g->m->scalararg[0];
+       g->m->scalararg[0] = 0;
 
        if(n > MaxGomaxprocs)
                n = MaxGomaxprocs;
@@ -2464,7 +2487,8 @@ runtime·gomaxprocsfunc(int32 n)
        ret = runtime·gomaxprocs;
        if(n <= 0 || n == ret) {
                runtime·unlock(&runtime·sched.lock);
-               return ret;
+               g->m->scalararg[0] = ret;
+               return;
        }
        runtime·unlock(&runtime·sched.lock);
 
@@ -2476,7 +2500,8 @@ runtime·gomaxprocsfunc(int32 n)
        runtime·semrelease(&runtime·worldsema);
        runtime·starttheworld();
 
-       return ret;
+       g->m->scalararg[0] = ret;
+       return;
 }
 
 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
@@ -2540,6 +2565,7 @@ runtime·lockedOSThread(void)
        return g->lockedm != nil && g->m->lockedg != nil;
 }
 
+#pragma textflag NOSPLIT
 int32
 runtime·gcount(void)
 {
@@ -2737,8 +2763,13 @@ runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
 
 // Arrange to call fn with a traceback hz times a second.
 void
-runtime·setcpuprofilerate(int32 hz)
+runtime·setcpuprofilerate_m(void)
 {
+       int32 hz;
+       
+       hz = g->m->scalararg[0];
+       g->m->scalararg[0] = 0;
+
        // Force sane arguments.
        if(hz < 0)
                hz = 0;
index b0adfb601b71e1bb0efe9291e495ca07f2bb4a08..751181274a7878e4ce442f01dd3222775b999fae 100644 (file)
@@ -20,6 +20,7 @@ static uint32 traceback_cache = 2<<1;
 //     GOTRACEBACK=1   default behavior - show tracebacks but exclude runtime frames
 //     GOTRACEBACK=2   show tracebacks including runtime frames
 //     GOTRACEBACK=crash   show tracebacks including runtime frames, then crash (core dump etc)
+#pragma textflag NOSPLIT
 int32
 runtime·gotraceback(bool *crash)
 {
@@ -266,37 +267,6 @@ runtime·check(void)
                runtime·throw("FixedStack is not power-of-2");
 }
 
-static Mutex ticksLock;
-static int64 ticks;
-
-// Note: Called by runtime/pprof in addition to runtime code.
-int64
-runtime·tickspersecond(void)
-{
-       int64 res, t0, t1, c0, c1;
-
-       res = (int64)runtime·atomicload64((uint64*)&ticks);
-       if(res != 0)
-               return ticks;
-       runtime·lock(&ticksLock);
-       res = ticks;
-       if(res == 0) {
-               t0 = runtime·nanotime();
-               c0 = runtime·cputicks();
-               runtime·usleep(100*1000);
-               t1 = runtime·nanotime();
-               c1 = runtime·cputicks();
-               if(t1 == t0)
-                       t1++;
-               res = (c1-c0)*1000*1000*1000/(t1-t0);
-               if(res == 0)
-                       res++;
-               runtime·atomicstore64((uint64*)&ticks, res);
-       }
-       runtime·unlock(&ticksLock);
-       return res;
-}
-
 #pragma dataflag NOPTR
 DebugVars      runtime·debug;
 
diff --git a/src/pkg/runtime/runtime.go b/src/pkg/runtime/runtime.go
new file mode 100644 (file)
index 0000000..d5b3155
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2009 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+var ticks struct {
+       lock mutex
+       val  uint64
+}
+
+// Note: Called by runtime/pprof in addition to runtime code.
+func tickspersecond() int64 {
+       r := int64(atomicload64(&ticks.val))
+       if r != 0 {
+               return r
+       }
+       lock(&ticks.lock)
+       r = int64(ticks.val)
+       if r == 0 {
+               t0 := nanotime()
+               c0 := cputicks()
+               usleep(100 * 1000)
+               t1 := nanotime()
+               c1 := cputicks()
+               if t1 == t0 {
+                       t1++
+               }
+               r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
+               if r == 0 {
+                       r++
+               }
+               atomicstore64(&ticks.val, uint64(r))
+       }
+       unlock(&ticks.lock)
+       return r
+}
index 4d582d3f8cba3b6f41228abe82b7c3bccac389ec..0e33ece494bef402f67c611c5fb8989268679fe2 100644 (file)
@@ -93,7 +93,7 @@ runtime·resetcpuprofiler(int32 hz)
 }
 
 void
-os·sigpipe(void)
+runtime·sigpipe(void)
 {
        runtime·setsig(SIGPIPE, SIG_DFL, false);
        runtime·raise(SIGPIPE);
diff --git a/src/pkg/runtime/signal_unix.go b/src/pkg/runtime/signal_unix.go
new file mode 100644 (file)
index 0000000..ba77b6e
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright 2012 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package runtime
+
+func sigpipe()
+
+func os_sigpipe() {
+       onM(sigpipe)
+}
index b3dcbd12a0c24fc53e5848289cb1af21126fb4e2..a822d73db4499e85270f7dae0499a4628cb29a01 100644 (file)
@@ -341,3 +341,12 @@ func TestStackOutput(t *testing.T) {
                t.Errorf("Stack output should begin with \"goroutine \"")
        }
 }
+
+func TestStackAllOutput(t *testing.T) {
+       b := make([]byte, 1024)
+       stk := string(b[:Stack(b, true)])
+       if !strings.HasPrefix(stk, "goroutine ") {
+               t.Errorf("Stack (len %d):\n%s", len(stk), stk)
+               t.Errorf("Stack output should begin with \"goroutine \"")
+       }
+}
index 287b3df05d955175223bae5e4512782729aa065d..3719c7501c3b199516fff3c3fc3adb64097c84da 100644 (file)
@@ -202,7 +202,6 @@ func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
 func procyield(cycles uint32)
 func osyield()
 func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
-func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer
 func readgogc() int32
 func purgecachedstats(c *mcache)
 func gostringnocopy(b *byte) string
index 048b7a723678faa003e28d4e2010cfc4d91d7000..0b5963e70ad148e0981dd26b802bd4b37fbcf166 100644 (file)
@@ -118,3 +118,6 @@ TEXT reflect·makechan(SB),NOSPLIT,$0-0
 
 TEXT reflect·rselect(SB), NOSPLIT, $0-0
        JMP     runtime·reflect_rselect(SB)
+
+TEXT os·sigpipe(SB), NOSPLIT, $0-0
+       JMP     runtime·os_sigpipe(SB)