runtime: clean up GC code
author    Dmitriy Vyukov <dvyukov@google.com>
          Fri, 29 Aug 2014 14:44:38 +0000 (18:44 +0400)
committer Dmitriy Vyukov <dvyukov@google.com>
          Fri, 29 Aug 2014 14:44:38 +0000 (18:44 +0400)
Remove C version of GC.
Convert freeOSMemory to Go.
Restore g0 check in GC.
Remove gcpercentUnknown check in GC,
it's initialized explicitly now.

LGTM=rsc
R=golang-codereviews, rsc
CC=golang-codereviews, khr
https://golang.org/cl/139910043

src/pkg/runtime/malloc.go
src/pkg/runtime/malloc.h
src/pkg/runtime/mgc0.c
src/pkg/runtime/mgc0.go
src/pkg/runtime/mheap.c
src/pkg/runtime/stubs.go
src/pkg/runtime/thunk.s

diff --git a/src/pkg/runtime/malloc.go b/src/pkg/runtime/malloc.go
index 7f344c9164e4b47422c660482c5bd256c970d7a3..e95bdbbf97871b022a7fecb641853f75fe18b57e 100644
@@ -407,33 +407,19 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 // force = 1 - do GC regardless of current heap usage
 // force = 2 - do GC and eager sweep
 func gogc(force int32) {
-       if !memstats.enablegc {
-               return
-       }
-
-       // TODO: should never happen?  Only C calls malloc while holding a lock?
+       // The gc is turned off (via enablegc) until the bootstrap has completed.
+       // Also, malloc gets called in the guts of a number of libraries that might be
+       // holding locks. To avoid deadlocks during stoptheworld, don't bother
+       // trying to run gc while holding a lock. The next mallocgc without a lock
+       // will do the gc instead.
        mp := acquirem()
-       if mp.locks > 1 {
+       if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
                releasem(mp)
                return
        }
        releasem(mp)
        mp = nil
 
-       if panicking != 0 {
-               return
-       }
-       if gcpercent == gcpercentUnknown {
-               lock(&mheap_.lock)
-               if gcpercent == gcpercentUnknown {
-                       gcpercent = readgogc()
-               }
-               unlock(&mheap_.lock)
-       }
-       if gcpercent < 0 {
-               return
-       }
-
        semacquire(&worldsema, false)
 
        if force == 0 && memstats.heap_alloc < memstats.next_gc {
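
One subtlety in the merged condition: acquirem itself increments mp.locks, so the Go code tests locks > 1 where the deleted C version tested locks > 0 (the C code never called acquirem). A toy model of that accounting, with hypothetical stand-ins for the runtime-internal acquirem/releasem:

    package main

    import "fmt"

    // Toy model of the runtime's m and its lock counter. acquirem and
    // releasem here are hypothetical stand-ins for the real runtime
    // functions, kept just accurate enough to show the accounting.
    type m struct{ locks int32 }

    var curm = &m{}

    func acquirem() *m   { curm.locks++; return curm }
    func releasem(mp *m) { mp.locks-- }

    // gcWouldRun mirrors the early-return test in gogc: acquirem took
    // one lock itself, so locks > 1 means the caller already held one.
    func gcWouldRun() bool {
        mp := acquirem()
        ok := mp.locks <= 1
        releasem(mp)
        return ok
    }

    func main() {
        fmt.Println(gcWouldRun()) // true: no outer lock held
        outer := acquirem()       // simulate a caller holding a lock
        fmt.Println(gcWouldRun()) // false: GC deferred to a later mallocgc
        releasem(outer)
    }
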
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 19ea846dd1e3c7acfb777ed8945f0d8f559d1636..6cd72fb31f6b995a0eef5da912d62df0a1cc1622 100644
@@ -519,7 +519,6 @@ void        runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit);
 
 void*  runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
 int32  runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
-void   runtime·gc(int32 force);
 uintptr        runtime·sweepone(void);
 void   runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
 void   runtime·unmarkspan(void *v, uintptr size);
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 03eb2d98669fd85b185fb62506cb0c4b8217c0ef..09be02b71e7191e2dbb35c3b0aa7cc4d2bd63ea6 100644
@@ -1284,82 +1284,6 @@ runtime·gcinit(void)
        runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
 }
 
-// force = 1 - do GC regardless of current heap usage
-// force = 2 - go GC and eager sweep
-void
-runtime·gc(int32 force)
-{
-       struct gc_args a;
-       int32 i;
-
-       // The gc is turned off (via enablegc) until
-       // the bootstrap has completed.
-       // Also, malloc gets called in the guts
-       // of a number of libraries that might be
-       // holding locks.  To avoid priority inversion
-       // problems, don't bother trying to run gc
-       // while holding a lock.  The next mallocgc
-       // without a lock will do the gc instead.
-       if(!mstats.enablegc || g == g->m->g0 || g->m->locks > 0 || runtime·panicking)
-               return;
-
-       if(runtime·gcpercent < 0)
-               return;
-
-       runtime·semacquire(&runtime·worldsema, false);
-       if(force==0 && mstats.heap_alloc < mstats.next_gc) {
-               // typically threads which lost the race to grab
-               // worldsema exit here when gc is done.
-               runtime·semrelease(&runtime·worldsema);
-               return;
-       }
-
-       // Ok, we're doing it!  Stop everybody else
-       a.start_time = runtime·nanotime();
-       a.eagersweep = force >= 2;
-       g->m->gcing = 1;
-       runtime·stoptheworld();
-       
-       runtime·clearpools();
-
-       // Run gc on the g0 stack.  We do this so that the g stack
-       // we're currently running on will no longer change.  Cuts
-       // the root set down a bit (g0 stacks are not scanned, and
-       // we don't need to scan gc's internal state).  Also an
-       // enabler for copyable stacks.
-       for(i = 0; i < (runtime·debug.gctrace > 1 ? 2 : 1); i++) {
-               if(i > 0)
-                       a.start_time = runtime·nanotime();
-               // switch to g0, call gc(&a), then switch back
-               g->param = &a;
-               runtime·casgstatus(g, Grunning, Gwaiting);
-               g->waitreason = runtime·gostringnocopy((byte*)"garbage collection");
-               runtime·mcall(mgc);
-       }
-
-       // all done
-       g->m->gcing = 0;
-       g->m->locks++;
-       runtime·semrelease(&runtime·worldsema);
-       runtime·starttheworld();
-       g->m->locks--;
-
-       // now that gc is done, kick off finalizer thread if needed
-       if(!ConcurrentSweep) {
-               // give the queued finalizers, if any, a chance to run
-               runtime·gosched();
-       }
-}
-
-static void
-mgc(G *gp)
-{
-       gc(gp->param);
-       gp->param = nil;
-       runtime·casgstatus(gp, Gwaiting, Grunning);
-       runtime·gogo(&gp->sched);
-}
-
 void
 runtime·gc_m(void)
 {
@@ -1502,7 +1426,7 @@ gc(struct gc_args *args)
        if(ConcurrentSweep && !args->eagersweep) {
                runtime·lock(&gclock);
                if(sweep.g == nil)
-                       sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, runtime·gc);
+                       sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, gc);
                else if(sweep.parked) {
                        sweep.parked = false;
                        runtime·ready(sweep.g);
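
With the C entry point runtime·gc deleted, the caller PC recorded for the background sweeper goroutine becomes the internal gc function instead of the removed wrapper; judging from the surrounding code, newproc1's final argument only tags where the goroutine was created, so behavior is otherwise unchanged.
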
diff --git a/src/pkg/runtime/mgc0.go b/src/pkg/runtime/mgc0.go
index 496725f6a768ebd209f8d29c045d9874e727f65c..275c7ed676badbab7208fb7052839de5fb868d9e 100644
@@ -34,3 +34,8 @@ func gc_unixnanotime(now *int64) {
        sec, nsec := timenow()
        *now = sec*1e9 + int64(nsec)
 }
+
+func freeOSMemory() {
+       gogc(2) // force GC and do eager sweep
+       onM(&scavenge_m)
+}
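
For context, this function backs the exported runtime/debug.FreeOSMemory API (wired up through the assembly thunk below); a minimal caller:

    package main

    import (
        "fmt"
        "runtime/debug"
    )

    func main() {
        // Force a GC with eager sweep, then scavenge: return as much
        // unused heap memory to the operating system as possible.
        debug.FreeOSMemory()
        fmt.Println("scavenge requested")
    }
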
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index 8bfb41ac675d1d7eb3b2dc5432cbece8a29eb1e2..90acd55f9f178676809a96f26fffa4783bc4eb1c 100644
@@ -622,19 +622,10 @@ runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit)
        }
 }
 
-static void
-scavenge_m(G *gp)
-{
-       runtime·MHeap_Scavenge(-1, ~(uintptr)0, 0);
-       runtime·gogo(&gp->sched);
-}
-
 void
-runtime∕debug·freeOSMemory(void)
+runtime·scavenge_m(void)
 {
-       runtime·gc(2);  // force GC and do eager sweep
-
-       runtime·mcall(scavenge_m);
+       runtime·MHeap_Scavenge(-1, ~(uintptr)0, 0);
 }
 
 // Initialize a new span with the given start and npages.
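
Reading the surviving call against the signature in malloc.h (k, now, limit): k = -1 marks a forced scavenge rather than a periodic one, now is the maximum unsigned value, and limit is 0, so every unused span appears idle long enough to qualify and is returned to the OS.
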
diff --git a/src/pkg/runtime/stubs.go b/src/pkg/runtime/stubs.go
index 2014dfbf909dead98f6deb08eabffafcaff4b53c..9e5a2cf04ae5ef7977c39d52a6d48109a41e1425 100644
@@ -78,6 +78,7 @@ var (
        largeAlloc_m,
        mprofMalloc_m,
        gc_m,
+       scavenge_m,
        setFinalizer_m,
        removeFinalizer_m,
        markallocated_m,
@@ -111,8 +112,7 @@ func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
 func fastrand2() uint32
 
 const (
-       gcpercentUnknown = -2
-       concurrentSweep  = true
+       concurrentSweep = true
 )
 
 func gosched()
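
Here scavenge_m joins the table of functions that Go code can run on the M's g0 (scheduler) stack via onM, replacing the hand-rolled mcall trampoline deleted from mheap.c. A rough sketch of the dispatch idea, with hypothetical types; the real onM performs an actual stack switch before calling into C:

    package main

    import "fmt"

    // mFunction stands in for the runtime's M-stack function type;
    // in the real runtime these resolve to C/assembly routines.
    type mFunction func()

    // onM sketch: the real version saves the current goroutine, switches
    // to the M's g0 stack, calls fn there, and switches back.
    func onM(fn *mFunction) {
        (*fn)()
    }

    var scavenge_m mFunction = func() {
        fmt.Println("MHeap_Scavenge(-1, ^uint64(0), 0)") // placeholder body
    }

    func main() {
        onM(&scavenge_m) // mirrors the call in freeOSMemory above
    }
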
diff --git a/src/pkg/runtime/thunk.s b/src/pkg/runtime/thunk.s
index 6093656c6f9779e32b8fab5e43f119e193b61855..997a4febc7afeebb61e76f4f9bc74ab14a066363 100644
@@ -61,3 +61,6 @@ TEXT reflect·chanlen(SB), NOSPLIT, $0-0
 
 TEXT reflect·chancap(SB), NOSPLIT, $0-0
        JMP     runtime·reflect_chancap(SB)
+
+TEXT runtime∕debug·freeOSMemory(SB), NOSPLIT, $0-0
+       JMP     runtime·freeOSMemory(SB)
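
This thunk bridges packages: runtime/debug declares freeOSMemory without a body, and the JMP satisfies it with the runtime implementation. A sketch of the Go side of the pattern (the exact declaration in runtime/debug may differ):

    // In package runtime/debug (sketch):

    // Implemented in package runtime; the assembly thunk above
    // jumps to runtime·freeOSMemory.
    func freeOSMemory()

    // FreeOSMemory forces a garbage collection followed by an attempt
    // to return as much memory to the operating system as possible.
    func FreeOSMemory() {
        freeOSMemory()
    }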