}
func GC() {
- // We assume that the user expects unused memory to have
- // been freed when GC returns. To ensure this, run gc(1) twice.
- // The first will do a collection, and the second will force the
- // first's sweeping to finish before doing a second collection.
- // The second collection is overkill, but we assume the user
- // has a good reason for calling runtime.GC and can stand the
- // expense. At the least, this fixes all the calls to runtime.GC in
- // tests that expect finalizers to start running when GC returns.
- runtime·gc(1);
- runtime·gc(1);
+ runtime·gc(2); // force GC and do eager sweep
}
func SetFinalizer(obj Eface, finalizer Eface) {
struct gc_args
{
int64 start_time; // start time of GC in ns (just before stoptheworld)
+ bool eagersweep;
};
static void gc(struct gc_args *args);
return runtime·atoi(p);
}
+// force = 1 - do GC regardless of current heap usage
+// force = 2 - do GC and eager sweep
void
runtime·gc(int32 force)
{
return;
runtime·semacquire(&runtime·worldsema, false);
- if(!force && mstats.heap_alloc < mstats.next_gc) {
+ if(force==0 && mstats.heap_alloc < mstats.next_gc) {
// typically threads which lost the race to grab
// worldsema exit here when gc is done.
runtime·semrelease(&runtime·worldsema);
// Ok, we're doing it! Stop everybody else
a.start_time = runtime·nanotime();
+ a.eagersweep = force >= 2;
m->gcing = 1;
runtime·stoptheworld();
sweep.spanidx = 0;
// Temporary disable concurrent sweep, because we see failures on builders.
- if(ConcurrentSweep) {
+ if(ConcurrentSweep && !args->eagersweep) {
runtime·lock(&gclock);
if(sweep.g == nil)
sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, runtime·gc);
void
runtime∕debug·freeOSMemory(void)
{
- runtime·gc(1);
+ runtime·gc(2); // force GC and do eager sweep
runtime·lock(&runtime·mheap);
scavenge(-1, ~(uintptr)0, 0);
runtime·unlock(&runtime·mheap);