From 6e612ae0f5b527660f0e1ae497d0ad8fbb6953c2 Mon Sep 17 00:00:00 2001
From: Dmitriy Vyukov
Date: Mon, 24 Feb 2014 20:53:20 +0400
Subject: [PATCH] runtime: fix potential memory corruption

Reinforce the guarantee that MSpan_EnsureSwept actually ensures
that the span is swept.
I have not observed crashes related to this,
but I do not see why it can't crash as well.

LGTM=rsc
R=golang-codereviews
CC=golang-codereviews, khr, rsc
https://golang.org/cl/67990043
---
 src/pkg/runtime/mgc0.c  | 9 ++++++---
 src/pkg/runtime/mheap.c | 8 ++++++++
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index d34ba4c026..238a1e790e 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -1694,16 +1694,19 @@ runtime·MSpan_EnsureSwept(MSpan *s)
 {
 	uint32 sg;
 
+	// Caller must disable preemption.
+	// Otherwise when this function returns the span can become unswept again
+	// (if GC is triggered on another goroutine).
+	if(m->locks == 0 && m->mallocing == 0)
+		runtime·throw("MSpan_EnsureSwept: m is not locked");
+
 	sg = runtime·mheap.sweepgen;
 	if(runtime·atomicload(&s->sweepgen) == sg)
 		return;
-	m->locks++;
 	if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
 		runtime·MSpan_Sweep(s);
-		m->locks--;
 		return;
 	}
-	m->locks--;
 	// unfortunate condition, and we don't have efficient means to wait
 	while(runtime·atomicload(&s->sweepgen) != sg)
 		runtime·osyield();
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index 5c5a6fe164..ba46b6404e 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -653,6 +653,7 @@ addspecial(void *p, Special *s)
 
 	// Ensure that the span is swept.
 	// GC accesses specials list w/o locks. And it's just much safer.
+	m->locks++;
 	runtime·MSpan_EnsureSwept(span);
 
 	offset = (uintptr)p - (span->start << PageShift);
@@ -665,6 +666,7 @@ addspecial(void *p, Special *s)
 	while((x = *t) != nil) {
 		if(offset == x->offset && kind == x->kind) {
 			runtime·unlock(&span->specialLock);
+			m->locks--;
 			return false; // already exists
 		}
 		if(offset < x->offset || (offset == x->offset && kind < x->kind))
@@ -676,6 +678,7 @@ addspecial(void *p, Special *s)
 	s->next = x;
 	*t = s;
 	runtime·unlock(&span->specialLock);
+	m->locks--;
 	return true;
 }
 
@@ -695,6 +698,7 @@ removespecial(void *p, byte kind)
 
 	// Ensure that the span is swept.
 	// GC accesses specials list w/o locks. And it's just much safer.
+	m->locks++;
 	runtime·MSpan_EnsureSwept(span);
 
 	offset = (uintptr)p - (span->start << PageShift);
@@ -707,11 +711,13 @@ removespecial(void *p, byte kind)
 		if(offset == s->offset && kind == s->kind) {
 			*t = s->next;
 			runtime·unlock(&span->specialLock);
+			m->locks--;
 			return s;
 		}
 		t = &s->next;
 	}
 	runtime·unlock(&span->specialLock);
+	m->locks--;
 	return nil;
 }
 
@@ -805,6 +811,8 @@ runtime·freeallspecials(MSpan *span, void *p, uintptr size)
 	Special *s, **t, *list;
 	uintptr offset;
 
+	if(span->sweepgen != runtime·mheap.sweepgen)
+		runtime·throw("runtime: freeallspecials: unswept span");
 	// first, collect all specials into the list; then, free them
 	// this is required to not cause deadlock between span->specialLock and proflock
 	list = nil;
-- 
2.48.1
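
For context: the convention this patch enforces is that a caller of
runtime·MSpan_EnsureSwept must first make the current M non-preemptible,
so that a GC triggered on another goroutine cannot advance the sweep
generation between EnsureSwept returning and the caller touching the span.
A minimal sketch of the caller-side pattern, in the runtime's C dialect
(illustrative only; use_span_specials is a hypothetical function, not code
from this CL):

	static void
	use_span_specials(MSpan *span)
	{
		// Disable preemption: while m->locks != 0 this M cannot be
		// preempted, so the world cannot be stopped and the sweep
		// generation cannot advance. (A nonzero m->mallocing would
		// satisfy the new check in MSpan_EnsureSwept as well.)
		m->locks++;

		// From here until m->locks-- below, the span is swept
		// and stays swept.
		runtime·MSpan_EnsureSwept(span);

		// ... safe to read or modify span->specials ...

		// Re-enable preemption.
		m->locks--;
	}

This is the shape addspecial and removespecial take after this patch:
m->locks++ before the MSpan_EnsureSwept call, and a matching m->locks--
on every return path.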