runtime: fix potential memory corruption
author     Dmitriy Vyukov <dvyukov@google.com>
           Mon, 24 Feb 2014 16:53:20 +0000 (20:53 +0400)
committer  Dmitriy Vyukov <dvyukov@google.com>
           Mon, 24 Feb 2014 16:53:20 +0000 (20:53 +0400)
Reinforce the guarantee that MSpan_EnsureSwept actually ensures that the span is swept.
I have not observed crashes caused by this, but I see no reason why it could not crash.

LGTM=rsc
R=golang-codereviews
CC=golang-codereviews, khr, rsc
https://golang.org/cl/67990043
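
For context, a sketch (not part of this CL) of the window the new check in MSpan_EnsureSwept closes, assuming a hypothetical caller that does not pin the M:

        // Hypothetical unpinned caller -- now rejected by the throw added below.
        runtime·MSpan_EnsureSwept(span);   // span is swept for the current generation sg
        // Preemption is possible here: if GC is triggered on another goroutine,
        // mheap.sweepgen advances and the span is effectively unswept again.
        s = span->specials;                // may now race with a concurrent sweep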

src/pkg/runtime/mgc0.c
src/pkg/runtime/mheap.c

diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index d34ba4c026be0f77cd2e2036d168fb7a1580e51e..238a1e790e2b7bb0bcc5f0867d76091c58ad374c 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -1694,16 +1694,19 @@ runtime·MSpan_EnsureSwept(MSpan *s)
 {
        uint32 sg;
 
+       // Caller must disable preemption.
+       // Otherwise when this function returns the span can become unswept again
+       // (if GC is triggered on another goroutine).
+       if(m->locks == 0 && m->mallocing == 0)
+               runtime·throw("MSpan_EnsureSwept: m is not locked");
+
        sg = runtime·mheap.sweepgen;
        if(runtime·atomicload(&s->sweepgen) == sg)
                return;
-       m->locks++;
        if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
                runtime·MSpan_Sweep(s);
-               m->locks--;
                return;
        }
-       m->locks--;
        // unfortunate condition, and we don't have efficient means to wait
        while(runtime·atomicload(&s->sweepgen) != sg)
                runtime·osyield();  
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index 5c5a6fe1643434ec413ce39b019969ee84c9345c..ba46b6404e6456afe425004bb63a2c684adc76e4 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -653,6 +653,7 @@ addspecial(void *p, Special *s)
 
        // Ensure that the span is swept.
        // GC accesses specials list w/o locks. And it's just much safer.
+       m->locks++;
        runtime·MSpan_EnsureSwept(span);
 
        offset = (uintptr)p - (span->start << PageShift);
@@ -665,6 +666,7 @@ addspecial(void *p, Special *s)
        while((x = *t) != nil) {
                if(offset == x->offset && kind == x->kind) {
                        runtime·unlock(&span->specialLock);
+                       m->locks--;
                        return false; // already exists
                }
                if(offset < x->offset || (offset == x->offset && kind < x->kind))
@@ -676,6 +678,7 @@ addspecial(void *p, Special *s)
        s->next = x;
        *t = s;
        runtime·unlock(&span->specialLock);
+       m->locks--;
        return true;
 }
 
@@ -695,6 +698,7 @@ removespecial(void *p, byte kind)
 
        // Ensure that the span is swept.
        // GC accesses specials list w/o locks. And it's just much safer.
+       m->locks++;
        runtime·MSpan_EnsureSwept(span);
 
        offset = (uintptr)p - (span->start << PageShift);
@@ -707,11 +711,13 @@ removespecial(void *p, byte kind)
                if(offset == s->offset && kind == s->kind) {
                        *t = s->next;
                        runtime·unlock(&span->specialLock);
+                       m->locks--;
                        return s;
                }
                t = &s->next;
        }
        runtime·unlock(&span->specialLock);
+       m->locks--;
        return nil;
 }
 
@@ -805,6 +811,8 @@ runtime·freeallspecials(MSpan *span, void *p, uintptr size)
        Special *s, **t, *list;
        uintptr offset;
 
+       if(span->sweepgen != runtime·mheap.sweepgen)
+               runtime·throw("runtime: freeallspecials: unswept span");
        // first, collect all specials into the list; then, free them
        // this is required to not cause deadlock between span->specialLock and proflock
        list = nil;
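
In summary, callers that need the swept-span guarantee now follow the pattern shown in the mheap.c hunks above; a minimal sketch, assuming a hypothetical caller that already holds a span pointer:

        // Pin the M so GC cannot start a new sweep generation while we
        // rely on the span being swept (this is what addspecial and
        // removespecial now do around MSpan_EnsureSwept).
        m->locks++;
        runtime·MSpan_EnsureSwept(span);
        // ... safely read or modify span->specials here ...
        m->locks--;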