Cypherpunks repositories - gostls13.git/commitdiff
runtime: fix windows build
author    Dmitriy Vyukov <dvyukov@google.com>
          Mon, 27 Jan 2014 20:26:56 +0000 (00:26 +0400)
committer Dmitriy Vyukov <dvyukov@google.com>
          Mon, 27 Jan 2014 20:26:56 +0000 (00:26 +0400)
Currently the Windows build crashes because early allocations in schedinit
try to allocate tiny memory blocks, but m->p is not yet set up.
I considered calling procresize(1) earlier in schedinit,
but this refactoring is better and should fix the issue as well.
Fixes #7218.

R=golang-codereviews, r
CC=golang-codereviews
https://golang.org/cl/54570045
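
For context, below is a minimal standalone sketch (standard C99, not the runtime's Plan 9 C) of the tiny-allocator bump logic whose state this change moves from the per-P struct into MCache, so it is reached through m->mcache rather than m->p, which per the message above is not yet set up during early schedinit allocations. The names TinyCache, TINY_BLOCK, and tiny_alloc are illustrative only, and plain malloc stands in for the runtime's size-class allocator.

/* Standalone sketch of the tiny-allocator bump logic (standard C99). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* TINY_BLOCK stands in for the runtime's TinySize (16 bytes). */
#define TINY_BLOCK 16
#define ROUND(x, n) (((x) + (n) - 1) & ~(uintptr_t)((n) - 1))

/* TinyCache mirrors the tiny/tinysize pair this change moves into MCache. */
typedef struct TinyCache TinyCache;
struct TinyCache {
	unsigned char *tiny;     /* first unused byte of the current tiny block */
	uintptr_t     tinysize;  /* bytes still free in the current tiny block */
};

/*
 * Bump-allocate a small pointer-free object (size <= TINY_BLOCK) out of the
 * cached tiny block, aligning conservatively by the largest power of two
 * dividing size.  When the object does not fit, a fresh block is carved out
 * (plain malloc stands in for the size-class allocator) and cached if it
 * leaves more free space than the old remainder.
 */
static void*
tiny_alloc(TinyCache *c, size_t size)
{
	unsigned char *tiny;
	uintptr_t size1;

	if(size <= c->tinysize) {
		tiny = c->tiny;
		/* Align tiny pointer for required (conservative) alignment. */
		if((size&7) == 0)
			tiny = (unsigned char*)ROUND((uintptr_t)tiny, 8);
		else if((size&3) == 0)
			tiny = (unsigned char*)ROUND((uintptr_t)tiny, 4);
		else if((size&1) == 0)
			tiny = (unsigned char*)ROUND((uintptr_t)tiny, 2);
		size1 = size + (tiny - c->tiny);
		if(size1 <= c->tinysize) {
			/* The object fits into the existing tiny block. */
			c->tiny += size1;
			c->tinysize -= size1;
			return tiny;
		}
	}
	/* Start a new tiny block; keep whichever leaves more free space. */
	tiny = malloc(TINY_BLOCK);
	if(TINY_BLOCK - size > c->tinysize) {
		c->tiny = tiny + size;
		c->tinysize = TINY_BLOCK - size;
	}
	return tiny;
}

int
main(void)
{
	TinyCache c;
	memset(&c, 0, sizeof c);
	/* The second allocation fits into the first one's block; the third
	 * starts a new block once the first is full. */
	printf("%p %p %p\n", tiny_alloc(&c, 3), tiny_alloc(&c, 8), tiny_alloc(&c, 1));
	return 0;
}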

src/pkg/runtime/malloc.goc
src/pkg/runtime/malloc.h
src/pkg/runtime/mgc0.c
src/pkg/runtime/runtime.h

index 280a0a2a8f2fafc7dd25161cc1c4bf5bc77d4313..4e554a1f92ca4b9a4a1ede0e695a0ecc75fc2ed3 100644 (file)
@@ -42,7 +42,6 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
        MCacheList *l;
        MLink *v;
        byte *tiny;
-       P *p;
 
        if(size == 0) {
                // All 0-length allocations use this pointer.
@@ -93,10 +92,9 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
                        // the allocator reduces number of allocations by ~12% and
                        // reduces heap size by ~20%.
 
-                       p = m->p;
-                       tinysize = p->tinysize;
+                       tinysize = c->tinysize;
                        if(size <= tinysize) {
-                               tiny = p->tiny;
+                               tiny = c->tiny;
                                // Align tiny pointer for required (conservative) alignment.
                                if((size&7) == 0)
                                        tiny = (byte*)ROUND((uintptr)tiny, 8);
@@ -104,12 +102,12 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
                                        tiny = (byte*)ROUND((uintptr)tiny, 4);
                                else if((size&1) == 0)
                                        tiny = (byte*)ROUND((uintptr)tiny, 2);
-                               size1 = size + (tiny - p->tiny);
+                               size1 = size + (tiny - c->tiny);
                                if(size1 <= tinysize) {
                                        // The object fits into existing tiny block.
                                        v = (MLink*)tiny;
-                                       p->tiny += size1;
-                                       p->tinysize -= size1;
+                                       c->tiny += size1;
+                                       c->tinysize -= size1;
                                        m->mallocing = 0;
                                        m->locks--;
                                        if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
@@ -129,8 +127,8 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
                        // See if we need to replace the existing tiny block with the new one
                        // based on amount of remaining free space.
                        if(TinySize-size > tinysize) {
-                               p->tiny = (byte*)v + size;
-                               p->tinysize = TinySize - size;
+                               c->tiny = (byte*)v + size;
+                               c->tinysize = TinySize - size;
                        }
                        size = TinySize;
                        goto done;
index 4146299223bed7915097978d611e026807865681..52a23e391cf473cd823d15a758d27fb7789a4495 100644 (file)
@@ -296,6 +296,10 @@ struct MCache
        // so they are grouped here for better caching.
        int32 next_sample;              // trigger heap sample after allocating this many bytes
        intptr local_cachealloc;        // bytes allocated (or freed) from cache since last lock of heap
+       // Allocator cache for tiny objects w/o pointers.
+       // See "Tiny allocator" comment in malloc.goc.
+       byte*   tiny;
+       uintptr tinysize;
        // The rest is not accessed on every malloc.
        MCacheList list[NumSizeClasses];
        // Local allocator stats, flushed during GC.
index 609dbfece120f3b488e0e99b000701c6698e2919..e21ad286dae7d94e012adb016f54e3642a01f826 100644 (file)
@@ -68,6 +68,7 @@ clearpools(void)
 {
        void **pool, **next;
        P *p, **pp;
+       MCache *c;
        uintptr off;
        int32 i;
 
@@ -86,8 +87,11 @@ clearpools(void)
 
        for(pp=runtime·allp; p=*pp; pp++) {
                // clear tinyalloc pool
-               p->tiny = nil;
-               p->tinysize = 0;
+               c = p->mcache;
+               if(c != nil) {
+                       c->tiny = nil;
+                       c->tinysize = 0;
+               }
                // clear defer pools
                for(i=0; i<nelem(p->deferpool); i++)
                        p->deferpool[i] = nil;
index 499983fd78e27d088a93c5c8bdedd5367a7158eb..13fb55454754e943b94a4b361f93557e573235a7 100644 (file)
@@ -385,11 +385,6 @@ struct P
        MCache* mcache;
        Defer*  deferpool[5];   // pool of available Defer structs of different sizes (see panic.c)
 
-       // Allocator cache for tiny objects w/o pointers.
-       // See "Tiny allocator" comment in malloc.goc.
-       byte*   tiny;
-       uintptr tinysize;
-
        // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
        uint64  goidcache;
        uint64  goidcacheend;