// setenv_c forwards a (key, value) pair from Go to the C library through
// the _cgo_setenv hook, which is only non-nil when cgo is linked in.
// NOTE(review): this is a diff hunk ('-' = removed, '+' = added). The change
// drops the TinySize padding and the explicit runtime·free calls; the two
// NUL-terminated copies are now left for the garbage collector to reclaim —
// consistent with the removal of runtime·free elsewhere in this change.
syscall·setenv_c(String k, String v)
{
byte *arg[2];
- uintptr len;
if(_cgo_setenv == nil)
return;
- // Objects that are explicitly freed must be at least 16 bytes in size,
- // so that they are not allocated using tiny alloc.
- len = k.len + 1;
- if(len < TinySize)
- len = TinySize;
- arg[0] = runtime·malloc(len);
+ // NUL-terminated copy of the key for the C side (k.str is not terminated).
+ arg[0] = runtime·malloc(k.len + 1);
runtime·memmove(arg[0], k.str, k.len);
arg[0][k.len] = 0;
- len = v.len + 1;
- if(len < TinySize)
- len = TinySize;
- arg[1] = runtime·malloc(len);
+ // NUL-terminated copy of the value, same as the key above.
+ arg[1] = runtime·malloc(v.len + 1);
runtime·memmove(arg[1], v.str, v.len);
arg[1][v.len] = 0;
// Hand both C strings to the cgo setenv shim via asmcgocall.
runtime·asmcgocall((void*)_cgo_setenv, arg);
- runtime·free(arg[0]);
- runtime·free(arg[1]);
}
return runtime·mallocgc(size, nil, FlagNoInvokeGC);
}
-// Free the object whose base pointer is v.
-// NOTE(review): every line of this function is marked '-' in the hunk —
-// runtime·free is deleted by this change; the text below is the removed
-// implementation, preserved here only as the deleted side of the diff.
-void
-runtime·free(void *v)
-{
- int32 sizeclass;
- MSpan *s;
- MCache *c;
- uintptr size;
-
- // free(nil) is a no-op, matching C's free().
- if(v == nil)
- return;
-
- // If you change this also change mgc0.c:/^sweep,
- // which has a copy of the guts of free.
-
- // Re-entering the allocator while a malloc is in progress would deadlock.
- if(g->m->mallocing)
- runtime·throw("malloc/free - deadlock");
- g->m->mallocing = 1;
-
- // Resolve v to its span; a failed lookup means v is not a heap block.
- if(!runtime·mlookup(v, nil, nil, &s)) {
- runtime·printf("free %p: not an allocated block\n", v);
- runtime·throw("free runtime·mlookup");
- }
- size = s->elemsize;
- sizeclass = s->sizeclass;
- // Objects smaller than TinySize may have been allocated by the tiny
- // allocator, which packs several objects into one block; if such an
- // object were combined with one that has a finalizer, we would crash.
- if(size < TinySize)
- runtime·throw("freeing too small block");
-
- if(runtime·debug.allocfreetrace)
- runtime·tracefree(v, size);
-
- // Ensure that the span is swept.
- // If we free into an unswept span, we will corrupt GC bitmaps.
- runtime·MSpan_EnsureSwept(s);
-
- // Run/clear any finalizer or profile records attached to v.
- if(s->specials != nil)
- runtime·freeallspecials(s, v, size);
-
- c = g->m->mcache;
- if(sizeclass == 0) {
- // Large object.
- s->needzero = 1;
- // Must mark v freed before calling unmarkspan and MHeap_Free:
- // they might coalesce v into other spans and change the bitmap further.
- runtime·markfreed(v);
- runtime·unmarkspan(v, s->npages<<PageShift);
- // NOTE(rsc,dvyukov): The original implementation of efence
- // in CL 22060046 used SysFree instead of SysFault, so that
- // the operating system would eventually give the memory
- // back to us again, so that an efence program could run
- // longer without running out of memory. Unfortunately,
- // calling SysFree here without any kind of adjustment of the
- // heap data structures means that when the memory does
- // come back to us, we have the wrong metadata for it, either in
- // the MSpan structures or in the garbage collection bitmap.
- // Using SysFault here means that the program will run out of
- // memory fairly quickly in efence mode, but at least it won't
- // have mysterious crashes due to confused memory reuse.
- // It should be possible to switch back to SysFree if we also
- // implement and then call some kind of MHeap_DeleteSpan.
- if(runtime·debug.efence) {
- s->limit = nil; // prevent mlookup from finding this span
- runtime·SysFault((void*)(s->start<<PageShift), size);
- } else
- runtime·MHeap_Free(&runtime·mheap, s, 1);
- c->local_nlargefree++;
- c->local_largefree += size;
- } else {
- // Small object.
- if(size > 2*sizeof(uintptr))
- ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
- else if(size > sizeof(uintptr))
- ((uintptr*)v)[1] = 0;
- // Must mark v freed before calling MCache_Free:
- // it might coalesce v and other blocks into a bigger span
- // and change the bitmap further.
- c->local_nsmallfree[sizeclass]++;
- c->local_cachealloc -= size;
- if(c->alloc[sizeclass] == s) {
- // We own the span, so we can just add v to the freelist
- runtime·markfreed(v);
- ((MLink*)v)->next = s->freelist;
- s->freelist = v;
- s->ref--;
- } else {
- // Someone else owns this span. Add to free queue.
- runtime·MCache_Free(c, v, sizeclass, size);
- }
- }
- g->m->mallocing = 0;
-}
-
int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
// Initialize the rest of the allocator.
runtime·MHeap_Init(&runtime·mheap);
g->m->mcache = runtime·allocmcache();
-
- // See if it works.
- runtime·free(runtime·malloc(TinySize));
}
void*
uintptr tinysize;
// The rest is not accessed on every malloc.
MSpan* alloc[NumSizeClasses]; // spans to allocate from
- MCacheList free[NumSizeClasses];// lists of explicitly freed objects
StackFreeList stackcache[NumStackOrders];
};
MSpan* runtime·MCache_Refill(MCache *c, int32 sizeclass);
-void runtime·MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
void runtime·MCache_ReleaseAll(MCache *c);
void runtime·stackcache_clear(MCache *c);
byte *limit; // end of data in span
Lock specialLock; // guards specials list
Special *specials; // linked list of special records sorted by offset.
- MLink *freebuf; // objects freed explicitly, not incorporated into freelist yet
};
void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages);
int32 sizeclass;
MSpan nonempty; // list of spans with a free object
MSpan empty; // list of spans with no free objects (or cached in an MCache)
- int32 nfree; // # of objects available in nonempty spans
};
void runtime·MCentral_Init(MCentral *c, int32 sizeclass);
MSpan* runtime·MCentral_CacheSpan(MCentral *c);
void runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s);
bool runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
-void runtime·MCentral_FreeList(MCentral *c, MLink *start); // TODO: need this?
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
void runtime·gc(int32 force);
uintptr runtime·sweepone(void);
void runtime·markallocated(void *v, uintptr size, uintptr size0, Type* typ, bool scan);
-void runtime·markfreed(void *v);
void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime·unmarkspan(void *v, uintptr size);
void runtime·purgecachedstats(MCache*);
bool runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*);
void runtime·removefinalizer(void*);
void runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot);
-
-void runtime·freeallspecials(MSpan *span, void *p, uintptr size);
bool runtime·freespecial(Special *s, void *p, uintptr size, bool freed);
// Information from the compiler about the layout of stack frames.
MSpan*
runtime·MCache_Refill(MCache *c, int32 sizeclass)
{
- MCacheList *l;
MSpan *s;
g->m->locks++;
if(s != &emptymspan)
runtime·MCentral_UncacheSpan(&runtime·mheap.central[sizeclass], s);
- // Push any explicitly freed objects to the central lists.
- // Not required, but it seems like a good time to do it.
- l = &c->free[sizeclass];
- if(l->nlist > 0) {
- runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], l->list);
- l->list = nil;
- l->nlist = 0;
- }
-
// Get a new cached span from the central lists.
s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass]);
if(s == nil)
return s;
}
-// NOTE(review): entire function is '-' in this hunk — MCache_Free, the
-// per-cache batching side of explicit free, is deleted together with
-// runtime·free. Preserved below as the removed side of the diff.
-void
-runtime·MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size)
-{
- MCacheList *l;
-
- // Put on free list.
- l = &c->free[sizeclass];
- p->next = l->list;
- l->list = p;
- l->nlist++;
-
- // We transfer a span at a time from MCentral to MCache,
- // so we'll do the same in the other direction.
- if(l->nlist >= (runtime·class_to_allocnpages[sizeclass]<<PageShift)/size) {
- runtime·MCentral_FreeList(&runtime·mheap.central[sizeclass], l->list);
- l->list = nil;
- l->nlist = 0;
- }
-}
-
// MCache_ReleaseAll returns every cached alloc span back to the central
// lists (called e.g. when flushing per-P caches).
// NOTE(review): the '-' lines are the deleted per-sizeclass flush of the
// explicit free lists (c->free[] goes away with runtime·free). The brace
// nesting in this hunk looks unbalanced — likely an artifact of how the
// diff was extracted; verify against the full file before relying on it.
void
runtime·MCache_ReleaseAll(MCache *c)
{
int32 i;
MSpan *s;
- MCacheList *l;
for(i=0; i<NumSizeClasses; i++) {
s = c->alloc[i];
runtime·MCentral_UncacheSpan(&runtime·mheap.central[i], s);
c->alloc[i] = &emptymspan;
}
- l = &c->free[i];
- if(l->nlist > 0) {
- runtime·MCentral_FreeList(&runtime·mheap.central[i], l->list);
- l->list = nil;
- l->nlist = 0;
- }
}
}
#include "malloc.h"
static bool MCentral_Grow(MCentral *c);
-static void MCentral_Free(MCentral *c, MLink *v);
static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);
// Initialize a single central free list.
runtime·throw("empty span");
if(s->freelist == nil)
runtime·throw("freelist empty");
- c->nfree -= n;
runtime·MSpanList_Remove(s);
runtime·MSpanList_InsertBack(&c->empty, s);
s->incache = true;
void
runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
- MLink *v;
int32 cap, n;
runtime·lock(c);
s->incache = false;
- // Move any explicitly freed items from the freebuf to the freelist.
- while((v = s->freebuf) != nil) {
- s->freebuf = v->next;
- runtime·markfreed(v);
- v->next = s->freelist;
- s->freelist = v;
- s->ref--;
- }
-
if(s->ref == 0) {
// Free back to heap. Unlikely, but possible.
MCentral_ReturnToHeap(c, s); // unlocks c
cap = (s->npages << PageShift) / s->elemsize;
n = cap - s->ref;
if(n > 0) {
- c->nfree += n;
runtime·MSpanList_Remove(s);
runtime·MSpanList_Insert(&c->nonempty, s);
}
runtime·unlock(c);
}
-// Free the list of objects back into the central free list c.
-// Called from runtime·free.
-// NOTE(review): deleted by this change together with runtime·free — its
-// only caller. Preserved below as the removed side of the diff.
-void
-runtime·MCentral_FreeList(MCentral *c, MLink *start)
-{
- MLink *next;
-
- // Walk the singly linked list, freeing each object while holding c's lock.
- runtime·lock(c);
- for(; start != nil; start = next) {
- next = start->next;
- MCentral_Free(c, start);
- }
- runtime·unlock(c);
-}
-
-// Helper: free one object back into the central free list.
-// Caller must hold lock on c on entry. Holds lock on exit.
-// NOTE(review): static helper of MCentral_FreeList, deleted with it in
-// this change. Preserved below as the removed side of the diff.
-static void
-MCentral_Free(MCentral *c, MLink *v)
-{
- MSpan *s;
-
- // Find span for v.
- s = runtime·MHeap_Lookup(&runtime·mheap, v);
- if(s == nil || s->ref == 0)
- runtime·throw("invalid free");
- if(s->state != MSpanInUse)
- runtime·throw("free into stack span");
- if(s->sweepgen != runtime·mheap.sweepgen)
- runtime·throw("free into unswept span");
-
- // If the span is currently being used unsynchronized by an MCache,
- // we can't modify the freelist. Add to the freebuf instead. The
- // items will get moved to the freelist when the span is returned
- // by the MCache.
- if(s->incache) {
- v->next = s->freebuf;
- s->freebuf = v;
- return;
- }
-
- // Move span to nonempty if necessary.
- if(s->freelist == nil) {
- runtime·MSpanList_Remove(s);
- runtime·MSpanList_Insert(&c->nonempty, s);
- }
-
- // Add the object to span's free list.
- runtime·markfreed(v);
- v->next = s->freelist;
- s->freelist = v;
- s->ref--;
- c->nfree++;
-
- // If s is completely freed, return it to the heap.
- if(s->ref == 0) {
- MCentral_ReturnToHeap(c, s); // unlocks c
- runtime·lock(c);
- }
-}
-
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap. Sets sweepgen to
end->next = s->freelist;
s->freelist = start;
s->ref -= n;
- c->nfree += n;
// delay updating sweepgen until here. This is the signal that
// the span may be used in an MCache, so it must come after the
runtime·markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
runtime·lock(c);
- c->nfree += n;
runtime·MSpanList_Insert(&c->nonempty, s);
return true;
}
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
- int32 size;
-
- size = runtime·class_to_size[c->sizeclass];
runtime·MSpanList_Remove(s);
s->needzero = 1;
s->freelist = nil;
if(s->ref != 0)
runtime·throw("ref wrong");
- c->nfree -= (s->npages << PageShift) / size;
runtime·unlock(c);
runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
runtime·MHeap_Free(&runtime·mheap, s, 0);
switch(i) {
case RootData:
scanblock(data, edata - data, work.gcdata);
- //scanblock(data, edata - data, ScanConservatively);
break;
case RootBss:
scanblock(bss, ebss - bss, work.gcbss);
- //scanblock(bss, ebss - bss, ScanConservatively);
break;
case RootFinalizers:
// important to set sweepgen before returning it to heap
runtime·atomicstore(&s->sweepgen, sweepgen);
sweepgenset = true;
- // See note about SysFault vs SysFree in malloc.goc.
+ // NOTE(rsc,dvyukov): The original implementation of efence
+ // in CL 22060046 used SysFree instead of SysFault, so that
+ // the operating system would eventually give the memory
+ // back to us again, so that an efence program could run
+ // longer without running out of memory. Unfortunately,
+ // calling SysFree here without any kind of adjustment of the
+ // heap data structures means that when the memory does
+ // come back to us, we have the wrong metadata for it, either in
+ // the MSpan structures or in the garbage collection bitmap.
+ // Using SysFault here means that the program will run out of
+ // memory fairly quickly in efence mode, but at least it won't
+ // have mysterious crashes due to confused memory reuse.
+ // It should be possible to switch back to SysFree if we also
+ // implement and then call some kind of MHeap_DeleteSpan.
if(runtime·debug.efence) {
s->limit = nil; // prevent mlookup from finding this span
runtime·SysFault(p, size);
}
if(s->sweepgen != sg-2 || !runtime·cas(&s->sweepgen, sg-2, sg-1))
continue;
- if(s->incache)
- runtime·throw("sweep of incache span");
npages = s->npages;
if(!runtime·MSpan_Sweep(s))
npages = 0;
if(runtime·debug.allocfreetrace)
runtime·tracegc();
- // This is required while we explicitly free objects and have imprecise GC.
- // If we don't do this, then scanblock can queue an object for scanning;
- // then another thread frees this object during RootFlushCaches;
- // then the first thread scans the object; then debug check in scanblock
- // finds this object already freed and throws.
- if(Debug)
- flushallmcaches();
-
g->m->traceback = 2;
t0 = args->start_time;
work.tstart = args->start_time;
f = &fb->fin[i];
framesz = sizeof(Eface) + f->nret;
if(framecap < framesz) {
- runtime·free(frame);
// The frame does not contain pointers interesting for GC,
// all not yet finalized objects are stored in finq.
// If we do not mark it as FlagNoScan,
mp->ptrarg[1] = nil;
}
-// mark the block at v as freed.
-// NOTE(review): deleted by this change — with explicit free gone, blocks
-// are only released by the sweeper. Preserved below as the removed side
-// of the diff.
-void
-runtime·markfreed(void *v)
-{
- uintptr *b, off, shift, xbits, bits;
-
- // v must point into the heap arena.
- if((byte*)v > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
- runtime·throw("markfreed: bad pointer");
-
- // Locate v's bit field in the GC bitmap, which grows down from arena_start.
- off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
- b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
- shift = (off % wordsPerBitmapWord) * gcBits;
- xbits = *b;
- bits = (xbits>>shift) & bitMask;
-
- if(bits == bitMiddle)
- runtime·throw("bad bits in markfreed");
- if(bits == bitBoundary)
- return; // FlagNoGC object
- if(!g->m->gcing || work.nproc == 1) {
- // During normal operation (not GC), the span bitmap is not updated concurrently,
- // because either the span is cached or accesses are protected with MCentral lock.
- *b = (xbits & ~(bitMask<<shift)) | (bitBoundary<<shift);
- } else {
- // During GC other threads concurrently mark heap.
- for(;;) {
- xbits = *b;
- if(runtime·casp((void**)b, (void*)xbits, (void*)((xbits & ~(bitMask<<shift)) | (bitBoundary<<shift))))
- break;
- }
- }
-}
-
// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
void
span->specialLock.key = 0;
span->specials = nil;
span->needzero = 0;
- span->freebuf = nil;
}
// Initialize an empty doubly-linked list.
return true;
}
}
-
-// Free all special records for p.
-// NOTE(review): deleted by this change — it was only needed when objects
-// could be freed explicitly; the sweeper handles specials otherwise.
-// Preserved below as the removed side of the diff.
-void
-runtime·freeallspecials(MSpan *span, void *p, uintptr size)
-{
- Special *s, **t, *list;
- uintptr offset;
-
- if(span->sweepgen != runtime·mheap.sweepgen)
- runtime·throw("runtime: freeallspecials: unswept span");
- // first, collect all specials into the list; then, free them
- // this is required to not cause deadlock between span->specialLock and proflock
- list = nil;
- offset = (uintptr)p - (span->start << PageShift);
- runtime·lock(&span->specialLock);
- t = &span->specials;
- while((s = *t) != nil) {
- // specials are sorted by offset; stop once past p's object.
- if(offset + size <= s->offset)
- break;
- if(offset <= s->offset) {
- *t = s->next;
- s->next = list;
- list = s;
- } else
- t = &s->next;
- }
- runtime·unlock(&span->specialLock);
-
- // Now run each collected special without holding specialLock.
- while(list != nil) {
- s = list;
- list = s->next;
- if(!runtime·freespecial(s, p, size, true))
- runtime·throw("can't explicitly free an object with a finalizer");
- }
-}
p->deferpool[sc] = d;
// No need to wipe out pointers in argp/pc/fn/args,
// because we empty the pool before GC.
- } else
- runtime·free(d);
+ }
}
// Create a new deferred function fn with siz bytes of arguments.
new = runtime·malloc(cap*sizeof(new[0]));
if(new == nil)
runtime·throw("runtime: cannot allocate memory");
- if(runtime·allg != nil) {
+ if(runtime·allg != nil)
runtime·memmove(new, runtime·allg, runtime·allglen*sizeof(new[0]));
- runtime·free(runtime·allg);
- }
runtime·allg = new;
allgcap = cap;
}
uintptr runtime·ifacehash(Iface, uintptr);
uintptr runtime·efacehash(Eface, uintptr);
void* runtime·malloc(uintptr size);
-void runtime·free(void *v);
void runtime·runpanic(Panic*);
uintptr runtime·getcallersp(void*);
int32 runtime·mcount(void);
n = timers.cap*3 / 2;
nt = runtime·malloc(n*sizeof nt[0]);
runtime·memmove(nt, timers.t, timers.len*sizeof nt[0]);
- runtime·free(timers.t);
timers.t = nt;
timers.cap = n;
}