OFILES=$(RT0OFILES) $(LIBOFILES)
OS_H=$(GOARCH)_$(GOOS).h
-HFILES=runtime.h hashmap.h $(OS_H)
+HFILES=runtime.h hashmap.h malloc.h $(OS_H)
install: rt0 $(LIB) runtime.acid
cp $(RT0OFILES) $(GOROOT)/lib
if(m->mallocing) {
lock(&stacks);
if(stacks.size == 0)
- FixAlloc_Init(&stacks, n, SysAlloc);
+ FixAlloc_Init(&stacks, n, SysAlloc, nil, nil);
if(stacks.size != n) {
printf("stackalloc: in malloc, size=%D want %d", stacks.size, n);
throw("stackalloc");
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
+// Callers can keep state in the object but the first word is
+// smashed by freeing and reallocating.
struct FixAlloc
{
uintptr size;
void *(*alloc)(uintptr);
+ void (*first)(void *arg, byte *p); // called first time p is returned
+ void *arg;
MLink *list;
byte *chunk;
uint32 nchunk;
};
-void FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr));
+void FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
void* FixAlloc_Alloc(FixAlloc *f);
void FixAlloc_Free(FixAlloc *f, void *p);
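A minimal sketch of how a client of this interface might use the new first hook. The Foo/FooPool types and the recordfoo, foopool_init, and foopool_new names are hypothetical; the sketch assumes the runtime's runtime.h/malloc.h declarations (byte, int32, FixAlloc, SysAlloc) and that callers hold whatever lock guards the pool. It mirrors what the heap does with spans further down in this change.

typedef struct Foo Foo;
typedef struct FooPool FooPool;

struct Foo
{
	Foo *next;	// first word: smashed by FixAlloc_Free
	Foo *allnext;	// set once by the first hook; survives free and realloc
	int32 value;
};

struct FooPool
{
	FixAlloc alloc;
	Foo *all;	// every Foo ever handed out
};

// Called only the first time a given object's memory is returned,
// so each Foo is threaded onto pool->all exactly once.
static void
recordfoo(void *vp, byte *p)
{
	FooPool *pool;
	Foo *f;

	pool = vp;
	f = (Foo*)p;
	f->allnext = pool->all;
	pool->all = f;
}

void
foopool_init(FooPool *pool)
{
	FixAlloc_Init(&pool->alloc, sizeof(Foo), SysAlloc, recordfoo, pool);
}

Foo*
foopool_new(FooPool *pool)
{
	Foo *f;

	// Caller is responsible for locking; the memory is not zeroed.
	f = FixAlloc_Alloc(&pool->alloc);
	f->value = 0;
	return f;
}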
enum
{
MSpanInUse = 0,
- MSpanFree
+ MSpanFree,
+ MSpanListHead,
+ MSpanDead,
};
struct MSpan
{
MSpan *next; // in a span linked list
MSpan *prev; // in a span linked list
+ MSpan *allnext; // in the list of all spans
PageID start; // starting page number
uintptr npages; // number of pages in span
MLink *freelist; // list of free objects
uint32 ref; // number of allocated objects in this span
uint32 sizeclass; // size class
- uint32 state; // MSpanInUse or MSpanFree
+ uint32 state; // MSpanInUse etc
union {
uint32 *gcref; // sizeclass > 0
uint32 gcref0; // sizeclass == 0
Lock;
MSpan free[MaxMHeapList]; // free lists of given length
MSpan large; // free lists length >= MaxMHeapList
+ MSpan *allspans;
// span lookup
MHeapMap map;
// Initialize f to allocate objects of the given size,
// using the allocator to obtain chunks of memory.
void
-FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr))
+FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
{
f->size = size;
f->alloc = alloc;
+ f->first = first;
+ f->arg = arg;
f->list = nil;
f->chunk = nil;
f->nchunk = 0;
f->nchunk = FixAllocChunk;
}
v = f->chunk;
+ if(f->first)
+ f->first(f->arg, v);
f->chunk += f->size;
f->nchunk -= f->size;
return v;
if(s->state != MSpanInUse)
return;
-
+
p = (byte*)(s->start << PageShift);
if(s->sizeclass == 0) {
// Large block.
}
}
-static void
-sweepspanlist(MSpan *list)
-{
- MSpan *s, *next;
-
- for(s=list->next; s != list; s=next) {
- next = s->next; // in case s gets moved
- sweepspan(s);
- }
-}
-
static void
sweep(void)
{
- int32 i;
+ MSpan *s;
// Sweep all the spans.
-
- for(i=0; i<nelem(mheap.central); i++) {
- // Sweep nonempty (has some free blocks available)
- // before sweeping empty (is completely allocated),
- // because finding something to free in a span from empty
- // will move it into nonempty, and we must not sweep
- // the same span twice.
- sweepspanlist(&mheap.central[i].nonempty);
- sweepspanlist(&mheap.central[i].empty);
- }
+ for(s = mheap.allspans; s != nil; s = s->allnext)
+ sweepspan(s);
}
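Since RecordSpan (below) threads every span onto mheap.allspans exactly once and nothing ever unlinks a span from that list, this single pass visits each span once; sweepspan already returns early for anything not in MSpanInUse, so the old ordering dance between the nonempty and empty central lists is no longer needed.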
// Semaphore, not Lock, so that the goroutine
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
+static void
+RecordSpan(void *vh, byte *p)
+{
+ MHeap *h;
+ MSpan *s;
+
+ h = vh;
+ s = (MSpan*)p;
+ s->allnext = h->allspans;
+ h->allspans = s;
+}
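Note that RecordSpan runs only the first time spanalloc hands out a given MSpan's memory: when a span is freed back to spanalloc, FixAlloc_Free smashes only the first word (next), so allnext survives and a recycled span keeps its single entry on allspans rather than being inserted again.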
+
// Initialize the heap; fetch memory using alloc.
void
MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
uint32 i;
- FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc);
- FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc);
+ FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
+ FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
MHeapMap_Init(&h->map, alloc);
// h->mapcache needs no init
for(i=0; i<nelem(h->free); i++)
for(n=0; n<npage; n++)
if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
-
- // Need a list of large allocated spans.
- // They have sizeclass == 0, so use heap.central[0].empty,
- // since central[0] is otherwise unused.
- MSpanList_Insert(&h->central[0].empty, s);
} else {
// Save cache entries for this span.
// If there's a size class, there aren't that many pages.
s->npages += t->npages;
MHeapMap_Set(&h->map, s->start, s);
MSpanList_Remove(t);
+ t->state = MSpanDead;
FixAlloc_Free(&h->spanalloc, t);
}
if((t = MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
s->npages += t->npages;
MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
MSpanList_Remove(t);
+ t->state = MSpanDead;
FixAlloc_Free(&h->spanalloc, t);
}
void
MSpanList_Init(MSpan *list)
{
+ list->state = MSpanListHead;
list->next = list;
list->prev = list;
}
gp->status = Grunning;
if(debug > 1) {
lock(&debuglock);
- printf("m%d run g%d\n", m->id, gp->goid);
+ printf("m%d run g%d at %p\n", m->id, gp->goid, gp->sched.PC);
+ traceback(gp->sched.PC, gp->sched.SP+8, gp);
unlock(&debuglock);
}
m->curg = gp;
notewakeup(&sched.stopped);
}
unlock(&sched);
- // leave SP around for gc; poison PC to make sure it's not used
- g->sched.SP = (byte*)&callerpc;
- g->sched.PC = (byte*)0xdeadbeef;
+ // leave SP around for gc and traceback
+ gosave(&g->sched);
}
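Saving a real PC and SP here with gosave, rather than poisoning PC, is presumably what lets the debug traceback added to the scheduler above walk a goroutine while it sits in a system call.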
// The goroutine g exited its system call.