// Larger values of MaxGcproc are plausible depending on the
// hardware details of the machine.  The limit was raised from 4
// to 16 now that the garbage collector sweeps in parallel.
- MaxGcproc = 4,
+ MaxGcproc = 16,
};
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
void runtime·MCentral_Init(MCentral *c, int32 sizeclass);
int32 runtime·MCentral_AllocList(MCentral *c, int32 n, MLink **first);
void runtime·MCentral_FreeList(MCentral *c, int32 n, MLink *first);
+void runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
}
// Free n objects back into the central free list.
-// Return the number of objects allocated.
-// The objects are linked together by their first words.
-// On return, *pstart points at the first object and *pend at the last.
void
runtime·MCentral_FreeList(MCentral *c, int32 n, MLink *start)
{
}
}
+// Free n objects from a span s back into the central free list c.
+// Called from GC (the sweep phase), which has already unlinked the
+// n objects into the list start..end; this routine returns them to
+// s's free list under c's lock, and returns the whole span to the
+// heap if that makes it completely free.
+void
+runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
+{
+ int32 size;
+
+ runtime·lock(c);
+
+ // Move to nonempty if necessary.
+ if(s->freelist == nil) {
+ runtime·MSpanList_Remove(s);
+ runtime·MSpanList_Insert(&c->nonempty, s);
+ }
+
+ // Add the objects back to s's free list.
+ end->next = s->freelist;
+ s->freelist = start;
+ s->ref -= n;
+ c->nfree += n;
+
+ // If s is completely freed, return it to the heap.
+ if(s->ref == 0) {
+ size = runtime·class_to_size[c->sizeclass];
+ runtime·MSpanList_Remove(s);
+ *(uintptr*)(s->start<<PageShift) = 1; // needs zeroing
+ s->freelist = nil;
+ // The span's objects leave c's accounting entirely: subtract
+ // the full object capacity of the span, not just n.
+ c->nfree -= (s->npages << PageShift) / size;
+ // NOTE(review): c is unlocked before taking the heap lock in
+ // MHeap_Free — presumably to respect lock ordering; confirm
+ // against the runtime's locking rules.
+ runtime·unlock(c);
+ runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ runtime·MHeap_Free(&runtime·mheap, s, 0);
+ } else {
+ runtime·unlock(c);
+ }
+}
+
+
void
runtime·MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj)
{
byte *p;
MCache *c;
byte *arena_start;
+ MLink *start, *end;
+ int32 nfree;
arena_start = runtime·mheap.arena_start;
p = (byte*)(s->start << PageShift);
npages = runtime·class_to_allocnpages[cl];
n = (npages << PageShift) / size;
}
+ nfree = 0;
+ start = end = nil;
+ c = m->mcache;
// Sweep through n objects of given size starting at p.
// This thread owns the span now, so it can manipulate
// Mark freed; restore block boundary bit.
*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
- c = m->mcache;
if(s->sizeclass == 0) {
// Free large span.
runtime·unmarkspan(p, 1<<PageShift);
*(uintptr*)p = 1; // needs zeroing
runtime·MHeap_Free(&runtime·mheap, s, 1);
+ c->local_alloc -= size;
+ c->local_nfree++;
} else {
// Free small object.
if(size > sizeof(uintptr))
((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
- c->local_by_size[s->sizeclass].nfree++;
- runtime·MCache_Free(c, p, s->sizeclass, size);
+ if(nfree)
+ end->next = (MLink*)p;
+ else
+ start = (MLink*)p;
+ end = (MLink*)p;
+ nfree++;
}
- c->local_alloc -= size;
- c->local_nfree++;
+ }
+
+ if(nfree) {
+ c->local_by_size[s->sizeclass].nfree += nfree;
+ c->local_alloc -= size * nfree;
+ c->local_nfree += nfree;
+ c->local_cachealloc -= nfree * size;
+ c->local_objects -= nfree;
+ runtime·MCentral_FreeSpan(&runtime·mheap.central[cl], s, nfree, start, end);
}
}