MCacheList *l;
MLink *v;
byte *tiny;
- P *p;
// The main targets of tiny allocator are small strings and
// standalone escaping variables. On a json benchmark
// the allocator reduces number of allocations by ~12% and
// reduces heap size by ~20%.
- p = m->p;
- tinysize = p->tinysize;
+ tinysize = c->tinysize;
if(size <= tinysize) {
- tiny = p->tiny;
+ tiny = c->tiny;
// Align tiny pointer for required (conservative) alignment.
if((size&7) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 8);
else if((size&3) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 4);
else if((size&1) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 2);
- size1 = size + (tiny - p->tiny);
+ size1 = size + (tiny - c->tiny);
if(size1 <= tinysize) {
// The object fits into existing tiny block.
v = (MLink*)tiny;
- p->tiny += size1;
- p->tinysize -= size1;
+ c->tiny += size1;
+ c->tinysize -= size1;
m->mallocing = 0;
m->locks--;
if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
return v;
}
}
// Allocate a new TinySize block.
l = &c->list[TinySizeClass];
if(l->list == nil)
runtime·MCache_Refill(c, TinySizeClass);
v = l->list;
l->list = v->next;
l->nlist--;
// Clear the block's first 16 bytes; it may still hold a stale freelist link.
((uint64*)v)[0] = 0;
((uint64*)v)[1] = 0;
// See if we need to replace the existing tiny block with the new one
// based on amount of remaining free space.
if(TinySize-size > tinysize) {
- p->tiny = (byte*)v + size;
- p->tinysize = TinySize - size;
+ c->tiny = (byte*)v + size;
+ c->tinysize = TinySize - size;
}
size = TinySize;
goto done;
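/*
 * Illustrative, standalone C sketch -- not part of the patch above.
 * It mirrors the tiny-allocator fast path shown in the hunk: round the bump
 * pointer to a conservative alignment chosen from the request size, hand out
 * the tail of the current 16-byte block when the request fits, and otherwise
 * start a fresh block, keeping whichever block has more room left.
 * TINY_SIZE, TinyCache, ROUND_UP and tiny_alloc are made-up names, and
 * calloc()/malloc() stand in for the runtime's size-class allocator.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TINY_SIZE 16
#define ROUND_UP(x, n) (((x) + (n) - 1) & ~((uintptr_t)(n) - 1))

typedef struct TinyCache {
	uint8_t  *tiny;     /* unused tail of the current tiny block */
	size_t   tinysize;  /* bytes left in that block */
} TinyCache;

/* As with the real tiny allocator, objects handed out here share one block
 * and cannot be freed individually; the sketch simply never frees. */
static void *tiny_alloc(TinyCache *c, size_t size)
{
	uint8_t *tiny;
	size_t size1;
	void *v;

	if (size == 0 || size >= TINY_SIZE)
		return malloc(size);    /* only 1..15-byte requests are combined */

	if (size <= c->tinysize) {
		/* Align tiny pointer for required (conservative) alignment. */
		tiny = c->tiny;
		if ((size & 7) == 0)
			tiny = (uint8_t *)ROUND_UP((uintptr_t)tiny, 8);
		else if ((size & 3) == 0)
			tiny = (uint8_t *)ROUND_UP((uintptr_t)tiny, 4);
		else if ((size & 1) == 0)
			tiny = (uint8_t *)ROUND_UP((uintptr_t)tiny, 2);
		size1 = size + (size_t)(tiny - c->tiny);
		if (size1 <= c->tinysize) {
			/* The object fits into the existing tiny block. */
			c->tiny += size1;
			c->tinysize -= size1;
			return tiny;
		}
	}
	/* Start a new TINY_SIZE block; keep it if it has more room left
	 * than the old one. */
	v = calloc(1, TINY_SIZE);
	if (v == NULL)
		return NULL;
	if (TINY_SIZE - size > c->tinysize) {
		c->tiny = (uint8_t *)v + size;
		c->tinysize = TINY_SIZE - size;
	}
	return v;
}

int main(void)
{
	TinyCache c = {NULL, 0};
	/* Three small allocations; the 2nd and 3rd are carved out of the
	 * 16-byte block obtained by the 1st. */
	void *a = tiny_alloc(&c, 5);
	void *b = tiny_alloc(&c, 2);
	void *d = tiny_alloc(&c, 8);
	printf("%p %p %p, %zu bytes left\n", a, b, d, c.tinysize);
	return 0;
}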
struct MCache
{
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
int32 next_sample; // trigger heap sample after allocating this many bytes
intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
+ // Allocator cache for tiny objects w/o pointers.
+ // See "Tiny allocator" comment in malloc.goc.
+ byte* tiny;
+ uintptr tinysize;
// The rest is not accessed on every malloc.
MCacheList list[NumSizeClasses];
// Local allocator stats, flushed during GC.
uintptr local_nlookup; // number of pointer lookups
uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
uintptr local_nlargefree; // number of frees for large objects (>MaxSmallSize)
uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
MCache* mcache;
Defer* deferpool[5]; // pool of available Defer structs of different sizes (see panic.c)
- // Allocator cache for tiny objects w/o pointers.
- // See "Tiny allocator" comment in malloc.goc.
- byte* tiny;
- uintptr tinysize;
-
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
uint64 goidcache;
uint64 goidcacheend;
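/*
 * Illustrative sketch -- not part of the patch. It shows what the two struct
 * hunks above do: the tiny-allocation state leaves P and joins the MCache
 * fields that are accessed on every malloc, so the fast path in the first
 * hunk reads c->tiny / c->tinysize instead of p->tiny / p->tinysize.
 * The struct and field names below are simplified stand-ins for the
 * runtime's MCache and P declarations, not the real definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct MCache {
	/* Hot fields, accessed on every malloc. */
	int32_t   next_sample;      /* heap-sample trigger */
	intptr_t  local_cachealloc; /* bytes allocated from cache since last heap lock */
	uint8_t  *tiny;             /* tiny-object bump pointer (moved here from P) */
	uintptr_t tinysize;         /* bytes left in the current tiny block */
	/* ... colder fields (per-size-class lists, local stats) ... */
} MCache;

typedef struct P {
	MCache *mcache;             /* per-P allocator cache */
	/* ... scheduler state: deferpool, goidcache, ... */
} P;

/* Before the change the fast path read p->tiny / p->tinysize directly;
 * after it, everything goes through the already-loaded MCache pointer. */
static uintptr_t tiny_bytes_left(P *p)
{
	MCache *c = p->mcache;
	return c->tinysize;
}

int main(void)
{
	MCache c = {0};
	P p = { &c };
	c.tinysize = 16;
	printf("%llu\n", (unsigned long long)tiny_bytes_left(&p));
	return 0;
}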