remove the allocator's skip-depth argument; filter internal functions out of traces in gopprof instead.
R=r
CC=golang-dev
https://golang.org/cl/855046
'__builtin_vec_new',
'operator new',
'operator new[]',
+ # Go
+ 'catstring',
+ 'copyin',
+ 'gostring',
+ 'gostringsize',
+ 'hash_init',
+ 'hash_subtable_new',
+ 'hash_conv',
+ 'hash_grow',
+ 'hash_insert_internal',
+ 'hash_insert',
+ 'mapassign',
+ 'runtime.mapassign1',
+ 'makechan',
+ 'makemap',
+ 'mal',
+ 'mallocgc',
+ 'runtime.catstring',
+ 'runtime.ifaceT2E',
+ 'runtime.ifaceT2I',
+ 'runtime.makechan',
+ 'runtime.makemap',
+ 'runtime.makeslice',
+ 'runtime.mal',
+ 'runtime.slicebytetostring',
+ 'runtime.sliceinttostring',
+ 'runtime.stringtoslicebyte',
+ 'runtime.stringtosliceint',
# These mark the beginning/end of our custom sections
'__start_google_malloc',
'__stop_google_malloc',
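With this change, the runtime no longer skips a caller-supplied number of frames when recording an allocation stack; gopprof instead drops leading frames whose symbol names appear on the skip list above. A minimal C rendering of that name-based pruning (the list here is abbreviated and the function names are illustrative, not gopprof's actual code):

	#include <string.h>

	/* Abbreviated stand-in for the skip list above. */
	static const char *skip[] = {
		"mallocgc", "mal", "runtime.mal", "makechan",
		"runtime.makeslice", "gostringsize", 0,
	};

	static int
	is_internal(const char *name)
	{
		for(int i = 0; skip[i] != 0; i++)
			if(strcmp(name, skip[i]) == 0)
				return 1;
		return 0;
	}

	/* Return the index of the first frame that is not an internal
	 * allocator entry point; the profile reports from there down. */
	static int
	first_interesting(const char **frames, int n)
	{
		int i = 0;
		while(i < n && is_internal(frames[i]))
			i++;
		return i;
	}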
if(wid <= sizeof(*dst))
algarray[alg].copy(wid, dst, src);
else {
- p = malx(wid, 1);
+ p = mal(wid);
algarray[alg].copy(wid, p, src);
*dst = p;
}
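For context, the branch above is the usual interface-boxing rule: a value no wider than one machine word is stored directly in the destination word, while a wider value is copied to the heap and the word holds a pointer to the copy. A self-contained sketch of the same rule, with memcpy and malloc standing in for the runtime's copy routine and mal:

	#include <stdlib.h>
	#include <string.h>

	static void
	boxcopy(size_t wid, void **dst, void *src)
	{
		if(wid <= sizeof(*dst))
			memcpy(dst, src, wid);	/* fits: store the value inline */
		else {
			void *p = malloc(wid);	/* stand-in for mal(wid) */
			memcpy(p, src, wid);
			*dst = p;		/* store a pointer to the copy */
		}
	}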
t = (Type*)((Eface*)typ.data-1);
if(t->kind&KindNoPointers)
- ret = mallocgc(t->size, RefNoPointers, 1, 1, 1);
+ ret = mallocgc(t->size, RefNoPointers, 1, 1);
else
ret = mal(t->size);
FLUSH(&ret);
size = n*t->size;
if(t->kind&KindNoPointers)
- ret = mallocgc(size, RefNoPointers, 1, 1, 1);
+ ret = mallocgc(size, RefNoPointers, 1, 1);
else
ret = mal(size);
FLUSH(&ret);
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
-mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed, int32 skip_depth)
+mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
{
int32 sizeclass, rate;
MCache *c;
m->mcache->next_sample = fastrand1() % (2*rate);
profile:
*ref |= RefProfiled;
- MProf_Malloc(skip_depth+1, v, size);
+ MProf_Malloc(v, size);
}
}
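The comment above summarizes the allocator's size split. As a hedged sketch of just that decision (the real MCache/MHeap machinery is far more involved; the helper names here are stand-ins, not runtime APIs):

	#include <stdlib.h>

	enum { MaxSmallSize = 32*1024, PageSize = 4096 };

	/* Stand-ins for the per-thread cache and the page heap. */
	static void* mcache_alloc(size_t size)  { return malloc(size); }
	static void* mheap_alloc(size_t npages) { return malloc(npages*PageSize); }

	static void*
	alloc_sketch(size_t size)
	{
		if(size <= MaxSmallSize)
			/* small: rounded up to a size class and served
			 * from the per-thread cache's free list */
			return mcache_alloc(size);
		/* large: whole pages taken straight from the heap */
		return mheap_alloc((size + PageSize - 1) / PageSize);
	}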
void*
malloc(uintptr size)
{
- return mallocgc(size, 0, 0, 1, 1);
+ return mallocgc(size, 0, 0, 1);
}
// Free the object whose base pointer is v.
void*
mal(uintptr n)
{
- return mallocgc(n, 0, 1, 1, 2);
-}
-
-void*
-malx(uintptr n, int32 skip_delta)
-{
- return mallocgc(n, 0, 1, 1, 2+skip_delta);
+ return mallocgc(n, 0, 1, 1);
}
// Stack allocator uses malloc/free most of the time,
unlock(&stacks);
return v;
}
- v = mallocgc(n, RefNoProfiling, 0, 0, 0);
+ v = mallocgc(n, RefNoProfiling, 0, 0);
if(!mlookup(v, nil, nil, nil, &ref))
throw("stackalloc mlookup");
*ref = RefStack;
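Per the comment above, stacks normally come from the general allocator, now via the four-argument mallocgc with RefNoProfiling so they never appear in malloc profiles, and a fixed-size side allocator is used only when malloc itself cannot be reentered. A toy of that two-path shape, with a plain free list standing in for the runtime's FixAlloc and the locking and RefStack bookkeeping elided:

	#include <stdlib.h>

	typedef struct Free Free;
	struct Free { Free *next; };

	static Free	*stacks;	/* stand-in for the 'stacks' FixAlloc */
	static int	mallocing;	/* set while the allocator is busy */

	static void*
	stackalloc_sketch(size_t n)
	{
		/* Can't reenter the allocator while it (or the GC) runs:
		 * take a segment from the fixed-size side list instead. */
		if(mallocing && stacks != NULL) {
			Free *v = stacks;
			stacks = v->next;
			return v;
		}
		/* Normal path; the real call is
		 * mallocgc(n, RefNoProfiling, 0, 0). */
		return malloc(n);
	}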
MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
void MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
-void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed, int32 skip_depth);
+void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32 mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
void gc(int32 force);
RefFlags = 0xFFFF0000U,
};
-void MProf_Malloc(int32, void*, uintptr);
+void MProf_Malloc(void*, uintptr);
void MProf_Free(void*, uintptr);
// Malloc profiling settings.
newtab.max *= 3;
}
- newtab.key = mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1, 2);
- newtab.val = mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1, 2);
+ newtab.key = mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
+ newtab.val = mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
for(i=0; i<fintab.max; i++) {
void *k;
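The hunk above sits inside the growth path of the finalizer table: capacity is tripled, fresh parallel key/val arrays are allocated through the four-argument mallocgc, and every live entry is reinserted. A hypothetical standalone version of that grow-by-reinsertion pattern (Tab, hash, insert, and grow are illustrative names, not the runtime's):

	#include <stdint.h>
	#include <stdlib.h>

	typedef struct {
		void	**key;
		void	**val;
		int	max;
		int	nkey;
	} Tab;

	static int
	hash(void *k, int max)
	{
		return (int)(((uintptr_t)k >> 3) % max);
	}

	/* Linear-probing insert; assumes the table has a free slot. */
	static void
	insert(Tab *t, void *k, void *v)
	{
		int i = hash(k, t->max);
		while(t->key[i] != NULL)
			i = (i+1) % t->max;
		t->key[i] = k;
		t->val[i] = v;
		t->nkey++;
	}

	static void
	grow(Tab *t)
	{
		Tab newtab;

		newtab.max = t->max*3;	/* triple, as in the hunk above */
		newtab.key = calloc(newtab.max, sizeof newtab.key[0]);
		newtab.val = calloc(newtab.max, sizeof newtab.val[0]);
		newtab.nkey = 0;
		for(int i = 0; i < t->max; i++)
			if(t->key[i] != NULL)
				insert(&newtab, t->key[i], t->val[i]);
		free(t->key);
		free(t->val);
		*t = newtab;
	}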
mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0)
return b;
- b = mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1, 0);
+ b = mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1);
bucketmem += sizeof *b + nstk*sizeof stk[0];
memmove(b->stk, stk, nstk*sizeof stk[0]);
b->hash = h;
if(ah->addr == (addr>>20))
goto found;
- ah = mallocgc(sizeof *ah, RefNoProfiling, 0, 1, 0);
+ ah = mallocgc(sizeof *ah, RefNoProfiling, 0, 1);
addrmem += sizeof *ah;
ah->next = addrhash[h];
ah->addr = addr>>20;
found:
if((e = addrfree) == nil) {
- e = mallocgc(64*sizeof *e, RefNoProfiling, 0, 0, 0);
+ e = mallocgc(64*sizeof *e, RefNoProfiling, 0, 0);
addrmem += 64*sizeof *e;
for(i=0; i+1<64; i++)
e[i].next = &e[i+1];
// Called by malloc to record a profiled block.
void
-MProf_Malloc(int32 skip, void *p, uintptr size)
+MProf_Malloc(void *p, uintptr size)
{
int32 nstk;
uintptr stk[32];
return;
m->nomemprof++;
- nstk = callers(1+skip, stk, 32);
+ nstk = callers(1, stk, 32);
lock(&proflock);
b = stkbucket(stk, nstk);
b->allocs++;
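With the skip argument gone, MProf_Malloc always records starting at its immediate caller (callers(1, ...)) and leaves the pruning of allocator-internal frames to gopprof. The stkbucket step groups allocations that share an identical call stack; a hedged standalone sketch of that grouping, using a single chain where the runtime uses a hash table:

	#include <stdlib.h>
	#include <string.h>

	typedef unsigned long uintptr;

	typedef struct Bucket Bucket;
	struct Bucket {
		Bucket	*next;
		int	allocs;
		size_t	alloc_bytes;
		int	nstk;
		uintptr	stk[32];	/* at most 32 PCs, as above */
	};

	static Bucket *buckets;

	static Bucket*
	stkbucket_sketch(uintptr *stk, int nstk)
	{
		Bucket *b;

		for(b = buckets; b != NULL; b = b->next)
			if(b->nstk == nstk &&
			   memcmp(b->stk, stk, nstk*sizeof stk[0]) == 0)
				return b;	/* seen this stack before */

		b = calloc(1, sizeof *b);	/* new stack: new bucket */
		memmove(b->stk, stk, nstk*sizeof stk[0]);
		b->nstk = nstk;
		b->next = buckets;
		buckets = b;
		return b;
	}

	/* Record one profiled allocation, as MProf_Malloc does above. */
	static void
	record(uintptr *stk, int nstk, size_t size)
	{
		Bucket *b = stkbucket_sketch(stk, nstk);
		b->allocs++;
		b->alloc_bytes += size;
	}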
int32 mcmp(byte*, byte*, uint32);
void memmove(void*, void*, uint32);
void* mal(uintptr);
-void* malx(uintptr size, int32 skip_delta);
uint32 cmpstring(String, String);
String catstring(String, String);
String gostring(byte*);
ret.cap = cap;
if((t->elem->kind&KindNoPointers))
- ret.array = mallocgc(size, RefNoPointers, 1, 1, 1);
+ ret.array = mallocgc(size, RefNoPointers, 1, 1);
else
ret.array = mal(size);
if(l == 0)
return emptystring;
- s.str = malx(l+1, 1); // leave room for NUL for C runtime (e.g., callers of getenv)
+ s.str = mal(l+1); // leave room for NUL for C runtime (e.g., callers of getenv)
s.len = l;
if(l > maxstring)
maxstring = l;
}
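The extra byte in mal(l+1) matters because Go strings carry an explicit length and are not NUL-terminated, yet some consumers of s.str are C code. Since mal's zeroed parameter is 1, byte l stays 0 and C readers see a terminated string. A standalone sketch of the same idea, with calloc standing in for mal's zeroing:

	#include <stdlib.h>

	typedef struct {
		char	*str;
		int	len;
	} String;

	static String
	gostringsize_sketch(int l)
	{
		String s;

		s.str = calloc(l+1, 1);	/* room for the NUL; zeroed like mal */
		s.len = l;		/* Go-side length excludes the NUL */
		return s;
	}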
func stringtoslicebyte(s String) (b Slice) {
- b.array = mallocgc(s.len, RefNoPointers, 1, 1, 1);
+ b.array = mallocgc(s.len, RefNoPointers, 1, 1);
b.len = s.len;
b.cap = s.len;
mcpy(b.array, s.str, s.len);
n++;
}
- b.array = mallocgc(n*sizeof(r[0]), RefNoPointers, 1, 1, 1);
+ b.array = mallocgc(n*sizeof(r[0]), RefNoPointers, 1, 1);
b.len = n;
b.cap = n;
p = s.str;