chan.$O\
iface.$O\
array.$O\
+ mem.$O\
print.$O\
rune.$O\
proc.$O\
sema.$O\
- stack.$O\
string.$O\
symtab.$O\
sys_file.$O\
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+
+// Stubs for memory management.
+// In a separate file so they can be overridden during testing of gc.
+
+enum
+{
+ NHUNK = 20<<20,
+
+ PROT_NONE = 0x00,
+ PROT_READ = 0x01,
+ PROT_WRITE = 0x02,
+ PROT_EXEC = 0x04,
+
+ MAP_FILE = 0x0000,
+ MAP_SHARED = 0x0001,
+ MAP_PRIVATE = 0x0002,
+ MAP_FIXED = 0x0010,
+ MAP_ANON = 0x1000, // not on Linux - TODO(rsc)
+};
+
+void*
+stackalloc(uint32 n)
+{
+ return mal(n);
+}
+
+void
+stackfree(void*)
+{
+}
+
+// Convenient wrapper around mmap.
+static void*
+brk(uint32 n)
+{
+ byte *v;
+
+ v = sys·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
+ m->mem.nmmap += n;
+ return v;
+}
+
+// Allocate n bytes of memory. Note that this gets used
+// to allocate new stack segments, so at each call to a function
+// you have to ask yourself "would it be okay to call mal recursively
+// right here?" The answer is yes unless we're in the middle of
+// editing the malloc state in m->mem.
+void*
+mal(uint32 n)
+{
+ byte* v;
+
+ // round to keep everything 64-bit aligned
+ n = rnd(n, 8);
+
+ // be careful. calling any function might invoke
+ // mal to allocate more stack.
+ if(n > NHUNK) {
+ v = brk(n);
+ } else {
+ // allocate a new hunk if this one is too small
+ if(n > m->mem.nhunk) {
+ // here we're in the middle of editing m->mem
+ // (we're about to overwrite m->mem.hunk),
+ // so we can't call brk - it might call mal to grow the
+ // stack, and the recursive call would allocate a new
+ // hunk, and then once brk returned we'd immediately
+ // overwrite that hunk with our own.
+ // (the net result would be a memory leak, not a crash.)
+ // so we have to call sys·mmap directly - it is written
+ // in assembly and tagged not to grow the stack.
+ m->mem.hunk =
+ sys·mmap(nil, NHUNK, PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, 0, 0);
+ m->mem.nhunk = NHUNK;
+ m->mem.nmmap += NHUNK;
+ }
+ v = m->mem.hunk;
+ m->mem.hunk += n;
+ m->mem.nhunk -= n;
+ }
+ m->mem.nmal += n;
+ return v;
+}
+
+void
+sys·mal(uint32 n, uint8 *ret)
+{
+ ret = mal(n);
+ FLUSH(&ret);
+}
int32 mcount; // number of ms that have been created
int32 mcpu; // number of ms executing on cpu
int32 mcpumax; // max number of ms allowed on cpu
+ int32 gomaxprocs;
int32 msyscall; // number of ms in system calls
int32 predawn; // running initialization, don't run new gs.
+
+ Note stopped; // one g can wait here for ms to stop
};
Sched sched;
int32 n;
byte *p;
- sched.mcpumax = 1;
+ sched.gomaxprocs = 1;
p = getenv("GOMAXPROCS");
if(p != nil && (n = atoi(p)) != 0)
- sched.mcpumax = n;
+ sched.gomaxprocs = n;
+ sched.mcpumax = sched.gomaxprocs;
sched.mcount = 1;
sched.predawn = 1;
}
// 160 is the slop amount known to the stack growth code
g = mal(sizeof(G));
- stk = mal(160 + stacksize);
+ stk = stackalloc(160 + stacksize);
g->stack0 = stk;
g->stackguard = stk + 160;
g->stackbase = stk + 160 + stacksize;
throw("all goroutines are asleep - deadlock!");
m->nextg = nil;
noteclear(&m->havenextg);
+ notewakeup(&sched.stopped);
unlock(&sched);
notesleep(&m->havenextg);
return gp;
}
+// TODO(rsc): Remove. This is only temporary,
+// for the mark and sweep collector.
+void
+stoptheworld(void)
+{
+ lock(&sched);
+ sched.mcpumax = 1;
+ while(sched.mcpu > 1) {
+ noteclear(&sched.stopped);
+ unlock(&sched);
+ notesleep(&sched.stopped);
+ lock(&sched);
+ }
+ unlock(&sched);
+}
+
+// TODO(rsc): Remove. This is only temporary,
+// for the mark and sweep collector.
+void
+starttheworld(void)
+{
+ lock(&sched);
+ sched.mcpumax = sched.gomaxprocs;
+ matchmg();
+ unlock(&sched);
+}
+
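A minimal sketch of how a collection pass might drive this pair, assuming hypothetical mark() and sweep() helpers; the real collector (the ms.$O object added to the allocator Makefile below) is not shown in this excerpt:

static void mark(void);		// hypothetical: record reachable objects in their ref counts
static void sweep(void);	// hypothetical: free objects that stayed unreferenced

static void
collect(void)
{
	stoptheworld();		// drop mcpumax to 1 and wait until this is the only running m
	mark();
	sweep();
	starttheworld();	// restore mcpumax = gomaxprocs and reschedule waiting gs
}
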
// Called to start an M.
void
mstart(void)
unlock(&debuglock);
}
lock(&sched);
+ g->status = Gsyscall;
sched.mcpu--;
sched.msyscall++;
if(sched.gwait != 0)
matchmg();
unlock(&sched);
+ // leave SP around for gc; poison PC to make sure it's not used
+ g->sched.SP = (byte*)&callerpc;
+ g->sched.PC = (byte*)0xdeadbeef;
}
// The goroutine g exited its system call.
}
lock(&sched);
+ g->status = Grunning;
sched.msyscall--;
sched.mcpu++;
// Fast path - if there's room for this m, we're done.
throw("no return at end of a typed function");
}
-enum
-{
- NHUNK = 20<<20,
-
- PROT_NONE = 0x00,
- PROT_READ = 0x01,
- PROT_WRITE = 0x02,
- PROT_EXEC = 0x04,
-
- MAP_FILE = 0x0000,
- MAP_SHARED = 0x0001,
- MAP_PRIVATE = 0x0002,
- MAP_FIXED = 0x0010,
- MAP_ANON = 0x1000, // not on Linux - TODO(rsc)
-};
-
void
throw(int8 *s)
{
return n;
}
-// Convenient wrapper around mmap.
-static void*
-brk(uint32 n)
-{
- byte *v;
-
- v = sys·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
- m->mem.nmmap += n;
- return v;
-}
-
-// Allocate n bytes of memory. Note that this gets used
-// to allocate new stack segments, so at each call to a function
-// you have to ask yourself "would it be okay to call mal recursively
-// right here?" The answer is yes unless we're in the middle of
-// editing the malloc state in m->mem.
-void*
-mal(uint32 n)
-{
- byte* v;
-
- // round to keep everything 64-bit aligned
- n = rnd(n, 8);
-
- // be careful. calling any function might invoke
- // mal to allocate more stack.
- if(n > NHUNK) {
- v = brk(n);
- } else {
- // allocate a new hunk if this one is too small
- if(n > m->mem.nhunk) {
- // here we're in the middle of editing m->mem
- // (we're about to overwrite m->mem.hunk),
- // so we can't call brk - it might call mal to grow the
- // stack, and the recursive call would allocate a new
- // hunk, and then once brk returned we'd immediately
- // overwrite that hunk with our own.
- // (the net result would be a memory leak, not a crash.)
- // so we have to call sys·mmap directly - it is written
- // in assembly and tagged not to grow the stack.
- m->mem.hunk =
- sys·mmap(nil, NHUNK, PROT_READ|PROT_WRITE,
- MAP_ANON|MAP_PRIVATE, 0, 0);
- m->mem.nhunk = NHUNK;
- m->mem.nmmap += NHUNK;
- }
- v = m->mem.hunk;
- m->mem.hunk += n;
- m->mem.nhunk -= n;
- }
- m->mem.nmal += n;
- return v;
-}
-
-void
-sys·mal(uint32 n, uint8 *ret)
-{
- ret = mal(n);
- FLUSH(&ret);
-}
-
static uint64 uvnan = 0x7FF0000000000001ULL;
static uint64 uvinf = 0x7FF0000000000000ULL;
static uint64 uvneginf = 0xFFF0000000000000ULL;
Gidle,
Grunnable,
Grunning,
+ Gsyscall,
Gwaiting,
Gmoribund,
Gdead,
void* stackalloc(uint32);
void stackfree(void*);
+// TODO(rsc): Remove. These are only temporary,
+// for the mark and sweep collector.
+void stoptheworld(void);
+void starttheworld(void);
+
/*
* mutual exclusion locks. in the uncontended case,
* as fast as spin locks (just a few user-level instructions),
void sys·readfile(string, string, bool);
void sys·bytestorune(byte*, int32, int32, int32, int32);
void sys·stringtorune(string, int32, int32, int32);
+void sys·semacquire(uint32*);
+void sys·semrelease(uint32*);
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-
-// Stubs for stack management.
-// In a separate file so they can be overridden during testing of gc.
-
-void*
-stackalloc(uint32 n)
-{
- return mal(n);
-}
-
-void
-stackfree(void*)
-{
-}
OFILES=\
allocator.$O\
malloc.$O\
+ mem.$O\
+ ms.$O\
pagemap.$O\
triv.$O\
clean:
rm -f *.$O $(TARG)
-runtime: allocator.$O malloc.$O pagemap.$O triv.$O stack.$O
+runtime: $(OFILES)
6ar grc $(GOROOT)/lib/lib_$(GOARCH)_$(GOOS).a $^
-
export var frozen bool
export func testsizetoclass()
export var allocated int64
+export func find(uint64) (obj *byte, size int64, ref *int32, ok bool)
+export func gc()
#include "malloc.h"
-typedef struct Span Span;
-typedef struct Central Central;
-
-// A Span contains metadata about a range of pages.
-enum {
- SpanInUse = 0, // span has been handed out by allocator
- SpanFree = 1, // span is in central free list
-};
-struct Span
-{
- Span *next; // in free lists
- byte *base; // first byte in span
- uintptr length; // number of pages in span
- int32 cl;
- int32 state; // state (enum above)
-// int ref; // reference count if state == SpanInUse (for GC)
-// void *type; // object type if state == SpanInUse (for GC)
-};
-
-// The Central cache contains a list of free spans,
-// as well as free lists of small blocks.
-struct Central
-{
- Lock;
- Span *free[256];
- Span *large; // free spans >= MaxPage pages
-};
-
-static Central central;
-static PageMap spanmap;
+Central central;
+PageMap spanmap;
// Insert a new span into the map.
static void
static void freespan(Span*);
+// Linked list of spans.
+// TODO(rsc): Remove - should be able to walk pagemap.
+Span *spanfirst;
+Span *spanlast;
+static void
+addtolist(Span *s)
+{
+ if(spanlast) {
+ s->aprev = spanlast;
+ s->aprev->anext = s;
+ } else {
+ s->aprev = nil;
+ spanfirst = s;
+ }
+ s->anext = nil;
+ spanlast = s;
+}
+
+/*
+static void
+delfromlist(Span *s)
+{
+ if(s->aprev)
+ s->aprev->anext = s->anext;
+ else
+ spanfirst = s->anext;
+ if(s->anext)
+ s->anext->aprev = s->aprev;
+ else
+ spanlast = s->aprev;
+}
+*/
+
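A minimal sketch of the kind of traversal this list exists for, assuming the Span layout declared in malloc.h by this change (small objects packed in [base, refbase), one int32 ref count per object at refbase); the helper is hypothetical:

static int32
countinuse(void)
{
	Span *s;
	int32 i, n, siz, inuse;

	inuse = 0;
	for(s=spanfirst; s!=nil; s=s->anext) {
		if(s->state != SpanInUse)
			continue;
		if(s->cl < 0) {	// large span: a single object with one ref count
			inuse++;
			continue;
		}
		siz = classtosize[s->cl];
		n = ((byte*)s->refbase - s->base) / siz;	// objects in this span
		for(i=0; i<n; i++)
			if(s->refbase[i] != RefFree)
				inuse++;
	}
	return inuse;
}
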
// Allocate a span of at least n pages.
static Span*
allocspan(int32 npage)
//printf("New span %d for %d\n", allocnpage, npage);
s->base = trivalloc(allocnpage<<PageShift);
insertspan(s);
+ addtolist(s);
havespan:
// If span is bigger than needed, redistribute the remainder.
s1->length = s->length - npage;
shrinkspan(s, npage);
insertspan(s1);
+ addtolist(s1);
freespan(s1);
}
s->state = SpanInUse;
}
// Free a span.
+// TODO(rsc): Coalesce adjacent free spans.
static void
freespan(Span *s)
{
// Small objects are kept on per-size free lists in the M.
// There are SmallFreeClasses (defined in runtime.h) different lists.
-static int32 classtosize[SmallFreeClasses] = {
+int32 classtosize[SmallFreeClasses] = {
/*
seq 8 8 127 | sed 's/$/,/' | fmt
seq 128 16 255 | sed 's/$/,/' | fmt
chunk = (chunk+PageMask) & ~PageMask;
s = allocspan(chunk>>PageShift);
//printf("New class %d\n", cl);
+
s->state = SpanInUse;
s->cl = cl;
siz = classtosize[cl];
- n = chunk/siz;
+ n = chunk/(siz+sizeof(s->refbase[0]));
p = s->base;
//printf("centralgrab cl=%d siz=%d n=%d\n", cl, siz, n);
- for(i=0; i<n-1; i++) {
- *(void**)p = p+siz;
+ for(i=0; i<n; i++) {
+ if(i < n-1)
+ *(void**)p = p+siz;
p += siz;
}
+ s->refbase = (int32*)p;
+
+ // TODO(rsc): Remove - only for mark/sweep
+ for(i=0; i<n; i++)
+ s->refbase[i] = RefFree;
+
*pn = n;
return s->base;
}
unlock(¢ral);
}
-//printf("alloc from cl %d\n", cl);
+//printf("alloc from cl %d %p\n", cl, p);
// advance linked list.
m->freelist[cl] = *p;
+ // TODO(rsc): If cl > 0, can store ref ptr in *(p+1),
+ // avoiding call to findobj.
+ // Or could get rid of RefFree, which is only truly
+ // necessary for mark/sweep.
+ int32 *ref;
+ if(!findobj(p, nil, nil, &ref))
+ throw("bad findobj");
+ if(*ref != RefFree)
+ throw("double alloc");
+ *ref = 0;
+
// Blocks on free list are zeroed except for
// the linked list pointer that we just used. Zero it.
*p = 0;
unlock(¢ral);
s->state = SpanInUse;
s->cl = -1;
+ s->ref = 0;
return s->base;
}
FLUSH(&out);
}
+// Check whether v points into a known memory block.
+// If so, return true with
+// *obj = base pointer of object (can pass to free)
+// *size = size of object
+// *ref = pointer to ref count for object
+// Object might already be freed, in which case *ref == RefFree.
+bool
+findobj(void *v, void **obj, int64 *size, int32 **ref)
+{
+ Span *s;
+ int32 siz, off, indx;
+
+ s = spanofptr(v);
+ if(s == nil || s->state != SpanInUse)
+ return false;
+
+ // Big object
+ if(s->cl < 0) {
+ if(obj)
+ *obj = s->base;
+ if(size)
+ *size = s->length<<PageShift;
+ if(ref)
+ *ref = &s->ref;
+ return true;
+ }
+
+ // Small object
+ if((byte*)v >= (byte*)s->refbase)
+ return false;
+ siz = classtosize[s->cl];
+ off = (byte*)v - (byte*)s->base;
+ indx = off/siz;
+ if(obj)
+ *obj = s->base + indx*siz;
+ if(size)
+ *size = siz;
+ if(ref)
+ *ref = s->refbase + indx;
+ return true;
+}
+
+void
+allocator·find(uint64 ptr, byte *obj, int64 siz, int32 *ref, bool ok)
+{
+ ok = findobj((void*)ptr, &obj, &siz, &ref);
+ FLUSH(&ok);
+}
+
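A minimal sketch of the C-side use findobj is built for: resolving an arbitrary word to an object and its ref count while scanning. The marking policy shown (bump the count of every referenced object) is an assumption about the collector, not something this change specifies:

static void
markword(void *v)
{
	void *obj;
	int64 size;
	int32 *ref;

	if(!findobj(v, &obj, &size, &ref))
		return;	// not a pointer into allocator-managed memory
	if(*ref == RefFree || *ref == RefManual || *ref == RefStack)
		return;	// free slot or manually managed block: leave it alone
	(*ref)++;	// assumed policy: count references seen during the scan
}
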
// Free object with base pointer v.
void
free(void *v)
{
void **p;
Span *s;
- int32 siz, off;
+ int32 siz, off, n;
s = spanofptr(v);
if(s->state != SpanInUse)
throw("free - invalid pointer2");
// TODO: For large spans, maybe just return the
// memory to the operating system and let it zero it.
+ if(s->ref != 0 && s->ref != RefManual && s->ref != RefStack)
+ throw("free - bad ref count");
+ s->ref = RefFree;
sys·memclr(s->base, s->length << PageShift);
//printf("Free big %D\n", s->length);
allocator·allocated -= s->length << PageShift;
}
// Small object should be aligned properly.
+ if((byte*)v >= (byte*)s->refbase)
+ throw("free - invalid pointer4");
+
siz = classtosize[s->cl];
off = (byte*)v - (byte*)s->base;
if(off%siz)
throw("free - invalid pointer3");
+ n = off/siz;
+ if(s->refbase[n] != 0 && s->refbase[n] != RefManual && s->refbase[n] != RefStack)
+ throw("free - bad ref count1");
+ s->refbase[n] = RefFree;
// Zero and add to free list.
sys·memclr(v, siz);
PageMask = (1<<PageShift) - 1,
};
+#define RefFree 0xffffffffU
+#define RefManual 0xfffffffeU
+#define RefStack 0xfffffffdU
+
enum {
PMBits = 64 - PageShift,
PMLevels = 4,
void *level0[PMLevelSize];
};
+typedef struct Span Span;
+typedef struct Central Central;
+
+// A Span contains metadata about a range of pages.
+enum {
+ SpanInUse = 0, // span has been handed out by allocator
+ SpanFree = 1, // span is in central free list
+};
+struct Span
+{
+ Span *aprev; // in list of all spans
+ Span *anext;
+
+ Span *next; // in free lists
+ byte *base; // first byte in span
+ uintptr length; // number of pages in span
+ int32 cl;
+ int32 state; // state (enum above)
+ union {
+ int32 ref; // reference count if state == SpanInUse (for GC)
+ int32 *refbase; // ptr to packed ref counts
+ };
+// void *type; // object type if state == SpanInUse (for GC)
+};
+
+// The Central cache contains a list of free spans,
+// as well as free lists of small blocks.
+struct Central
+{
+ Lock;
+ Span *free[256];
+ Span *large; // free spans >= MaxPage pages
+};
+
extern int64 allocator·allocated;
extern int64 allocator·footprint;
extern bool allocator·frozen;
void* alloc(int32);
void free(void*);
+bool findobj(void*, void**, int64*, int32**);
+
+extern Central central;
+extern PageMap spanmap;
+extern int32 classtosize[SmallFreeClasses];
+extern Span *spanfirst, *spanlast;
stackalloc(uint32 n)
{
void *v;
+ int32 *ref;
v = alloc(n);
//printf("stackalloc %d = %p\n", n, v);
+ ref = nil;
+ findobj(v, nil, nil, &ref);
+ *ref = RefStack;
return v;
}
//printf("stackfree %p\n", v);
free(v);
}
+
+void*
+mal(uint32 n)
+{
+ return alloc(n);
+}
+
+void
+sys·mal(uint32 n, uint8 *ret)
+{
+ ret = alloc(n);
+ FLUSH(&ret);
+}
import (
"allocator";
- "rand"
+ "rand";
+ "syscall"
)
var footprint int64;
}
siz := rand.rand() >> (11 + rand.urand32() % 20);
base := allocator.malloc(siz);
+ ptr := uint64(syscall.BytePtr(base))+uint64(siz/2);
+ obj, size, ref, ok := allocator.find(ptr);
+ if !ok || obj != base || *ref != 0 {
+ panicln("find", siz, obj, ref, ok);
+ }
blocks[b].base = base;
blocks[b].siz = siz;
allocated += int64(siz);