struct ibv_context {
xxpthread_mutex_t mutex;
};
+
+int add(int x, int y) {
+ return x+y;
+}
*/
import "C"
import (
type Context struct {
ctx *C.struct_ibv_context
}
+
+// benchCgoCall measures the round-trip overhead of a single
+// cgo call by invoking the trivial C function add in a loop.
+func benchCgoCall(b *testing.B) {
+ const x = C.int(2)
+ const y = C.int(3)
+ for i := 0; i < b.N; i++ {
+ C.add(x, y)
+ }
+}
func Test1328(t *testing.T) { test1328(t) }
func TestParallelSleep(t *testing.T) { testParallelSleep(t) }
func TestSetEnv(t *testing.T) { testSetEnv(t) }
+
+func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
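With the registration above, the new benchmark runs under the standard
testing harness (with a modern toolchain, for example: go test -run=NONE
-bench=CgoCall in misc/cgo/test) and reports the round-trip cost of a
single cgo call.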
// Save the current m->g0->sched.sp on the stack, then set it to SP
// so the runtime can unwind this g0 frame (see unwindm in cgocall.c).
PUSHL (g_sched+gobuf_sp)(SI)
MOVL SP, (g_sched+gobuf_sp)(SI)
- // Switch to m->curg stack and call runtime.cgocallback
+ // Switch to m->curg stack and call runtime.cgocallbackg
// with the three arguments. Because we are taking over
// the execution of m->curg but *not* resuming what had
// been running, we need to save that information (m->curg->gobuf)
// so that we can restore it when we're done.
// We can restore m->curg->gobuf.sp easily, because calling
- // runtime.cgocallback leaves SP unchanged upon return.
+ // runtime.cgocallbackg leaves SP unchanged upon return.
// To save m->curg->gobuf.pc, we push it onto the stack.
// This has the added benefit that it looks to the traceback
- // routine like cgocallback is going to return to that
- // PC (because we defined cgocallback to have
+ // routine like cgocallbackg is going to return to that
+ // PC (because we defined cgocallbackg to have
// a frame size of 12, the same amount that we use below),
// so that the traceback will seamlessly trace back into
// the earlier calls.
// Save the current m->g0->sched.sp on the stack, then set it to SP
// so the runtime can unwind this g0 frame (see unwindm in cgocall.c).
PUSHQ (g_sched+gobuf_sp)(SI)
MOVQ SP, (g_sched+gobuf_sp)(SI)
- // Switch to m->curg stack and call runtime.cgocallback
+ // Switch to m->curg stack and call runtime.cgocallbackg
// with the three arguments. Because we are taking over
// the execution of m->curg but *not* resuming what had
// been running, we need to save that information (m->curg->gobuf)
// so that we can restore it when we're done.
// We can restore m->curg->gobuf.sp easily, because calling
- // runtime.cgocallback leaves SP unchanged upon return.
+ // runtime.cgocallbackg leaves SP unchanged upon return.
// To save m->curg->gobuf.pc, we push it onto the stack.
// This has the added benefit that it looks to the traceback
- // routine like cgocallback is going to return to that
- // PC (because we defined cgocallback to have
+ // routine like cgocallbackg is going to return to that
+ // PC (because we defined cgocallbackg to have
// a frame size of 24, the same amount that we use below),
// so that the traceback will seamlessly trace back into
// the earlier calls.
// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
-// allocator or invoke the Go callback function p.GoF. runtime.cgocallback
+// allocator or invoke the Go callback function p.GoF. runtime.cgocallbackg
// first defers a function to unwind m->g0.sched.sp, so that if p.GoF
// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack
// and the m->curg stack will be unwound in lock step.
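As a concrete illustration of the p.GoF sequence described above, here is a
minimal sketch of a Go function exported to C and called back from C. The
file and function names (callback.go, callback.c, callFromC, GoF) are
hypothetical; the only real cgo machinery assumed is _cgo_export.h, the
generated header that declares exported Go functions.

	// callback.go (hypothetical): GoF plays the role of p.GoF above.
	package main

	/*
	extern void callFromC(void); // defined in callback.c
	*/
	import "C"

	import "fmt"

	//export GoF
	func GoF(n C.int) {
		// Runs on m->curg's stack, entered via runtime.cgocallbackg.
		fmt.Println("GoF called from C:", n)
	}

	func main() {
		C.callFromC()
	}

	// callback.c (hypothetical), compiled as part of the same package:
	//
	//	#include "_cgo_export.h"  /* cgo-generated prototype for GoF */
	//
	//	void callFromC(void) { GoF(42); }

When C.callFromC runs, the chain is the one traced above: the generated
wrapper for GoF enters runtime.cgocallback on the C stack, which switches
to m->curg's stack and calls runtime.cgocallbackg, which invokes GoF.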
void
runtime·cgocall(void (*fn)(void*), void *arg)
{
- Defer *d;
+ Defer d; // on-stack defer entry; avoids a malloc per cgo call
if(!runtime·iscgo)
runtime·throw("cgocall unavailable");
* Lock g to m to ensure we stay on the same stack if we do a
* cgo callback.
*/
- d = nil;
+ d.nofree = false; // set to true below only if d is pushed on g->defer
if(m->lockedg == nil) {
m->lockedg = g;
g->lockedm = m;
// Add entry to defer stack in case of panic.
- d = runtime·malloc(sizeof(*d));
- d->fn = (byte*)unlockm;
- d->siz = 0;
- d->link = g->defer;
- d->argp = (void*)-1; // unused because unwindm never recovers
- g->defer = d;
+ d.fn = (byte*)unlockm;
+ d.siz = 0;
+ d.link = g->defer;
+ d.argp = (void*)-1; // unused because unlockm never recovers
+ d.nofree = true; // d lives on the stack; pop it but never free it
+ g->defer = &d;
}
/*
runtime·asmcgocall(fn, arg);
runtime·exitsyscall();
- if(d != nil) {
- if(g->defer != d || d->fn != (byte*)unlockm)
+ if(d.nofree) {
+ if(g->defer != &d || d.fn != (byte*)unlockm)
runtime·throw("runtime: bad defer entry in cgocallback");
- g->defer = d->link;
- runtime·free(d);
+ g->defer = d.link;
unlockm();
}
}
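The same on-stack Defer pattern repeats in runtime·cgocallbackg below.
Because the entry now lives on the goroutine's stack instead of coming from
runtime·malloc, every path that pops a Defer (deferreturn, the rundefer
loop, and the panic/recovery unwinding code) must consult the new nofree
flag before calling runtime·free; the remaining hunks add exactly that
check.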
void
runtime·cgocallbackg(void (*fn)(void), void *arg, uintptr argsize)
{
- Defer *d;
+ Defer d; // on-stack defer entry, as in runtime·cgocall above
if(g != m->curg)
runtime·throw("runtime: bad g in cgocallback");
runtime·exitsyscall(); // coming out of cgo call
// Add entry to defer stack in case of panic.
- d = runtime·malloc(sizeof(*d));
- d->fn = (byte*)unwindm;
- d->siz = 0;
- d->link = g->defer;
- d->argp = (void*)-1; // unused because unwindm never recovers
- g->defer = d;
+ d.fn = (byte*)unwindm;
+ d.siz = 0;
+ d.link = g->defer;
+ d.argp = (void*)-1; // unused because unwindm never recovers
+ d.nofree = true; // stack-allocated; must not be freed
+ g->defer = &d;
// Invoke callback.
reflect·call((byte*)fn, arg, argsize);
// Pop defer.
// Do not unwind m->g0->sched.sp.
// Our caller, cgocallback, will do that.
- if(g->defer != d || d->fn != (byte*)unwindm)
+ if(g->defer != &d || d.fn != (byte*)unwindm)
runtime·throw("runtime: bad defer entry in cgocallback");
- g->defer = d->link;
- runtime·free(d);
+ g->defer = d.link;
runtime·entersyscall(); // going back to cgo call
}
runtime·memmove(argp, d->args, d->siz);
g->defer = d->link;
fn = d->fn;
- runtime·free(d);
+ if(!d->nofree) // free only heap-allocated defer entries
+ runtime·free(d);
runtime·jmpdefer(fn, argp);
}
while((d = g->defer) != nil) {
g->defer = d->link;
reflect·call(d->fn, d->args, d->siz);
- runtime·free(d);
+ if(!d->nofree)
+ runtime·free(d);
}
}
runtime·mcall(recovery);
runtime·throw("recovery failed"); // mcall should not return
}
- runtime·free(d);
+ if(!d->nofree)
+ runtime·free(d);
}
// ran out of deferred calls - old-school panic now
else
gp->sched.sp = (byte*)d->argp - 2*sizeof(uintptr);
gp->sched.pc = d->pc;
- runtime·free(d);
+ if(!d->nofree)
+ runtime·free(d);
runtime·gogo(&gp->sched, 1);
}
struct Defer
{
int32 siz;
+ bool nofree; // allocated on the stack, not the heap; do not free
byte* argp; // where args were copied from
byte* pc;
byte* fn;