}
}
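+// BenchmarkCall exercises Value.Call in parallel on a function with no
+// results, the case that can reuse pooled frames; ReportAllocs records
+// allocs/op so the effect of frame reuse is visible.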
+func BenchmarkCall(b *testing.B) {
+ fv := ValueOf(func(a, b string) {})
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ args := []Value{ValueOf("a"), ValueOf("b")}
+ for pb.Next() {
+ fv.Call(args)
+ }
+ })
+}
+
func TestMakeFunc(t *testing.T) {
f := dummy
fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in })
var ft *rtype
var s *bitVector
if rcvr != nil {
- ft, argSize, retOffset, s = funcLayout(t.(*rtype), rcvr.(*rtype))
+ ft, argSize, retOffset, s, _ = funcLayout(t.(*rtype), rcvr.(*rtype))
} else {
- ft, argSize, retOffset, s = funcLayout(t.(*rtype), nil)
+ ft, argSize, retOffset, s, _ = funcLayout(t.(*rtype), nil)
}
frametype = ft
for i := uint32(0); i < s.n; i += 2 {
code := **(**uintptr)(unsafe.Pointer(&dummy))
// makeFuncImpl contains a stack map for use by the runtime
- _, _, _, stack := funcLayout(t, nil)
+ _, _, _, stack, _ := funcLayout(t, nil)
impl := &makeFuncImpl{code: code, stack: stack, typ: ftyp, fn: fn}
code := **(**uintptr)(unsafe.Pointer(&dummy))
// methodValue contains a stack map for use by the runtime
- _, _, _, stack := funcLayout(funcType, nil)
+ _, _, _, stack, _ := funcLayout(funcType, nil)
fv := &methodValue{
fn: code,
argSize uintptr // size of arguments
retOffset uintptr // offset of return values.
stack *bitVector
+ framePool *sync.Pool // pool of zeroed call frames of this layout
}
var layoutCache struct {
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
-func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stack *bitVector) {
+func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stack *bitVector, framePool *sync.Pool) {
if t.Kind() != Func {
panic("reflect: funcLayout of non-func type")
}
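+ // Fast path: look up a cached layout under the read lock.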
layoutCache.RLock()
if x := layoutCache.m[k]; x.t != nil {
layoutCache.RUnlock()
- return x.t, x.argSize, x.retOffset, x.stack
+ return x.t, x.argSize, x.retOffset, x.stack, x.framePool
}
layoutCache.RUnlock()
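+ // Slow path: take the write lock and recheck, since another goroutine
+ // may have computed and cached the layout in the meantime.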
layoutCache.Lock()
if x := layoutCache.m[k]; x.t != nil {
layoutCache.Unlock()
- return x.t, x.argSize, x.retOffset, x.stack
+ return x.t, x.argSize, x.retOffset, x.stack, x.framePool
}
tt := (*funcType)(unsafe.Pointer(t))
if layoutCache.m == nil {
layoutCache.m = make(map[layoutKey]layoutType)
}
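+ // unsafe_New returns zeroed memory, and frames are zeroed again before
+ // being returned to the pool, so Get always yields a clean frame.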
+ framePool = &sync.Pool{New: func() interface{} {
+ return unsafe_New(x)
+ }}
layoutCache.m[k] = layoutType{
t: x,
argSize: argSize,
retOffset: retOffset,
stack: stack,
+ framePool: framePool,
}
layoutCache.Unlock()
- return x, argSize, retOffset, stack
+ return x, argSize, retOffset, stack, framePool
}
// ifaceIndir reports whether t is stored indirectly in an interface value.
}
nout := t.NumOut()
- // Compute frame type, allocate a chunk of memory for frame
- frametype, _, retOffset, _ := funcLayout(t, rcvrtype)
- args := unsafe_New(frametype)
+ // Compute frame type.
+ frametype, _, retOffset, _, framePool := funcLayout(t, rcvrtype)
+
+ // Allocate a chunk of memory for the frame.
+ var args unsafe.Pointer
+ if nout == 0 {
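+ // No return values: nothing can point into the frame after the
+ // call, so it is safe to reuse a frame from the pool.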
+ args = framePool.Get().(unsafe.Pointer)
+ } else {
+ // Can't use pool if the function has return values.
+ // We will leak a pointer to args in ret, so its lifetime is not scoped.
+ args = unsafe_New(frametype)
+ }
off := uintptr(0)
// Copy inputs into args.
runtime.GC()
}
- // Copy return values out of args.
- ret := make([]Value, nout)
- off = retOffset
- for i := 0; i < nout; i++ {
- tv := t.Out(i)
- a := uintptr(tv.Align())
- off = (off + a - 1) &^ (a - 1)
- fl := flagIndir | flag(tv.Kind())
- ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl}
- off += tv.Size()
+ var ret []Value
+ if nout == 0 {
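+ // The frame is dead after the call; zero it and recycle it.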
+ memclr(args, frametype.size)
+ framePool.Put(args)
+ } else {
+ // Zero the now unused input area of args,
+ // because the Values returned by this function contain pointers to the args object,
+ // and will thus keep the args object alive indefinitely.
+ memclr(args, retOffset)
+ // Copy return values out of args.
+ ret = make([]Value, nout)
+ off = retOffset
+ for i := 0; i < nout; i++ {
+ tv := t.Out(i)
+ a := uintptr(tv.Align())
+ off = (off + a - 1) &^ (a - 1)
+ fl := flagIndir | flag(tv.Kind())
+ ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl}
+ off += tv.Size()
+ }
}
return ret
func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
rcvr := ctxt.rcvr
rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method)
- frametype, argSize, retOffset, _ := funcLayout(t, rcvrtype)
+ frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype)
// Make a new frame that is one word bigger so we can store the receiver.
- args := unsafe_New(frametype)
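+ // The frame never escapes callMethod, so it can always be pooled.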
+ args := framePool.Get().(unsafe.Pointer)
// Copy in receiver and rest of args.
storeRcvr(rcvr, args)
unsafe.Pointer(uintptr(args)+retOffset),
retOffset,
frametype.size-retOffset)
+
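+ // The results were copied back into the caller's frame above;
+ // clear this frame and return it to the pool.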
+ memclr(args, frametype.size)
+ framePool.Put(args)
}
// funcName returns the name of f, for use in error messages.
//go:noescape
func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
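+// memclr zeroes n bytes starting at ptr. It is implemented in the
+// runtime and wired up via go:linkname (see reflect_memclr there).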
+//go:noescape
+func memclr(ptr unsafe.Pointer, n uintptr)
+
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.
//go:noescape
func memclr(ptr unsafe.Pointer, n uintptr)
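+// reflect_memclr makes the runtime's memclr available to package reflect.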
+//go:linkname reflect_memclr reflect.memclr
+func reflect_memclr(ptr unsafe.Pointer, n uintptr) {
+ memclr(ptr, n)
+}
+
// memmove copies n bytes from "from" to "to".
// in memmove_*.s
//go:noescape