rcvr *rtype // receiver type, or nil if none
}
+type layoutType struct {
+ t *rtype
+ argSize uintptr // size of arguments
+ retOffset uintptr // offset of return values.
+}
+
var layoutCache struct {
sync.RWMutex
- m map[layoutKey]*rtype
+ m map[layoutKey]layoutType
}
// funcLayout computes a struct type representing the layout of the
// function arguments and return values for the function type t.
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
-func funcLayout(t *rtype, rcvr *rtype) *rtype {
+func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr) {
if t.Kind() != Func {
panic("reflect: funcSignature of non-func type")
}
k := layoutKey{t, rcvr}
layoutCache.RLock()
- if x := layoutCache.m[k]; x != nil {
+ if x := layoutCache.m[k]; x.t != nil {
layoutCache.RUnlock()
- return x
+ return x.t, x.argSize, x.retOffset
}
layoutCache.RUnlock()
layoutCache.Lock()
- if x := layoutCache.m[k]; x != nil {
+ if x := layoutCache.m[k]; x.t != nil {
layoutCache.Unlock()
- return x
+ return x.t, x.argSize, x.retOffset
}
tt := (*funcType)(unsafe.Pointer(t))
}
offset += arg.size
}
+ argSize = offset
+ if runtime.GOARCH == "amd64p32" {
+ offset = align(offset, 8)
+ }
offset = align(offset, ptrSize)
+ retOffset = offset
for _, res := range tt.out {
offset = align(offset, uintptr(res.align))
if res.pointers() {
// cache result for future callers
if layoutCache.m == nil {
- layoutCache.m = make(map[layoutKey]*rtype)
+ layoutCache.m = make(map[layoutKey]layoutType)
+ }
+ layoutCache.m[k] = layoutType{
+ t: x,
+ argSize: argSize,
+ retOffset: retOffset,
}
- layoutCache.m[k] = x
layoutCache.Unlock()
- return x
+ return x, argSize, retOffset
}
}
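To make the new return values concrete, here is a small, self-contained sketch (illustration only, not part of this change) of the offset arithmetic funcLayout now performs. The align helper below mirrors reflect's unexported round-up helper, and the signature func(int32) int64 plus the 4-byte pointer size are hypothetical values chosen so the amd64p32 padding is visible.

package main

import "fmt"

// align rounds x up to a multiple of n (n must be a power of two),
// mirroring the unexported helper used by package reflect.
func align(x, n uintptr) uintptr { return (x + n - 1) &^ (n - 1) }

func main() {
	const ptrSize = 4 // amd64p32: 4-byte pointers on a 64-bit machine

	// Argument section of a hypothetical func(x int32) int64.
	offset := uintptr(0)
	offset = align(offset, 4) // int32 alignment
	offset += 4               // int32 size
	argSize := offset         // 4: arguments end here

	// On amd64p32 the return section starts 64-bit aligned,
	// which pushes retOffset past argSize.
	offset = align(offset, 8)
	offset = align(offset, ptrSize)
	retOffset := offset

	fmt.Println(argSize, retOffset) // 4 8; without the amd64p32 rule both would be 4
}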
// Compute frame type, allocate a chunk of memory for frame
- frametype := funcLayout(t, rcvrtype)
+ frametype, _, retOffset := funcLayout(t, rcvrtype)
args := unsafe_New(frametype)
off := uintptr(0)
}
off += n
}
- off = (off + ptrSize - 1) &^ (ptrSize - 1)
// Call.
call(fn, args, uint32(frametype.size))
// Copy return values out of args.
ret := make([]Value, nout)
+ off = retOffset
for i := 0; i < nout; i++ {
tv := t.Out(i)
a := uintptr(tv.Align())
// Copy results back into argument frame.
if len(ftyp.out) > 0 {
off += -off & (ptrSize - 1)
+ if runtime.GOARCH == "amd64p32" {
+ off = align(off, 8)
+ }
for i, arg := range ftyp.out {
typ := arg
v := out[i]
rcvr := ctxt.rcvr
rcvrtype := rcvr.typ
t, fn := methodReceiver("call", rcvr, ctxt.method)
- frametype := funcLayout(t, rcvrtype)
+ frametype, argSize, retOffset := funcLayout(t, rcvrtype)
// Make a new frame that is one word bigger so we can store the receiver.
args := unsafe_New(frametype)
// Copy in receiver and rest of args.
storeRcvr(rcvr, args)
- memmove(unsafe.Pointer(uintptr(args)+ptrSize), frame, frametype.size-ptrSize)
+ memmove(unsafe.Pointer(uintptr(args)+ptrSize), frame, argSize-ptrSize)
// Call.
call(fn, args, uint32(frametype.size))
- // Copy return values.
- memmove(frame, unsafe.Pointer(uintptr(args)+ptrSize), frametype.size-ptrSize)
+ // Copy return values. On amd64p32, the beginning of return values
+ // is 64-bit aligned, so the caller's frame layout (which doesn't have
+ // a receiver) is different from the layout of the fn call, which has
+ // a receiver.
+ // Ignore any changes to args and just copy return values.
+ callerRetOffset := retOffset - ptrSize
+ if runtime.GOARCH == "amd64p32" {
+ callerRetOffset = align(argSize-ptrSize, 8)
+ }
+ memmove(unsafe.Pointer(uintptr(frame)+callerRetOffset),
+ unsafe.Pointer(uintptr(args)+retOffset), frametype.size-retOffset)
}
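The callerRetOffset computation above is easiest to check with concrete numbers. The following sketch (illustration only; the method shape func (T) M(x int32) int64 and the 4-byte pointer size are assumptions) redoes the arithmetic callMethod performs, showing why retOffset - ptrSize is not enough on amd64p32.

package main

import "fmt"

// Same round-up helper as in the previous sketch.
func align(x, n uintptr) uintptr { return (x + n - 1) &^ (n - 1) }

func main() {
	const ptrSize = 4 // amd64p32

	// fn's frame (the real method): one word for the receiver, then the int32 arg.
	argSize := uintptr(ptrSize + 4) // 8
	retOffset := align(argSize, 8)  // 8: return section is 64-bit aligned

	// Caller's frame has no receiver, but its return section is also
	// 64-bit aligned, so simply subtracting the receiver word is wrong.
	naive := retOffset - ptrSize                 // 4
	callerRetOffset := align(argSize-ptrSize, 8) // 8

	fmt.Println(naive, callerRetOffset) // 4 8: the two frame layouts differ
}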
// funcName returns the name of f, for use in error messages.