cmd/compile, etc: store method tables as offsets
author	David Crawshaw <crawshaw@golang.org>
Mon, 28 Mar 2016 14:32:27 +0000 (10:32 -0400)
committer	David Crawshaw <crawshaw@golang.org>
Wed, 13 Apr 2016 13:03:11 +0000 (13:03 +0000)
This CL introduces the typeOff type and a lookup method of the same
name that can turn a typeOff offset into an *rtype.

In a typical Go binary (built with buildmode=exe, pie, c-archive, or
c-shared), there is one moduledata and all typeOff values are offsets
relative to firstmoduledata.types. This makes computing the pointer
cheap in typical programs.

With buildmode=shared (and one day, buildmode=plugin) there are
multiple modules whose relative offset is determined at runtime.
We identify a type in the general case by the pair of the original
*rtype that references it and its typeOff value. We determine
the module from the original pointer, and then use the typeOff from
there to compute the final *rtype.
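
As a rough sketch of that lookup (the resolveTypeOff below is a
simplified stand-in, not the reflect/runtime function of the same name,
and the rtype/moduledata declarations are trimmed far below the real
runtime structures; the actual implementation is the (*_type).typeOff
method added to runtime/type.go below, which also range-checks the
result and handles reflect-generated types):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type typeOff int32
	type rtype struct{ kind uint8 } // stand-in for the real type descriptor

	type moduledata struct {
		types, etypes uintptr            // bounds of this module's type data
		typemap       map[typeOff]*rtype // canonical types chosen at startup
		next          *moduledata
	}

	var firstmoduledata *moduledata

	// resolveTypeOff finds the module whose type data contains base, then
	// interprets off relative to that module's start, preferring a canonical
	// *rtype from the typemap when one was recorded.
	func resolveTypeOff(base *rtype, off typeOff) *rtype {
		addr := uintptr(unsafe.Pointer(base))
		for md := firstmoduledata; md != nil; md = md.next {
			if addr < md.types || addr >= md.etypes {
				continue
			}
			if t := md.typemap[off]; t != nil {
				return t
			}
			return (*rtype)(unsafe.Pointer(md.types + uintptr(off)))
		}
		return nil // the real runtime falls back to the reflect offset map
	}

	func main() {
		// Two fake "modules", each a contiguous block of type descriptors.
		a := [2]rtype{{1}, {2}}
		b := [2]rtype{{3}, {4}}
		modB := &moduledata{
			types:  uintptr(unsafe.Pointer(&b[0])),
			etypes: uintptr(unsafe.Pointer(&b[0])) + unsafe.Sizeof(b),
		}
		firstmoduledata = &moduledata{
			types:  uintptr(unsafe.Pointer(&a[0])),
			etypes: uintptr(unsafe.Pointer(&a[0])) + unsafe.Sizeof(a),
			next:   modB,
		}

		off := typeOff(unsafe.Sizeof(rtype{}))       // second descriptor
		fmt.Println(resolveTypeOff(&a[0], off).kind) // 2: resolved within module a
		fmt.Println(resolveTypeOff(&b[0], off).kind) // 4: same offset, module b
	}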

To ensure there is only one *rtype representing each type, the
runtime initializes a typemap for each module, using any identical
type from an earlier module when resolving that offset. This means
that types computed from an offset match the type mapped by the
pointer dynamic relocations.
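
That initialization step can be sketched roughly as follows (the dedup
and typeAt helpers are hypothetical, and a name string stands in for
the hash-plus-typesEqual comparison that the real typelinksinit in
runtime/type.go below performs):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type typeOff int32

	// rtype stand-in: the real runtime compares types with typesEqual; a
	// name string is enough to illustrate the idea here.
	type rtype struct{ name string }

	type moduledata struct {
		types     uintptr
		typelinks []int32            // offsets of this module's types
		typemap   map[typeOff]*rtype // filled in by dedup below
		next      *moduledata
	}

	func typeAt(md *moduledata, off int32) *rtype {
		return (*rtype)(unsafe.Pointer(md.types + uintptr(off)))
	}

	// dedup builds each later module's typemap so that a type also present
	// in an earlier module resolves to the earlier module's copy.
	func dedup(first *moduledata) {
		seen := map[string]*rtype{}
		for _, off := range first.typelinks {
			t := typeAt(first, off)
			seen[t.name] = t
		}
		for md := first.next; md != nil; md = md.next {
			md.typemap = make(map[typeOff]*rtype, len(md.typelinks))
			for _, off := range md.typelinks {
				t := typeAt(md, off)
				if prev, ok := seen[t.name]; ok {
					t = prev // reuse the copy seen earlier
				} else {
					seen[t.name] = t
				}
				md.typemap[typeOff(off)] = t
			}
		}
	}

	func main() {
		a := [1]rtype{{name: "pkg.T"}}
		b := [1]rtype{{name: "pkg.T"}} // duplicate descriptor in a later module
		modB := &moduledata{
			types:     uintptr(unsafe.Pointer(&b[0])),
			typelinks: []int32{0},
		}
		modA := &moduledata{
			types:     uintptr(unsafe.Pointer(&a[0])),
			typelinks: []int32{0},
			next:      modB,
		}
		dedup(modA)
		fmt.Println(modB.typemap[0] == &a[0]) // true: b's offset maps to a's copy
	}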

A series of followup CLs will replace other *rtype values with typeOff
(and name/*string with nameOff).

For types created at runtime by reflect, type offsets are treated as
global IDs that index into a reflect offset map kept by the runtime.
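
A minimal sketch of that ID scheme (addReflectOff and resolveReflectOff
here are simplified stand-ins for the runtime additions below, with
sync.Mutex in place of the runtime's internal lock):

	package main

	import (
		"fmt"
		"sync"
		"unsafe"
	)

	// reflectOffs stand-in: run-time types get negative IDs, which can never
	// collide with the non-negative offsets used for compile-time module data.
	var reflectOffs struct {
		sync.Mutex
		next int32
		m    map[int32]unsafe.Pointer
		minv map[unsafe.Pointer]int32
	}

	func addReflectOff(ptr unsafe.Pointer) int32 {
		reflectOffs.Lock()
		defer reflectOffs.Unlock()
		if reflectOffs.m == nil {
			reflectOffs.m = make(map[int32]unsafe.Pointer)
			reflectOffs.minv = make(map[unsafe.Pointer]int32)
			reflectOffs.next = -1
		}
		id, found := reflectOffs.minv[ptr]
		if !found {
			id = reflectOffs.next
			reflectOffs.next-- // hand out negative IDs in order, to aid debugging
			reflectOffs.m[id] = ptr
			reflectOffs.minv[ptr] = id
		}
		return id
	}

	func resolveReflectOff(id int32) unsafe.Pointer {
		reflectOffs.Lock()
		defer reflectOffs.Unlock()
		return reflectOffs.m[id]
	}

	func main() {
		x := 42
		id := addReflectOff(unsafe.Pointer(&x))
		fmt.Println(id)                                          // -1
		fmt.Println(resolveReflectOff(id) == unsafe.Pointer(&x)) // true
		fmt.Println(addReflectOff(unsafe.Pointer(&x)) == id)     // true: stable ID
	}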

darwin/amd64:
cmd/go:  -57KB (0.6%)
jujud:  -557KB (0.8%)

linux/amd64 PIE:
cmd/go: -361KB (3.0%)
jujud:  -3.5MB (4.2%)

For #6853.

Change-Id: Icf096fd884a0a0cb9f280f46f7a26c70a9006c96
Reviewed-on: https://go-review.googlesource.com/21285
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Run-TryBot: David Crawshaw <crawshaw@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>

12 files changed:
src/cmd/compile/internal/gc/reflect.go
src/cmd/internal/obj/link.go
src/cmd/link/internal/ld/deadcode.go
src/cmd/link/internal/ld/decodesym.go
src/reflect/export_test.go
src/reflect/type.go
src/reflect/value.go
src/runtime/iface.go
src/runtime/proc.go
src/runtime/runtime1.go
src/runtime/symtab.go
src/runtime/type.go

src/cmd/compile/internal/gc/reflect.go
index ea67634260b628d7f3c7c9ab500dc85bf835d4f6..2bd50b466595e1d8c0cf6f95ec19a2ab549c8983 100644
@@ -75,7 +75,7 @@ func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
        if t.Sym == nil && len(methods(t)) == 0 {
                return 0
        }
-       return 2*Widthptr + 2*Widthint
+       return 2 * Widthptr
 }
 
 func makefield(name string, t *Type) *Field {
@@ -580,13 +580,23 @@ func dextratype(s *Sym, ot int, t *Type, dataAdd int) int {
 
        ot = dgopkgpath(s, ot, typePkg(t))
 
-       // slice header
-       ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+dataAdd)
-
-       n := len(m)
-       ot = duintxx(s, ot, uint64(n), Widthint)
-       ot = duintxx(s, ot, uint64(n), Widthint)
+       dataAdd += Widthptr + 2 + 2
+       if Widthptr == 8 {
+               dataAdd += 4
+       }
+       mcount := len(m)
+       if mcount != int(uint16(mcount)) {
+               Fatalf("too many methods on %s: %d", t, mcount)
+       }
+       if dataAdd != int(uint16(dataAdd)) {
+               Fatalf("methods are too far away on %s: %d", t, dataAdd)
+       }
 
+       ot = duint16(s, ot, uint16(mcount))
+       ot = duint16(s, ot, uint16(dataAdd))
+       if Widthptr == 8 {
+               ot = duint32(s, ot, 0) // align for following pointers
+       }
        return ot
 }
 
@@ -609,6 +619,7 @@ func typePkg(t *Type) *Pkg {
 // dextratypeData dumps the backing array for the []method field of
 // runtime.uncommontype.
 func dextratypeData(s *Sym, ot int, t *Type) int {
+       lsym := Linksym(s)
        for _, a := range methods(t) {
                // ../../../../runtime/type.go:/method
                exported := exportname(a.name)
@@ -617,21 +628,24 @@ func dextratypeData(s *Sym, ot int, t *Type) int {
                        pkg = a.pkg
                }
                ot = dname(s, ot, a.name, "", pkg, exported)
-               ot = dmethodptr(s, ot, dtypesym(a.mtype))
-               ot = dmethodptr(s, ot, a.isym)
-               ot = dmethodptr(s, ot, a.tsym)
+               ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
+               ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
+               ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
+               if Widthptr == 8 {
+                       ot = duintxxLSym(lsym, ot, 0, 4) // pad to reflect.method size
+               }
        }
        return ot
 }
 
-func dmethodptr(s *Sym, off int, x *Sym) int {
-       duintptr(s, off, 0)
-       r := obj.Addrel(Linksym(s))
-       r.Off = int32(off)
-       r.Siz = uint8(Widthptr)
-       r.Sym = Linksym(x)
-       r.Type = obj.R_METHOD
-       return off + Widthptr
+func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
+       duintxxLSym(s, ot, 0, 4)
+       r := obj.Addrel(s)
+       r.Off = int32(ot)
+       r.Siz = 4
+       r.Sym = x
+       r.Type = obj.R_METHODOFF
+       return ot + 4
 }
 
 var kinds = []int{
@@ -1286,18 +1300,29 @@ ok:
        ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))
 
        // generate typelink.foo pointing at s = type.foo.
+       //
        // The linker will leave a table of all the typelinks for
-       // types in the binary, so reflect can find them.
-       // We only need the link for unnamed composites that
-       // we want be able to find.
-       if t.Sym == nil {
+       // types in the binary, so the runtime can find them.
+       //
+       // When buildmode=shared, all types are in typelinks so the
+       // runtime can deduplicate type pointers.
+       keep := Ctxt.Flag_dynlink
+       if !keep && t.Sym == nil {
+               // For an unnamed type, we only need the link if the type can
+               // be created at run time by reflect.PtrTo and similar
+               // functions. If the type exists in the program, those
+               // functions must return the existing type structure rather
+               // than creating a new one.
                switch t.Etype {
                case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSTRUCT:
-                       slink := typelinkLSym(t)
-                       dsymptrOffLSym(slink, 0, Linksym(s), 0)
-                       ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
+                       keep = true
                }
        }
+       if keep {
+               slink := typelinkLSym(t)
+               dsymptrOffLSym(slink, 0, Linksym(s), 0)
+               ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
+       }
 
        return s
 }
src/cmd/internal/obj/link.go
index 42aaa5f4f06436e662f6e1a454582f6141e0b8b2..55c9f4f9e2cfa72416f3c5717bd5bd75a37a00b0 100644
@@ -457,8 +457,8 @@ const (
        // R_ADDRMIPS (only used on mips64) resolves to a 32-bit external address,
        // by loading the address into a register with two instructions (lui, ori).
        R_ADDRMIPS
-       // R_ADDROFF resolves to an offset from the beginning of the section holding
-       // the data being relocated to the referenced symbol.
+       // R_ADDROFF resolves to a 32-bit offset from the beginning of the section
+       // holding the data being relocated to the referenced symbol.
        R_ADDROFF
        R_SIZE
        R_CALL
@@ -492,11 +492,12 @@ const (
        // should be linked into the final binary, even if there are no other
        // direct references. (This is used for types reachable by reflection.)
        R_USETYPE
-       // R_METHOD resolves to an *rtype for a method.
-       // It is used when linking from the uncommonType of another *rtype, and
-       // may be set to zero by the linker if it determines the method text is
-       // unreachable by the linked program.
-       R_METHOD
+       // R_METHODOFF resolves to a 32-bit offset from the beginning of the section
+       // holding the data being relocated to the referenced symbol.
+       // It is a variant of R_ADDROFF used when linking from the uncommonType of a
+       // *rtype, and may be set to zero by the linker if it determines the method
+       // text is unreachable by the linked program.
+       R_METHODOFF
        R_POWER_TOC
        R_GOTPCREL
        // R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address
src/cmd/link/internal/ld/deadcode.go
index 51fae02ef069c2328f59a2a10dbccb8ee259e328..c83a104a546a97062db80d6d20b86f308ac8b8e4 100644
@@ -19,7 +19,7 @@ import (
 //
 // This flood fill is wrapped in logic for pruning unused methods.
 // All methods are mentioned by relocations on their receiver's *rtype.
-// These relocations are specially defined as R_METHOD by the compiler
+// These relocations are specially defined as R_METHODOFF by the compiler
// so we can detect and manipulate them here.
 //
 // There are three ways a method of a reachable type can be invoked:
@@ -100,7 +100,7 @@ func deadcode(ctxt *Link) {
                d.flood()
        }
 
-       // Remove all remaining unreached R_METHOD relocations.
+       // Remove all remaining unreached R_METHODOFF relocations.
        for _, m := range d.markableMethods {
                for _, r := range m.r {
                        d.cleanupReloc(r)
@@ -167,7 +167,7 @@ var markextra = []string{
 type methodref struct {
        m   methodsig
        src *LSym     // receiver type symbol
-       r   [3]*Reloc // R_METHOD relocations to fields of runtime.method
+       r   [3]*Reloc // R_METHODOFF relocations to fields of runtime.method
 }
 
 func (m methodref) ifn() *LSym { return m.r[1].Sym }
@@ -190,7 +190,7 @@ type deadcodepass struct {
 
 func (d *deadcodepass) cleanupReloc(r *Reloc) {
        if r.Sym.Attr.Reachable() {
-               r.Type = obj.R_ADDR
+               r.Type = obj.R_ADDROFF
        } else {
                if Debug['v'] > 1 {
                        fmt.Fprintf(d.ctxt.Bso, "removing method %s\n", r.Sym.Name)
@@ -217,7 +217,7 @@ func (d *deadcodepass) mark(s, parent *LSym) {
 func (d *deadcodepass) markMethod(m methodref) {
        for _, r := range m.r {
                d.mark(r.Sym, m.src)
-               r.Type = obj.R_ADDR
+               r.Type = obj.R_ADDROFF
        }
 }
 
@@ -291,14 +291,14 @@ func (d *deadcodepass) flood() {
                        }
                }
 
-               mpos := 0 // 0-3, the R_METHOD relocs of runtime.uncommontype
+               mpos := 0 // 0-3, the R_METHODOFF relocs of runtime.uncommontype
                var methods []methodref
                for i := 0; i < len(s.R); i++ {
                        r := &s.R[i]
                        if r.Sym == nil {
                                continue
                        }
-                       if r.Type != obj.R_METHOD {
+                       if r.Type != obj.R_METHODOFF {
                                d.mark(r.Sym, s)
                                continue
                        }
src/cmd/link/internal/ld/decodesym.go
index 7daa8bc81280bb961a3d9196810cae3ba04a2940..5fa8b4c81f3d9ce98805c2ed02d93e51a97c93ad 100644
@@ -47,9 +47,9 @@ func decode_inuxi(p []byte, sz int) uint64 {
        }
 }
 
-func commonsize() int      { return 6*SysArch.PtrSize + 8 }                 // runtime._type
-func structfieldSize() int { return 3 * SysArch.PtrSize }                   // runtime.structfield
-func uncommonSize() int    { return 2*SysArch.PtrSize + 2*SysArch.IntSize } // runtime.uncommontype
+func commonsize() int      { return 6*SysArch.PtrSize + 8 } // runtime._type
+func structfieldSize() int { return 3 * SysArch.PtrSize }   // runtime.structfield
+func uncommonSize() int    { return 2 * SysArch.PtrSize }   // runtime.uncommontype
 
 // Type.commonType.kind
 func decodetype_kind(s *LSym) uint8 {
@@ -341,12 +341,14 @@ func decodetype_methods(s *LSym) []methodsig {
                // just Sizeof(rtype)
        }
 
-       numMethods := int(decode_inuxi(s.P[off+2*SysArch.PtrSize:], SysArch.IntSize))
-       r := decode_reloc(s, int32(off+SysArch.PtrSize))
-       if r.Sym != s {
-               panic(fmt.Sprintf("method slice pointer in %s leads to a different symbol %s", s, r.Sym))
+       mcount := int(decode_inuxi(s.P[off+SysArch.PtrSize:], 2))
+       moff := int(decode_inuxi(s.P[off+SysArch.PtrSize+2:], 2))
+       off += moff          // offset to array of reflect.method values
+       var sizeofMethod int // sizeof reflect.method in program
+       if SysArch.PtrSize == 4 {
+               sizeofMethod = 4 * SysArch.PtrSize
+       } else {
+               sizeofMethod = 3 * SysArch.PtrSize
        }
-       off = int(r.Add)                    // array of reflect.method values
-       sizeofMethod := 4 * SysArch.PtrSize // sizeof reflect.method in program
-       return decode_methodsig(s, off, sizeofMethod, numMethods)
+       return decode_methodsig(s, off, sizeofMethod, mcount)
 }
src/reflect/export_test.go
index 037c95371808dc10aceea590554d03c7f7d8cfde..2769e0db40a51f06c9f49214babef739e803cc2a 100644
@@ -90,7 +90,7 @@ func FirstMethodNameBytes(t Type) *byte {
        if ut == nil {
                panic("type has no methods")
        }
-       m := ut.methods[0]
+       m := ut.methods()[0]
        if *m.name.data(0)&(1<<2) == 0 {
                panic("method name does not have pkgPath *string")
        }
src/reflect/type.go
index 7104fde60a0788e076f71c9053d763ce4c83bb4c..c7ed402be2661962c54b747ef595e40c651444b2 100644
@@ -288,10 +288,10 @@ type typeAlg struct {
 
 // Method on non-interface type
 type method struct {
-       name name           // name of method
-       mtyp *rtype         // method type (without receiver)
-       ifn  unsafe.Pointer // fn used in interface call (one-word receiver)
-       tfn  unsafe.Pointer // fn used for normal method call
+       name name    // name of method
+       mtyp typeOff // method type (without receiver)
+       ifn  textOff // fn used in interface call (one-word receiver)
+       tfn  textOff // fn used for normal method call
 }
 
 // uncommonType is present only for types with names or methods
@@ -299,8 +299,9 @@ type method struct {
 // Using a pointer to this struct reduces the overall size required
 // to describe an unnamed type with no methods.
 type uncommonType struct {
-       pkgPath *string  // import path; nil for built-in types like int, string
-       methods []method // methods associated with type
+       pkgPath *string // import path; nil for built-in types like int, string
+       mcount  uint16  // number of methods
+       moff    uint16  // offset from this uncommontype to [mcount]method
 }
 
 // ChanDir represents a channel type's direction.
@@ -589,6 +590,10 @@ var kindNames = []string{
        UnsafePointer: "unsafe.Pointer",
 }
 
+func (t *uncommonType) methods() []method {
+       return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
+}
+
 func (t *uncommonType) PkgPath() string {
        if t == nil || t.pkgPath == nil {
                return ""
@@ -596,13 +601,55 @@ func (t *uncommonType) PkgPath() string {
        return *t.pkgPath
 }
 
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTextOff resolves a function pointer offset from a base type.
+// The (*rtype).textOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// addReflectOff adds a pointer to the reflection lookup map in the runtime.
+// It returns a new ID that can be used as a typeOff or textOff, and will
+// be resolved correctly. Implemented in the runtime package.
+func addReflectOff(ptr unsafe.Pointer) int32
+
+// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
+// It returns a new typeOff that can be used to refer to the pointer.
+func resolveReflectType(t *rtype) typeOff {
+       return typeOff(addReflectOff(unsafe.Pointer(t)))
+}
+
+// resolveReflectText adds a function pointer to the reflection lookup map in
+// the runtime. It returns a new textOff that can be used to refer to the
+// pointer.
+func resolveReflectText(ptr unsafe.Pointer) textOff {
+       return textOff(addReflectOff(ptr))
+}
+
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+       if off == 0 {
+               return nil
+       }
+       return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) textOff(off textOff) unsafe.Pointer {
+       return resolveTextOff(unsafe.Pointer(t), int32(off))
+}
+
 func (t *rtype) uncommon() *uncommonType {
        if t.tflag&tflagUncommon == 0 {
                return nil
        }
        switch t.Kind() {
        case Struct:
-               return &(*structTypeWithMethods)(unsafe.Pointer(t)).u
+               return &(*structTypeUncommon)(unsafe.Pointer(t)).u
        case Ptr:
                type u struct {
                        ptrType
@@ -688,7 +735,7 @@ func (t *rtype) NumMethod() int {
        if ut == nil {
                return 0
        }
-       return len(ut.methods)
+       return int(ut.mcount)
 }
 
 func (t *rtype) Method(i int) (m Method) {
@@ -698,10 +745,10 @@ func (t *rtype) Method(i int) (m Method) {
        }
        ut := t.uncommon()
 
-       if ut == nil || i < 0 || i >= len(ut.methods) {
+       if ut == nil || i < 0 || i >= int(ut.mcount) {
                panic("reflect: Method index out of range")
        }
-       p := &ut.methods[i]
+       p := ut.methods()[i]
        m.Name = p.name.name()
        fl := flag(Func)
        if !p.name.isExported() {
@@ -712,8 +759,9 @@ func (t *rtype) Method(i int) (m Method) {
                m.PkgPath = *pkgPath
                fl |= flagStickyRO
        }
-       if p.mtyp != nil {
-               ft := (*funcType)(unsafe.Pointer(p.mtyp))
+       if p.mtyp != 0 {
+               mtyp := t.typeOff(p.mtyp)
+               ft := (*funcType)(unsafe.Pointer(mtyp))
                in := make([]Type, 0, 1+len(ft.in()))
                in = append(in, t)
                for _, arg := range ft.in() {
@@ -723,9 +771,10 @@ func (t *rtype) Method(i int) (m Method) {
                for _, ret := range ft.out() {
                        out = append(out, ret)
                }
-               mt := FuncOf(in, out, p.mtyp.IsVariadic())
+               mt := FuncOf(in, out, ft.IsVariadic())
                m.Type = mt
-               fn := unsafe.Pointer(&p.tfn)
+               tfn := t.textOff(p.tfn)
+               fn := unsafe.Pointer(&tfn)
                m.Func = Value{mt.(*rtype), fn, fl}
        }
        m.Index = i
@@ -741,8 +790,9 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) {
        if ut == nil {
                return Method{}, false
        }
-       for i := range ut.methods {
-               p := &ut.methods[i]
+       utmethods := ut.methods()
+       for i := 0; i < int(ut.mcount); i++ {
+               p := utmethods[i]
                if p.name.name() == name {
                        return t.Method(i), true
                }
@@ -1430,10 +1480,11 @@ func implements(T, V *rtype) bool {
                return false
        }
        i := 0
-       for j := 0; j < len(v.methods); j++ {
+       vmethods := v.methods()
+       for j := 0; j < int(v.mcount); j++ {
                tm := &t.methods[i]
-               vm := &v.methods[j]
-               if vm.name.name() == tm.name.name() && vm.mtyp == tm.typ {
+               vm := vmethods[j]
+               if vm.name.name() == tm.name.name() && V.typeOff(vm.mtyp) == tm.typ {
                        if i++; i >= len(t.methods) {
                                return true
                        }
@@ -2161,21 +2212,55 @@ func SliceOf(t Type) Type {
        return cachePut(ckey, &slice.rtype)
 }
 
-// structTypeWithMethods is a structType created at runtime with StructOf.
-// It is needed to pin the []method slice from its associated uncommonType struct.
-// Keep in sync with the memory layout of structType.
-type structTypeWithMethods struct {
-       structType
-       u uncommonType
-}
-
 // The structLookupCache caches StructOf lookups.
 // StructOf does not share the common lookupCache since we need to pin
-// the *structType and its associated *uncommonType (especially the
-// []method slice field of that uncommonType.)
+// the memory associated with *structTypeFixedN.
 var structLookupCache struct {
        sync.RWMutex
-       m map[uint32][]*structTypeWithMethods // keyed by hash calculated in StructOf
+       m map[uint32][]interface {
+               common() *rtype
+       } // keyed by hash calculated in StructOf
+}
+
+type structTypeUncommon struct {
+       structType
+       u uncommonType
+}
+
+// A *rtype representing a struct is followed directly in memory by an
+// array of method objects representing the methods attached to the
+// struct. To get the same layout for a run time generated type, we
+// need an array directly following the uncommonType memory. The types
+// structTypeFixed4, ...structTypeFixedN are used to do this.
+//
+// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
+
+// TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs
+// have no methods, they could be defined at runtime using the StructOf
+// function.
+
+type structTypeFixed4 struct {
+       structType
+       u uncommonType
+       m [4]method
+}
+
+type structTypeFixed8 struct {
+       structType
+       u uncommonType
+       m [8]method
+}
+
+type structTypeFixed16 struct {
+       structType
+       u uncommonType
+       m [16]method
+}
+
+type structTypeFixed32 struct {
+       structType
+       u uncommonType
+       m [32]method
 }
 
 // StructOf returns the struct type containing fields.
@@ -2192,7 +2277,7 @@ func StructOf(fields []StructField) Type {
                typalign   uint8
                comparable = true
                hashable   = true
-               typ        = new(structTypeWithMethods)
+               methods    []method
 
                fs   = make([]structField, len(fields))
                repr = make([]byte, 0, 64)
@@ -2269,7 +2354,6 @@ func StructOf(fields []StructField) Type {
                                                        }
                                                        return recv.Field(ifield).Method(imethod).Call(args)
                                                })
-
                                        } else {
                                                tfn = MakeFunc(m.typ, func(in []Value) []Value {
                                                        var args []Value
@@ -2287,47 +2371,59 @@ func StructOf(fields []StructField) Type {
                                                        }
                                                        return recv.Field(ifield).Method(imethod).Call(args)
                                                })
-
                                        }
 
-                                       typ.u.methods = append(
-                                               typ.u.methods,
-                                               method{
-                                                       name: m.name,
-                                                       mtyp: m.typ,
-                                                       ifn:  unsafe.Pointer(&ifn),
-                                                       tfn:  unsafe.Pointer(&tfn),
-                                               },
-                                       )
+                                       methods = append(methods, method{
+                                               name: m.name,
+                                               mtyp: resolveReflectType(m.typ),
+                                               ifn:  resolveReflectText(unsafe.Pointer(&ifn)),
+                                               tfn:  resolveReflectText(unsafe.Pointer(&tfn)),
+                                       })
                                }
                        case Ptr:
                                ptr := (*ptrType)(unsafe.Pointer(ft))
                                if unt := ptr.uncommon(); unt != nil {
-                                       for _, m := range unt.methods {
+                                       for _, m := range unt.methods() {
                                                if m.name.pkgPath() != nil {
                                                        // TODO(sbinet)
                                                        panic("reflect: embedded interface with unexported method(s) not implemented")
                                                }
-                                               typ.u.methods = append(typ.u.methods, m)
+                                               methods = append(methods, method{
+                                                       name: m.name,
+                                                       mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
+                                                       ifn:  resolveReflectText(ptr.textOff(m.ifn)),
+                                                       tfn:  resolveReflectText(ptr.textOff(m.tfn)),
+                                               })
                                        }
                                }
                                if unt := ptr.elem.uncommon(); unt != nil {
-                                       for _, m := range unt.methods {
+                                       for _, m := range unt.methods() {
                                                if m.name.pkgPath() != nil {
                                                        // TODO(sbinet)
                                                        panic("reflect: embedded interface with unexported method(s) not implemented")
                                                }
-                                               typ.u.methods = append(typ.u.methods, m)
+                                               methods = append(methods, method{
+                                                       name: m.name,
+                                                       mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
+                                                       ifn:  resolveReflectText(ptr.elem.textOff(m.ifn)),
+                                                       tfn:  resolveReflectText(ptr.elem.textOff(m.tfn)),
+                                               })
                                        }
                                }
                        default:
                                if unt := ft.uncommon(); unt != nil {
-                                       for _, m := range unt.methods {
+                                       for _, m := range unt.methods() {
                                                if m.name.pkgPath() != nil {
                                                        // TODO(sbinet)
                                                        panic("reflect: embedded interface with unexported method(s) not implemented")
                                                }
-                                               typ.u.methods = append(typ.u.methods, m)
+                                               methods = append(methods, method{
+                                                       name: m.name,
+                                                       mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
+                                                       ifn:  resolveReflectText(ft.textOff(m.ifn)),
+                                                       tfn:  resolveReflectText(ft.textOff(m.tfn)),
+                                               })
+
                                        }
                                }
                        }
@@ -2359,6 +2455,49 @@ func StructOf(fields []StructField) Type {
 
                fs[i] = f
        }
+
+       var typ *structType
+       var ut *uncommonType
+       var typPin interface {
+               common() *rtype
+       } // structTypeFixedN
+
+       switch {
+       case len(methods) == 0:
+               t := new(structTypeUncommon)
+               typ = &t.structType
+               ut = &t.u
+               typPin = t
+       case len(methods) <= 4:
+               t := new(structTypeFixed4)
+               typ = &t.structType
+               ut = &t.u
+               copy(t.m[:], methods)
+               typPin = t
+       case len(methods) <= 8:
+               t := new(structTypeFixed8)
+               typ = &t.structType
+               ut = &t.u
+               copy(t.m[:], methods)
+               typPin = t
+       case len(methods) <= 16:
+               t := new(structTypeFixed16)
+               typ = &t.structType
+               ut = &t.u
+               copy(t.m[:], methods)
+               typPin = t
+       case len(methods) <= 32:
+               t := new(structTypeFixed32)
+               typ = &t.structType
+               ut = &t.u
+               copy(t.m[:], methods)
+               typPin = t
+       default:
+               panic("reflect.StructOf: too many methods")
+       }
+       ut.mcount = uint16(len(methods))
+       ut.moff = uint16(unsafe.Sizeof(uncommonType{}))
+
        if len(fs) > 0 {
                repr = append(repr, ' ')
        }
@@ -2372,15 +2511,16 @@ func StructOf(fields []StructField) Type {
        // Make the struct type.
        var istruct interface{} = struct{}{}
        prototype := *(**structType)(unsafe.Pointer(&istruct))
-       typ.structType = *prototype
-       typ.structType.fields = fs
+       *typ = *prototype
+       typ.fields = fs
 
        // Look in cache
        structLookupCache.RLock()
-       for _, t := range structLookupCache.m[hash] {
-               if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) {
+       for _, st := range structLookupCache.m[hash] {
+               t := st.common()
+               if haveIdenticalUnderlyingType(&typ.rtype, t) {
                        structLookupCache.RUnlock()
-                       return &t.rtype
+                       return t
                }
        }
        structLookupCache.RUnlock()
@@ -2389,11 +2529,14 @@ func StructOf(fields []StructField) Type {
        structLookupCache.Lock()
        defer structLookupCache.Unlock()
        if structLookupCache.m == nil {
-               structLookupCache.m = make(map[uint32][]*structTypeWithMethods)
+               structLookupCache.m = make(map[uint32][]interface {
+                       common() *rtype
+               })
        }
-       for _, t := range structLookupCache.m[hash] {
-               if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) {
-                       return &t.rtype
+       for _, st := range structLookupCache.m[hash] {
+               t := st.common()
+               if haveIdenticalUnderlyingType(&typ.rtype, t) {
+                       return t
                }
        }
 
@@ -2403,9 +2546,8 @@ func StructOf(fields []StructField) Type {
                        // even if 't' wasn't a structType with methods, we should be ok
                        // as the 'u uncommonType' field won't be accessed except when
                        // tflag&tflagUncommon is set.
-                       tt := (*structTypeWithMethods)(unsafe.Pointer(t))
-                       structLookupCache.m[hash] = append(structLookupCache.m[hash], tt)
-                       return &tt.rtype
+                       structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
+                       return t
                }
        }
 
@@ -2414,7 +2556,7 @@ func StructOf(fields []StructField) Type {
        typ.size = size
        typ.align = typalign
        typ.fieldAlign = typalign
-       if len(typ.u.methods) > 0 {
+       if len(methods) > 0 {
                typ.tflag |= tflagUncommon
        }
        if !hasPtr {
@@ -2514,7 +2656,7 @@ func StructOf(fields []StructField) Type {
                typ.kind &^= kindDirectIface
        }
 
-       structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
+       structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin)
        return &typ.rtype
 }
 
@@ -2533,6 +2675,7 @@ func runtimeStructField(field StructField) structField {
                }
        }
 
+       _ = resolveReflectType(field.Type.common())
        return structField{
                name:   newName(field.Name, string(field.Tag), field.PkgPath, exported),
                typ:    field.Type.common(),
src/reflect/value.go
index 262545d973ccc4570beeee8a8d6617defc06ed29..d72c14e9e1492fb2e72bfe77648455225eab47d2 100644
@@ -566,15 +566,16 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn
        } else {
                rcvrtype = v.typ
                ut := v.typ.uncommon()
-               if ut == nil || uint(i) >= uint(len(ut.methods)) {
+               if ut == nil || uint(i) >= uint(ut.mcount) {
                        panic("reflect: internal error: invalid method index")
                }
-               m := &ut.methods[i]
+               m := ut.methods()[i]
                if !m.name.isExported() {
                        panic("reflect: " + op + " of unexported method")
                }
-               fn = unsafe.Pointer(&m.ifn)
-               t = m.mtyp
+               ifn := v.typ.textOff(m.ifn)
+               fn = unsafe.Pointer(&ifn)
+               t = v.typ.typeOff(m.mtyp)
        }
        return
 }
@@ -1687,11 +1688,11 @@ func (v Value) Type() Type {
        }
        // Method on concrete type.
        ut := v.typ.uncommon()
-       if ut == nil || uint(i) >= uint(len(ut.methods)) {
+       if ut == nil || uint(i) >= uint(ut.mcount) {
                panic("reflect: internal error: invalid method index")
        }
-       m := &ut.methods[i]
-       return m.mtyp
+       m := ut.methods()[i]
+       return v.typ.typeOff(m.mtyp)
 }
 
 // Uint returns v's underlying value, as a uint64.
src/runtime/iface.go
index a4c962fb7a4978dbd9dc8b393c63e6cd0dccd3e3..700bdc2f48c11d043f62386db4c90893f49372ec 100644
@@ -93,7 +93,8 @@ func additab(m *itab, locked, canfail bool) {
        // so can iterate over both in lock step;
        // the loop is O(ni+nt) not O(ni*nt).
        ni := len(inter.mhdr)
-       nt := len(x.mhdr)
+       nt := int(x.mcount)
+       xmhdr := (*[1 << 16]method)(add(unsafe.Pointer(x), uintptr(x.moff)))[:nt:nt]
        j := 0
        for k := 0; k < ni; k++ {
                i := &inter.mhdr[k]
@@ -104,15 +105,16 @@ func additab(m *itab, locked, canfail bool) {
                        ipkg = inter.pkgpath
                }
                for ; j < nt; j++ {
-                       t := &x.mhdr[j]
-                       if t.mtyp == itype && t.name.name() == iname {
+                       t := &xmhdr[j]
+                       if typ.typeOff(t.mtyp) == itype && t.name.name() == iname {
                                pkgPath := t.name.pkgPath()
                                if pkgPath == nil {
                                        pkgPath = x.pkgpath
                                }
                                if t.name.isExported() || pkgPath == ipkg {
                                        if m != nil {
-                                               *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
+                                               ifn := typ.textOff(t.ifn)
+                                               *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = ifn
                                        }
                                        goto nextimethod
                                }
src/runtime/proc.go
index 1a9dbd6c53fa943fc796a2d7abf85b1b140eb56f..98a986cd63758671763a44cc3c497ac351a5a9fc 100644
@@ -435,9 +435,10 @@ func schedinit() {
        tracebackinit()
        moduledataverify()
        stackinit()
-       itabsinit()
        mallocinit()
        mcommoninit(_g_.m)
+       typelinksinit()
+       itabsinit()
 
        msigsave(_g_.m)
        initSigmask = _g_.m.sigmask
src/runtime/runtime1.go
index e1956569fd794885425ae2374c0903515db42a75..02aeedaf75ee2ac61e7362859476d982f4a6053e 100644
@@ -486,3 +486,36 @@ func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
        }
        return sections, ret
 }
+
+// reflect_resolveTypeOff resolves an *rtype offset from a base type.
+//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
+func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+       return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
+}
+
+// reflect_resolveTextOff resolves a function pointer offset from a base type.
+//go:linkname reflect_resolveTextOff reflect.resolveTextOff
+func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+       return (*_type)(rtype).textOff(textOff(off))
+
+}
+
+// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
+//go:linkname reflect_addReflectOff reflect.addReflectOff
+func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
+       lock(&reflectOffs.lock)
+       if reflectOffs.m == nil {
+               reflectOffs.m = make(map[int32]unsafe.Pointer)
+               reflectOffs.minv = make(map[unsafe.Pointer]int32)
+               reflectOffs.next = -1
+       }
+       id, found := reflectOffs.minv[ptr]
+       if !found {
+               id = reflectOffs.next
+               reflectOffs.next-- // use negative offsets as IDs to aid debugging
+               reflectOffs.m[id] = ptr
+               reflectOffs.minv[ptr] = id
+       }
+       unlock(&reflectOffs.lock)
+       return id
+}
src/runtime/symtab.go
index 8c70f22c1fc2f150b1ae421bd1b3ee0f0d530206..2df390253a6d1a26fdbbdd5b0f7e252c4fccc583 100644
@@ -137,6 +137,8 @@ type moduledata struct {
 
        gcdatamask, gcbssmask bitvector
 
+       typemap map[typeOff]*_type // offset to *_type in previous module
+
        next *moduledata
 }
 
src/runtime/type.go
index fbf6f9973ce26d5438e6ac9c7b73a54093f2561c..86131d3ff3992a27dfb040eae8d8f466dbcc707b 100644
@@ -131,6 +131,92 @@ func (t *_type) name() string {
        return t._string[i+1:]
 }
 
+// reflectOffs holds type offsets defined at run time by the reflect package.
+//
+// When a type is defined at run time, its *rtype data lives on the heap.
+// There is a wide range of possible addresses the heap may use, which
+// may not be representable as a 32-bit offset. Moreover the GC may
+// one day start moving heap memory, in which case there is no stable
+// offset that can be defined.
+//
+// To provide stable offsets, we pin *rtype objects in a global map
+// and treat the offset as an identifier. We use negative offsets that
+// do not overlap with any compile-time module offsets.
+//
+// Entries are created by reflect.addReflectOff.
+var reflectOffs struct {
+       lock mutex
+       next int32
+       m    map[int32]unsafe.Pointer
+       minv map[unsafe.Pointer]int32
+}
+
+func (t *_type) typeOff(off typeOff) *_type {
+       if off == 0 {
+               return nil
+       }
+       base := uintptr(unsafe.Pointer(t))
+       var md *moduledata
+       for next := &firstmoduledata; next != nil; next = next.next {
+               if base >= next.types && base < next.etypes {
+                       md = next
+                       break
+               }
+       }
+       if md == nil {
+               lock(&reflectOffs.lock)
+               res := reflectOffs.m[int32(off)]
+               unlock(&reflectOffs.lock)
+               if res == nil {
+                       println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
+                       for next := &firstmoduledata; next != nil; next = next.next {
+                               println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+                       }
+                       throw("runtime: type offset base pointer out of range")
+               }
+               return (*_type)(res)
+       }
+       if t := md.typemap[off]; t != nil {
+               return t
+       }
+       res := md.types + uintptr(off)
+       if res > md.etypes {
+               println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+               throw("runtime: type offset out of range")
+       }
+       return (*_type)(unsafe.Pointer(res))
+}
+
+func (t *_type) textOff(off textOff) unsafe.Pointer {
+       base := uintptr(unsafe.Pointer(t))
+       var md *moduledata
+       for next := &firstmoduledata; next != nil; next = next.next {
+               if base >= next.types && base < next.etypes {
+                       md = next
+                       break
+               }
+       }
+       if md == nil {
+               lock(&reflectOffs.lock)
+               res := reflectOffs.m[int32(off)]
+               unlock(&reflectOffs.lock)
+               if res == nil {
+                       println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
+                       for next := &firstmoduledata; next != nil; next = next.next {
+                               println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+                       }
+                       throw("runtime: text offset base pointer out of range")
+               }
+               return res
+       }
+       res := md.text + uintptr(off)
+       if res > md.etext {
+               println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
+               throw("runtime: text offset out of range")
+       }
+       return unsafe.Pointer(res)
+}
+
 func (t *functype) in() []*_type {
        // See funcType in reflect/type.go for details on data layout.
        uadd := uintptr(unsafe.Sizeof(functype{}))
@@ -154,16 +240,20 @@ func (t *functype) dotdotdot() bool {
        return t.outCount&(1<<15) != 0
 }
 
+type typeOff int32
+type textOff int32
+
 type method struct {
        name name
-       mtyp *_type
-       ifn  unsafe.Pointer
-       tfn  unsafe.Pointer
+       mtyp typeOff
+       ifn  textOff
+       tfn  textOff
 }
 
 type uncommontype struct {
        pkgpath *string
-       mhdr    []method
+       mcount  uint16 // number of methods
+       moff    uint16 // offset from this uncommontype to [mcount]method
 }
 
 type imethod struct {
@@ -270,6 +360,18 @@ func (n *name) name() (s string) {
        return s
 }
 
+func (n *name) tag() (s string) {
+       tl := n.tagLen()
+       if tl == 0 {
+               return ""
+       }
+       nl := n.nameLen()
+       hdr := (*stringStruct)(unsafe.Pointer(&s))
+       hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
+       hdr.len = tl
+       return s
+}
+
 func (n *name) pkgPath() *string {
        if *n.data(0)&(1<<2) == 0 {
                return nil
@@ -281,3 +383,200 @@ func (n *name) pkgPath() *string {
        off = int(round(uintptr(off), sys.PtrSize))
        return *(**string)(unsafe.Pointer(n.data(off)))
 }
+
+// typelinksinit scans the types from extra modules and builds the
+// moduledata typemap used to de-duplicate type pointers.
+func typelinksinit() {
+       if firstmoduledata.next == nil {
+               return
+       }
+       typehash := make(map[uint32][]*_type)
+
+       modules := []*moduledata{}
+       for md := &firstmoduledata; md != nil; md = md.next {
+               modules = append(modules, md)
+       }
+       prev, modules := modules[len(modules)-1], modules[:len(modules)-1]
+       for len(modules) > 0 {
+               // Collect types from the previous module into typehash.
+       collect:
+               for _, tl := range prev.typelinks {
+                       var t *_type
+                       if prev.typemap == nil {
+                               t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
+                       } else {
+                               t = prev.typemap[typeOff(tl)]
+                       }
+                       // Add to typehash if not seen before.
+                       tlist := typehash[t.hash]
+                       for _, tcur := range tlist {
+                               if tcur == t {
+                                       continue collect
+                               }
+                       }
+                       typehash[t.hash] = append(tlist, t)
+               }
+
+               // If any of this module's typelinks match a type from a
+               // prior module, prefer that prior type by adding the offset
+               // to this module's typemap.
+               md := modules[len(modules)-1]
+               md.typemap = make(map[typeOff]*_type, len(md.typelinks))
+               for _, tl := range md.typelinks {
+                       t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
+                       for _, candidate := range typehash[t.hash] {
+                               if typesEqual(t, candidate) {
+                                       t = candidate
+                                       break
+                               }
+                       }
+                       md.typemap[typeOff(tl)] = t
+               }
+
+               prev, modules = md, modules[:len(modules)-1]
+       }
+}
+
+// typesEqual reports whether two types are equal.
+//
+// Everywhere in the runtime and reflect packages, it is assumed that
+// there is exactly one *_type per Go type, so that pointer equality
+// can be used to test if types are equal. There is one place that
+// breaks this assumption: buildmode=shared. In this case a type can
+// appear as two different pieces of memory. This is hidden from the
+// runtime and reflect package by the per-module typemap built in
+// typelinksinit. It uses typesEqual to map types from later modules
+// back into earlier ones.
+//
+// Only typelinksinit needs this function.
+func typesEqual(t, v *_type) bool {
+       if t == v {
+               return true
+       }
+       kind := t.kind & kindMask
+       if kind != v.kind&kindMask {
+               return false
+       }
+       if t._string != v._string {
+               return false
+       }
+       ut := t.uncommon()
+       uv := v.uncommon()
+       if ut != nil || uv != nil {
+               if ut == nil || uv == nil {
+                       return false
+               }
+               if !pkgPathEqual(ut.pkgpath, uv.pkgpath) {
+                       return false
+               }
+       }
+       if kindBool <= kind && kind <= kindComplex128 {
+               return true
+       }
+       switch kind {
+       case kindString, kindUnsafePointer:
+               return true
+       case kindArray:
+               at := (*arraytype)(unsafe.Pointer(t))
+               av := (*arraytype)(unsafe.Pointer(v))
+               return typesEqual(at.elem, av.elem) && at.len == av.len
+       case kindChan:
+               ct := (*chantype)(unsafe.Pointer(t))
+               cv := (*chantype)(unsafe.Pointer(v))
+               return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem)
+       case kindFunc:
+               ft := (*functype)(unsafe.Pointer(t))
+               fv := (*functype)(unsafe.Pointer(v))
+               if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
+                       return false
+               }
+               tin, vin := ft.in(), fv.in()
+               for i := 0; i < len(tin); i++ {
+                       if !typesEqual(tin[i], vin[i]) {
+                               return false
+                       }
+               }
+               tout, vout := ft.out(), fv.out()
+               for i := 0; i < len(tout); i++ {
+                       if !typesEqual(tout[i], vout[i]) {
+                               return false
+                       }
+               }
+               return true
+       case kindInterface:
+               it := (*interfacetype)(unsafe.Pointer(t))
+               iv := (*interfacetype)(unsafe.Pointer(v))
+               if !pkgPathEqual(it.pkgpath, iv.pkgpath) {
+                       return false
+               }
+               if len(it.mhdr) != len(iv.mhdr) {
+                       return false
+               }
+               for i := range it.mhdr {
+                       tm := &it.mhdr[i]
+                       vm := &iv.mhdr[i]
+                       if tm.name.name() != vm.name.name() {
+                               return false
+                       }
+                       if !pkgPathEqual(tm.name.pkgPath(), vm.name.pkgPath()) {
+                               return false
+                       }
+                       if !typesEqual(tm._type, vm._type) {
+                               return false
+                       }
+               }
+               return true
+       case kindMap:
+               mt := (*maptype)(unsafe.Pointer(t))
+               mv := (*maptype)(unsafe.Pointer(v))
+               return typesEqual(mt.key, mv.key) && typesEqual(mt.elem, mv.elem)
+       case kindPtr:
+               pt := (*ptrtype)(unsafe.Pointer(t))
+               pv := (*ptrtype)(unsafe.Pointer(v))
+               return typesEqual(pt.elem, pv.elem)
+       case kindSlice:
+               st := (*slicetype)(unsafe.Pointer(t))
+               sv := (*slicetype)(unsafe.Pointer(v))
+               return typesEqual(st.elem, sv.elem)
+       case kindStruct:
+               st := (*structtype)(unsafe.Pointer(t))
+               sv := (*structtype)(unsafe.Pointer(v))
+               if len(st.fields) != len(sv.fields) {
+                       return false
+               }
+               for i := range st.fields {
+                       tf := &st.fields[i]
+                       vf := &sv.fields[i]
+                       if tf.name.name() != vf.name.name() {
+                               return false
+                       }
+                       if !pkgPathEqual(tf.name.pkgPath(), vf.name.pkgPath()) {
+                               return false
+                       }
+                       if !typesEqual(tf.typ, vf.typ) {
+                               return false
+                       }
+                       if tf.name.tag() != vf.name.tag() {
+                               return false
+                       }
+                       if tf.offset != vf.offset {
+                               return false
+                       }
+               }
+               return true
+       default:
+               println("runtime: impossible type kind", kind)
+               throw("runtime: impossible type kind")
+               return false
+       }
+}
+
+func pkgPathEqual(p, q *string) bool {
+       if p == q {
+               return true
+       }
+       if p == nil || q == nil {
+               return false
+       }
+       return *p == *q
+}