if t.Sym == nil && len(methods(t)) == 0 {
return 0
}
- return 2*Widthptr + 2*Widthint
+ return 2 * Widthptr
}
func makefield(name string, t *Type) *Field {
ot = dgopkgpath(s, ot, typePkg(t))
- // slice header
- ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+dataAdd)
-
- n := len(m)
- ot = duintxx(s, ot, uint64(n), Widthint)
- ot = duintxx(s, ot, uint64(n), Widthint)
+ dataAdd += Widthptr + 2 + 2
+ if Widthptr == 8 {
+ dataAdd += 4
+ }
+ mcount := len(m)
+ if mcount != int(uint16(mcount)) {
+ Fatalf("too many methods on %s: %d", t, mcount)
+ }
+ if dataAdd != int(uint16(dataAdd)) {
+ Fatalf("methods are too far away on %s: %d", t, dataAdd)
+ }
+ ot = duint16(s, ot, uint16(mcount))
+ ot = duint16(s, ot, uint16(dataAdd))
+ if Widthptr == 8 {
+ ot = duint32(s, ot, 0) // align for following pointers
+ }
return ot
}
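// A layout sketch (editor's note, not part of the CL) of the
// uncommontype encoding emitted above, on a 64-bit target
// (Widthptr == 8):
//
//	offset  size  field
//	0       8     pkgPath *string
//	8       2     mcount  uint16
//	10      2     moff    uint16
//	12      4     padding (aligns the following method data)
//
// That is 16 == 2*Widthptr bytes, and 4+2+2 == 8 == 2*Widthptr on
// 32-bit targets, which is the new uncommonSize above. dataAdd, after
// the increments above, is the moff written out: the offset from the
// start of the uncommontype to its [mcount]method array.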
// dextratypeData dumps the backing array of methods, found via the
// moff field of runtime.uncommontype.
func dextratypeData(s *Sym, ot int, t *Type) int {
+ lsym := Linksym(s)
for _, a := range methods(t) {
// ../../../../runtime/type.go:/method
exported := exportname(a.name)
pkg = a.pkg
}
ot = dname(s, ot, a.name, "", pkg, exported)
- ot = dmethodptr(s, ot, dtypesym(a.mtype))
- ot = dmethodptr(s, ot, a.isym)
- ot = dmethodptr(s, ot, a.tsym)
+ ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
+ ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
+ ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
+ if Widthptr == 8 {
+ ot = duintxxLSym(lsym, ot, 0, 4) // pad to reflect.method size
+ }
}
return ot
}
-func dmethodptr(s *Sym, off int, x *Sym) int {
- duintptr(s, off, 0)
- r := obj.Addrel(Linksym(s))
- r.Off = int32(off)
- r.Siz = uint8(Widthptr)
- r.Sym = Linksym(x)
- r.Type = obj.R_METHOD
- return off + Widthptr
+func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
+ duintxxLSym(s, ot, 0, 4)
+ r := obj.Addrel(s)
+ r.Off = int32(ot)
+ r.Siz = 4
+ r.Sym = x
+ r.Type = obj.R_METHODOFF
+ return ot + 4
}
var kinds = []int{
ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))
// generate typelink.foo pointing at s = type.foo.
+ //
// The linker will leave a table of all the typelinks for
- // types in the binary, so reflect can find them.
- // We only need the link for unnamed composites that
- // we want be able to find.
- if t.Sym == nil {
+ // types in the binary, so the runtime can find them.
+ //
+ // When buildmode=shared, all types are in typelinks so the
+ // runtime can deduplicate type pointers.
+ keep := Ctxt.Flag_dynlink
+ if !keep && t.Sym == nil {
+ // For an unnamed type, we only need the link if the type can
+ // be created at run time by reflect.PtrTo and similar
+ // functions. If the type exists in the program, those
+ // functions must return the existing type structure rather
+ // than creating a new one.
switch t.Etype {
case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSTRUCT:
- slink := typelinkLSym(t)
- dsymptrOffLSym(slink, 0, Linksym(s), 0)
- ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
+ keep = true
}
}
+ if keep {
+ slink := typelinkLSym(t)
+ dsymptrOffLSym(slink, 0, Linksym(s), 0)
+ ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
+ }
return s
}
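// For example, an unnamed type such as map[string]int now gets a
// 4-byte typelink.map[string]int symbol referencing type.map[string]int,
// so a run-time call like reflect.MapOf(stringType, intType) can return
// the compiled type instead of constructing a duplicate. With
// buildmode=shared, keeping every type in typelinks is what allows
// typelinksinit (in the runtime hunks below) to deduplicate type
// pointers across modules.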
// R_ADDRMIPS (only used on mips64) resolves to a 32-bit external address,
// by loading the address into a register with two instructions (lui, ori).
R_ADDRMIPS
- // R_ADDROFF resolves to an offset from the beginning of the section holding
- // the data being relocated to the referenced symbol.
+ // R_ADDROFF resolves to a 32-bit offset from the beginning of the section
+ // holding the data being relocated to the referenced symbol.
R_ADDROFF
R_SIZE
R_CALL
// should be linked into the final binary, even if there are no other
// direct references. (This is used for types reachable by reflection.)
R_USETYPE
- // R_METHOD resolves to an *rtype for a method.
- // It is used when linking from the uncommonType of another *rtype, and
- // may be set to zero by the linker if it determines the method text is
- // unreachable by the linked program.
- R_METHOD
+ // R_METHODOFF resolves to a 32-bit offset from the beginning of the section
+ // holding the data being relocated to the referenced symbol.
+ // It is a variant of R_ADDROFF used when linking from the uncommonType of a
+ // *rtype, and may be set to zero by the linker if it determines the method
+ // text is unreachable by the linked program.
+ R_METHODOFF
R_POWER_TOC
R_GOTPCREL
// R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address
//
// This flood fill is wrapped in logic for pruning unused methods.
// All methods are mentioned by relocations on their receiver's *rtype.
-// These relocations are specially defined as R_METHOD by the compiler
+// These relocations are specially defined as R_METHODOFF by the compiler
// so we can detect and manipulate them here.
//
// There are three ways a method of a reachable type can be invoked:
d.flood()
}
- // Remove all remaining unreached R_METHOD relocations.
+ // Remove all remaining unreached R_METHODOFF relocations.
for _, m := range d.markableMethods {
for _, r := range m.r {
d.cleanupReloc(r)
type methodref struct {
m methodsig
src *LSym // receiver type symbol
- r [3]*Reloc // R_METHOD relocations to fields of runtime.method
+ r [3]*Reloc // R_METHODOFF relocations to fields of runtime.method
}
func (m methodref) ifn() *LSym { return m.r[1].Sym }
func (d *deadcodepass) cleanupReloc(r *Reloc) {
if r.Sym.Attr.Reachable() {
- r.Type = obj.R_ADDR
+ r.Type = obj.R_ADDROFF
} else {
if Debug['v'] > 1 {
fmt.Fprintf(d.ctxt.Bso, "removing method %s\n", r.Sym.Name)
func (d *deadcodepass) markMethod(m methodref) {
for _, r := range m.r {
d.mark(r.Sym, m.src)
- r.Type = obj.R_ADDR
+ r.Type = obj.R_ADDROFF
}
}
}
}
- mpos := 0 // 0-3, the R_METHOD relocs of runtime.uncommontype
+ mpos := 0 // 0-3, the R_METHODOFF relocs of runtime.uncommontype
var methods []methodref
for i := 0; i < len(s.R); i++ {
r := &s.R[i]
if r.Sym == nil {
continue
}
- if r.Type != obj.R_METHOD {
+ if r.Type != obj.R_METHODOFF {
d.mark(r.Sym, s)
continue
}
}
}
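// Each method contributes exactly three R_METHODOFF relocations
// (mtyp, ifn, tfn, emitted by dmethodptrOffLSym in the compiler), so
// mpos cycles through 0, 1, 2 as a methodref is assembled, and the
// methodref is complete once all three relocations have been collected.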
-func commonsize() int { return 6*SysArch.PtrSize + 8 } // runtime._type
-func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield
-func uncommonSize() int { return 2*SysArch.PtrSize + 2*SysArch.IntSize } // runtime.uncommontype
+func commonsize() int { return 6*SysArch.PtrSize + 8 } // runtime._type
+func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield
+func uncommonSize() int { return 2 * SysArch.PtrSize } // runtime.uncommontype
// Type.commonType.kind
func decodetype_kind(s *LSym) uint8 {
// just Sizeof(rtype)
}
- numMethods := int(decode_inuxi(s.P[off+2*SysArch.PtrSize:], SysArch.IntSize))
- r := decode_reloc(s, int32(off+SysArch.PtrSize))
- if r.Sym != s {
- panic(fmt.Sprintf("method slice pointer in %s leads to a different symbol %s", s, r.Sym))
+ mcount := int(decode_inuxi(s.P[off+SysArch.PtrSize:], 2))
+ moff := int(decode_inuxi(s.P[off+SysArch.PtrSize+2:], 2))
+ off += moff // offset to array of reflect.method values
+ var sizeofMethod int // sizeof reflect.method in program
+ if SysArch.PtrSize == 4 {
+ sizeofMethod = 4 * SysArch.PtrSize
+ } else {
+ sizeofMethod = 3 * SysArch.PtrSize
}
- off = int(r.Add) // array of reflect.method values
- sizeofMethod := 4 * SysArch.PtrSize // sizeof reflect.method in program
- return decode_methodsig(s, off, sizeofMethod, numMethods)
+ return decode_methodsig(s, off, sizeofMethod, mcount)
}
if ut == nil {
panic("type has no methods")
}
- m := ut.methods[0]
+ m := ut.methods()[0]
if *m.name.data(0)&(1<<2) == 0 {
panic("method name does not have pkgPath *string")
}
// Method on non-interface type
type method struct {
- name name // name of method
- mtyp *rtype // method type (without receiver)
- ifn unsafe.Pointer // fn used in interface call (one-word receiver)
- tfn unsafe.Pointer // fn used for normal method call
+ name name // name of method
+ mtyp typeOff // method type (without receiver)
+ ifn textOff // fn used in interface call (one-word receiver)
+ tfn textOff // fn used for normal method call
}
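// With three 32-bit offsets in place of three pointers, a method entry
// shrinks from 4*ptrSize bytes to ptrSize+12 (padded to 3*ptrSize on
// 64-bit targets), matching the sizeofMethod computation in the
// linker's decodetype change above.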
// uncommonType is present only for types with names or methods
// Using a pointer to this struct reduces the overall size required
// to describe an unnamed type with no methods.
type uncommonType struct {
- pkgPath *string // import path; nil for built-in types like int, string
- methods []method // methods associated with type
+ pkgPath *string // import path; nil for built-in types like int, string
+ mcount uint16 // number of methods
+ moff uint16 // offset from this uncommontype to [mcount]method
}
// ChanDir represents a channel type's direction.
UnsafePointer: "unsafe.Pointer",
}
+func (t *uncommonType) methods() []method {
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
+}
+
func (t *uncommonType) PkgPath() string {
if t == nil || t.pkgPath == nil {
return ""
return *t.pkgPath
}
+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTextOff resolves a function pointer offset from a base type.
+// The (*rtype).textOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// addReflectOff adds a pointer to the reflection lookup map in the runtime.
+// It returns a new ID that can be used as a typeOff or textOff, and will
+// be resolved correctly. Implemented in the runtime package.
+func addReflectOff(ptr unsafe.Pointer) int32
+
+// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
+// It returns a new typeOff that can be used to refer to the pointer.
+func resolveReflectType(t *rtype) typeOff {
+ return typeOff(addReflectOff(unsafe.Pointer(t)))
+}
+
+// resolveReflectText adds a function pointer to the reflection lookup map in
+// the runtime. It returns a new textOff that can be used to refer to the
+// pointer.
+func resolveReflectText(ptr unsafe.Pointer) textOff {
+ return textOff(addReflectOff(ptr))
+}
+
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+ if off == 0 {
+ return nil
+ }
+ return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) textOff(off textOff) unsafe.Pointer {
+ return resolveTextOff(unsafe.Pointer(t), int32(off))
+}
+
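// A sketch of the round trip for a type created at run time (IDs are
// illustrative): resolveReflectType pins t and hands back a negative
// typeOff, which typeOff resolves through the runtime's reflectOffs map:
//
//	off := resolveReflectType(t) // e.g. typeOff(-1)
//	u := t.typeOff(off)          // u == t
//
// Types linked into the binary resolve through their module's types
// section instead, as the runtime's (*_type).typeOff below shows.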
func (t *rtype) uncommon() *uncommonType {
if t.tflag&tflagUncommon == 0 {
return nil
}
switch t.Kind() {
case Struct:
- return &(*structTypeWithMethods)(unsafe.Pointer(t)).u
+ return &(*structTypeUncommon)(unsafe.Pointer(t)).u
case Ptr:
type u struct {
ptrType
if ut == nil {
return 0
}
- return len(ut.methods)
+ return int(ut.mcount)
}
func (t *rtype) Method(i int) (m Method) {
}
ut := t.uncommon()
- if ut == nil || i < 0 || i >= len(ut.methods) {
+ if ut == nil || i < 0 || i >= int(ut.mcount) {
panic("reflect: Method index out of range")
}
- p := &ut.methods[i]
+ p := ut.methods()[i]
m.Name = p.name.name()
fl := flag(Func)
if !p.name.isExported() {
m.PkgPath = *pkgPath
fl |= flagStickyRO
}
- if p.mtyp != nil {
- ft := (*funcType)(unsafe.Pointer(p.mtyp))
+ if p.mtyp != 0 {
+ mtyp := t.typeOff(p.mtyp)
+ ft := (*funcType)(unsafe.Pointer(mtyp))
in := make([]Type, 0, 1+len(ft.in()))
in = append(in, t)
for _, arg := range ft.in() {
for _, ret := range ft.out() {
out = append(out, ret)
}
- mt := FuncOf(in, out, p.mtyp.IsVariadic())
+ mt := FuncOf(in, out, ft.IsVariadic())
m.Type = mt
- fn := unsafe.Pointer(&p.tfn)
+ tfn := t.textOff(p.tfn)
+ fn := unsafe.Pointer(&tfn)
m.Func = Value{mt.(*rtype), fn, fl}
}
m.Index = i
if ut == nil {
return Method{}, false
}
- for i := range ut.methods {
- p := &ut.methods[i]
+ utmethods := ut.methods()
+ for i := 0; i < int(ut.mcount); i++ {
+ p := utmethods[i]
if p.name.name() == name {
return t.Method(i), true
}
return false
}
i := 0
- for j := 0; j < len(v.methods); j++ {
+ vmethods := v.methods()
+ for j := 0; j < int(v.mcount); j++ {
tm := &t.methods[i]
- vm := &v.methods[j]
- if vm.name.name() == tm.name.name() && vm.mtyp == tm.typ {
+ vm := vmethods[j]
+ if vm.name.name() == tm.name.name() && V.typeOff(vm.mtyp) == tm.typ {
if i++; i >= len(t.methods) {
return true
}
return cachePut(ckey, &slice.rtype)
}
-// structTypeWithMethods is a structType created at runtime with StructOf.
-// It is needed to pin the []method slice from its associated uncommonType struct.
-// Keep in sync with the memory layout of structType.
-type structTypeWithMethods struct {
- structType
- u uncommonType
-}
-
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
-// the *structType and its associated *uncommonType (especially the
-// []method slice field of that uncommonType.)
+// the memory associated with *structTypeFixedN.
var structLookupCache struct {
sync.RWMutex
- m map[uint32][]*structTypeWithMethods // keyed by hash calculated in StructOf
+ m map[uint32][]interface {
+ common() *rtype
+ } // keyed by hash calculated in StructOf
+}
+
+type structTypeUncommon struct {
+ structType
+ u uncommonType
+}
+
+// A *rtype representing a struct is followed directly in memory by an
+// array of method objects representing the methods attached to the
+// struct. To get the same layout for a run time generated type, we
+// need an array directly following the uncommonType memory. The types
+// structTypeFixed4, ...structTypeFixedN are used to do this.
+//
+// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
+
+// TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs
+// have no methods, they could be defined at runtime using the StructOf
+// function.
+
+type structTypeFixed4 struct {
+ structType
+ u uncommonType
+ m [4]method
+}
+
+type structTypeFixed8 struct {
+ structType
+ u uncommonType
+ m [8]method
+}
+
+type structTypeFixed16 struct {
+ structType
+ u uncommonType
+ m [16]method
+}
+
+type structTypeFixed32 struct {
+ structType
+ u uncommonType
+ m [32]method
}
// StructOf returns the struct type containing fields.
typalign uint8
comparable = true
hashable = true
- typ = new(structTypeWithMethods)
+ methods []method
fs = make([]structField, len(fields))
repr = make([]byte, 0, 64)
}
return recv.Field(ifield).Method(imethod).Call(args)
})
-
} else {
tfn = MakeFunc(m.typ, func(in []Value) []Value {
var args []Value
}
return recv.Field(ifield).Method(imethod).Call(args)
})
-
}
- typ.u.methods = append(
- typ.u.methods,
- method{
- name: m.name,
- mtyp: m.typ,
- ifn: unsafe.Pointer(&ifn),
- tfn: unsafe.Pointer(&tfn),
- },
- )
+ methods = append(methods, method{
+ name: m.name,
+ mtyp: resolveReflectType(m.typ),
+ ifn: resolveReflectText(unsafe.Pointer(&ifn)),
+ tfn: resolveReflectText(unsafe.Pointer(&tfn)),
+ })
}
case Ptr:
ptr := (*ptrType)(unsafe.Pointer(ft))
if unt := ptr.uncommon(); unt != nil {
- for _, m := range unt.methods {
+ for _, m := range unt.methods() {
if m.name.pkgPath() != nil {
// TODO(sbinet)
panic("reflect: embedded interface with unexported method(s) not implemented")
}
- typ.u.methods = append(typ.u.methods, m)
+ methods = append(methods, method{
+ name: m.name,
+ mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.textOff(m.tfn)),
+ })
}
}
if unt := ptr.elem.uncommon(); unt != nil {
- for _, m := range unt.methods {
+ for _, m := range unt.methods() {
if m.name.pkgPath() != nil {
// TODO(sbinet)
panic("reflect: embedded interface with unexported method(s) not implemented")
}
- typ.u.methods = append(typ.u.methods, m)
+ methods = append(methods, method{
+ name: m.name,
+ mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
+ tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
+ })
}
}
default:
if unt := ft.uncommon(); unt != nil {
- for _, m := range unt.methods {
+ for _, m := range unt.methods() {
if m.name.pkgPath() != nil {
// TODO(sbinet)
panic("reflect: embedded interface with unexported method(s) not implemented")
}
- typ.u.methods = append(typ.u.methods, m)
+ methods = append(methods, method{
+ name: m.name,
+ mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
+ ifn: resolveReflectText(ft.textOff(m.ifn)),
+ tfn: resolveReflectText(ft.textOff(m.tfn)),
+ })
}
}
}
fs[i] = f
}
+
+ var typ *structType
+ var ut *uncommonType
+ var typPin interface {
+ common() *rtype
+ } // structTypeFixedN
+
+ switch {
+ case len(methods) == 0:
+ t := new(structTypeUncommon)
+ typ = &t.structType
+ ut = &t.u
+ typPin = t
+ case len(methods) <= 4:
+ t := new(structTypeFixed4)
+ typ = &t.structType
+ ut = &t.u
+ copy(t.m[:], methods)
+ typPin = t
+ case len(methods) <= 8:
+ t := new(structTypeFixed8)
+ typ = &t.structType
+ ut = &t.u
+ copy(t.m[:], methods)
+ typPin = t
+ case len(methods) <= 16:
+ t := new(structTypeFixed16)
+ typ = &t.structType
+ ut = &t.u
+ copy(t.m[:], methods)
+ typPin = t
+ case len(methods) <= 32:
+ t := new(structTypeFixed32)
+ typ = &t.structType
+ ut = &t.u
+ copy(t.m[:], methods)
+ typPin = t
+ default:
+ panic("reflect.StructOf: too many methods")
+ }
+ ut.mcount = uint16(len(methods))
+ ut.moff = uint16(unsafe.Sizeof(uncommonType{}))
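	// moff == Sizeof(uncommonType{}) relies on each structTypeFixedN
	// above placing its m [N]method array immediately after the u
	// uncommonType field, with no padding in between.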
+
if len(fs) > 0 {
repr = append(repr, ' ')
}
// Make the struct type.
var istruct interface{} = struct{}{}
prototype := *(**structType)(unsafe.Pointer(&istruct))
- typ.structType = *prototype
- typ.structType.fields = fs
+ *typ = *prototype
+ typ.fields = fs
// Look in cache
structLookupCache.RLock()
- for _, t := range structLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) {
+ for _, st := range structLookupCache.m[hash] {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t) {
structLookupCache.RUnlock()
- return &t.rtype
+ return t
}
}
structLookupCache.RUnlock()
structLookupCache.Lock()
defer structLookupCache.Unlock()
if structLookupCache.m == nil {
- structLookupCache.m = make(map[uint32][]*structTypeWithMethods)
+ structLookupCache.m = make(map[uint32][]interface {
+ common() *rtype
+ })
}
- for _, t := range structLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) {
- return &t.rtype
+ for _, st := range structLookupCache.m[hash] {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t) {
+ return t
}
}
// even if 't' wasn't a structType with methods, we should be ok
// as the 'u uncommonType' field won't be accessed except when
// tflag&tflagUncommon is set.
- tt := (*structTypeWithMethods)(unsafe.Pointer(t))
- structLookupCache.m[hash] = append(structLookupCache.m[hash], tt)
- return &tt.rtype
+ structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
+ return t
}
}
typ.size = size
typ.align = typalign
typ.fieldAlign = typalign
- if len(typ.u.methods) > 0 {
+ if len(methods) > 0 {
typ.tflag |= tflagUncommon
}
if !hasPtr {
typ.kind &^= kindDirectIface
}
- structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
+ structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin)
return &typ.rtype
}
}
}
+ _ = resolveReflectType(field.Type.common()) // pin the field type in the runtime's reflection offset map
return structField{
name: newName(field.Name, string(field.Tag), field.PkgPath, exported),
typ: field.Type.common(),
} else {
rcvrtype = v.typ
ut := v.typ.uncommon()
- if ut == nil || uint(i) >= uint(len(ut.methods)) {
+ if ut == nil || uint(i) >= uint(ut.mcount) {
panic("reflect: internal error: invalid method index")
}
- m := &ut.methods[i]
+ m := ut.methods()[i]
if !m.name.isExported() {
panic("reflect: " + op + " of unexported method")
}
- fn = unsafe.Pointer(&m.ifn)
- t = m.mtyp
+ ifn := v.typ.textOff(m.ifn)
+ fn = unsafe.Pointer(&ifn)
+ t = v.typ.typeOff(m.mtyp)
}
return
}
}
// Method on concrete type.
ut := v.typ.uncommon()
- if ut == nil || uint(i) >= uint(len(ut.methods)) {
+ if ut == nil || uint(i) >= uint(ut.mcount) {
panic("reflect: internal error: invalid method index")
}
- m := &ut.methods[i]
- return m.mtyp
+ m := ut.methods()[i]
+ return v.typ.typeOff(m.mtyp)
}
// Uint returns v's underlying value, as a uint64.
// so can iterate over both in lock step;
// the loop is O(ni+nt) not O(ni*nt).
ni := len(inter.mhdr)
- nt := len(x.mhdr)
+ nt := int(x.mcount)
+ xmhdr := (*[1 << 16]method)(add(unsafe.Pointer(x), uintptr(x.moff)))[:nt:nt]
j := 0
for k := 0; k < ni; k++ {
i := &inter.mhdr[k]
ipkg = inter.pkgpath
}
for ; j < nt; j++ {
- t := &x.mhdr[j]
- if t.mtyp == itype && t.name.name() == iname {
+ t := &xmhdr[j]
+ if typ.typeOff(t.mtyp) == itype && t.name.name() == iname {
pkgPath := t.name.pkgPath()
if pkgPath == nil {
pkgPath = x.pkgpath
}
if t.name.isExported() || pkgPath == ipkg {
if m != nil {
- *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
+ ifn := typ.textOff(t.ifn)
+ *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = ifn
}
goto nextimethod
}
tracebackinit()
moduledataverify()
stackinit()
- itabsinit()
mallocinit()
mcommoninit(_g_.m)
+ typelinksinit()
+ itabsinit()
msigsave(_g_.m)
initSigmask = _g_.m.sigmask
}
return sections, ret
}
+
+// reflect_resolveTypeOff resolves an *rtype offset from a base type.
+//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
+func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+ return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
+}
+
+// reflect_resolveTextOff resolves a function pointer offset from a base type.
+//go:linkname reflect_resolveTextOff reflect.resolveTextOff
+func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+ return (*_type)(rtype).textOff(textOff(off))
+}
+
+// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
+//go:linkname reflect_addReflectOff reflect.addReflectOff
+func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
+ lock(&reflectOffs.lock)
+ if reflectOffs.m == nil {
+ reflectOffs.m = make(map[int32]unsafe.Pointer)
+ reflectOffs.minv = make(map[unsafe.Pointer]int32)
+ reflectOffs.next = -1
+ }
+ id, found := reflectOffs.minv[ptr]
+ if !found {
+ id = reflectOffs.next
+ reflectOffs.next-- // use negative offsets as IDs to aid debugging
+ reflectOffs.m[id] = ptr
+ reflectOffs.minv[ptr] = id
+ }
+ unlock(&reflectOffs.lock)
+ return id
+}
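// Illustrative behavior (editor's note): the first pointer registered
// gets ID -1, the next distinct pointer -2, and re-adding a known
// pointer returns its existing ID via minv. Negative IDs can never
// collide with compile-time offsets, which are non-negative offsets
// into a module's types or text section.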
gcdatamask, gcbssmask bitvector
+ typemap map[typeOff]*_type // offset to *_type in previous module
+
next *moduledata
}
return t._string[i+1:]
}
+// reflectOffs holds type offsets defined at run time by the reflect package.
+//
+// When a type is defined at run time, its *rtype data lives on the heap.
+// There is a wide range of possible addresses the heap may use, which
+// may not be representable as a 32-bit offset. Moreover the GC may
+// one day start moving heap memory, in which case there is no stable
+// offset that can be defined.
+//
+// To provide stable offsets, we pin *rtype objects in a global map
+// and treat the offset as an identifier. We use negative offsets that
+// do not overlap with any compile-time module offsets.
+//
+// Entries are created by reflect.addReflectOff.
+var reflectOffs struct {
+ lock mutex
+ next int32
+ m map[int32]unsafe.Pointer
+ minv map[unsafe.Pointer]int32
+}
+
+func (t *_type) typeOff(off typeOff) *_type {
+ if off == 0 {
+ return nil
+ }
+ base := uintptr(unsafe.Pointer(t))
+ var md *moduledata
+ for next := &firstmoduledata; next != nil; next = next.next {
+ if base >= next.types && base < next.etypes {
+ md = next
+ break
+ }
+ }
+ if md == nil {
+ lock(&reflectOffs.lock)
+ res := reflectOffs.m[int32(off)]
+ unlock(&reflectOffs.lock)
+ if res == nil {
+ println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: type offset base pointer out of range")
+ }
+ return (*_type)(res)
+ }
+ if t := md.typemap[off]; t != nil {
+ return t
+ }
+ res := md.types + uintptr(off)
+ if res > md.etypes {
+ println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+ throw("runtime: type offset out of range")
+ }
+ return (*_type)(unsafe.Pointer(res))
+}
+
+func (t *_type) textOff(off textOff) unsafe.Pointer {
+ base := uintptr(unsafe.Pointer(t))
+ var md *moduledata
+ for next := &firstmoduledata; next != nil; next = next.next {
+ if base >= next.types && base < next.etypes {
+ md = next
+ break
+ }
+ }
+ if md == nil {
+ lock(&reflectOffs.lock)
+ res := reflectOffs.m[int32(off)]
+ unlock(&reflectOffs.lock)
+ if res == nil {
+ println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
+ for next := &firstmoduledata; next != nil; next = next.next {
+ println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+ }
+ throw("runtime: text offset base pointer out of range")
+ }
+ return res
+ }
+ res := md.text + uintptr(off)
+ if res > md.etext {
+ println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
+ throw("runtime: text offset out of range")
+ }
+ return unsafe.Pointer(res)
+}
+
func (t *functype) in() []*_type {
// See funcType in reflect/type.go for details on data layout.
uadd := uintptr(unsafe.Sizeof(functype{}))
return t.outCount&(1<<15) != 0
}
+type typeOff int32
+type textOff int32
+
type method struct {
name name
- mtyp *_type
- ifn unsafe.Pointer
- tfn unsafe.Pointer
+ mtyp typeOff
+ ifn textOff
+ tfn textOff
}
type uncommontype struct {
pkgpath *string
- mhdr []method
+ mcount uint16 // number of methods
+ moff uint16 // offset from this uncommontype to [mcount]method
}
type imethod struct {
return s
}
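// tag returns the struct field tag encoded after the name. The name
// data is laid out as 1 flag byte, 2 name-length bytes, the name
// bytes, then 2 tag-length bytes followed by the tag, so the tag
// starts at offset 3+nameLen+2.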
+func (n *name) tag() (s string) {
+ tl := n.tagLen()
+ if tl == 0 {
+ return ""
+ }
+ nl := n.nameLen()
+ hdr := (*stringStruct)(unsafe.Pointer(&s))
+ hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
+ hdr.len = tl
+ return s
+}
+
func (n *name) pkgPath() *string {
if *n.data(0)&(1<<2) == 0 {
return nil
off = int(round(uintptr(off), sys.PtrSize))
return *(**string)(unsafe.Pointer(n.data(off)))
}
+
+// typelinksinit scans the types from extra modules and builds the
+// moduledata typemap used to de-duplicate type pointers.
+func typelinksinit() {
+ if firstmoduledata.next == nil {
+ return
+ }
+ typehash := make(map[uint32][]*_type)
+
+ modules := []*moduledata{}
+ for md := &firstmoduledata; md != nil; md = md.next {
+ modules = append(modules, md)
+ }
+ prev, modules := modules[0], modules[1:]
+ for len(modules) > 0 {
+ // Collect types from the previous module into typehash.
+ collect:
+ for _, tl := range prev.typelinks {
+ var t *_type
+ if prev.typemap == nil {
+ t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
+ } else {
+ t = prev.typemap[typeOff(tl)]
+ }
+ // Add to typehash if not seen before.
+ tlist := typehash[t.hash]
+ for _, tcur := range tlist {
+ if tcur == t {
+ continue collect
+ }
+ }
+ typehash[t.hash] = append(tlist, t)
+ }
+
+ // If any of this module's typelinks match a type from a
+ // prior module, prefer that prior type by adding the offset
+ // to this module's typemap.
+ md := modules[0]
+ md.typemap = make(map[typeOff]*_type, len(md.typelinks))
+ for _, tl := range md.typelinks {
+ t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
+ for _, candidate := range typehash[t.hash] {
+ if typesEqual(t, candidate) {
+ t = candidate
+ break
+ }
+ }
+ md.typemap[typeOff(tl)] = t
+ }
+
+ prev, modules = md, modules[1:]
+ }
+}
+
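// A worked sketch (editor's note): with modules M1 (the executable)
// and M2 (a shared library) both containing an identical type t, the
// loop above leaves:
//
//	M1.typemap == nil             // first module; offsets resolve directly
//	M2.typemap[off_in_M2] == t_M1 // later module defers to the prior copy
//
// so (*_type).typeOff yields the same pointer from either module,
// preserving the one-*_type-per-type assumption documented on
// typesEqual below.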
+// typesEqual reports whether two types are equal.
+//
+// Everywhere in the runtime and reflect packages, it is assumed that
+// there is exactly one *_type per Go type, so that pointer equality
+// can be used to test if types are equal. There is one place that
+// breaks this assumption: buildmode=shared. In this case a type can
+// appear as two different pieces of memory. This is hidden from the
+// runtime and reflect package by the per-module typemap built in
+// typelinksinit. It uses typesEqual to map types from later modules
+// back into earlier ones.
+//
+// Only typelinksinit needs this function.
+func typesEqual(t, v *_type) bool {
+ if t == v {
+ return true
+ }
+ kind := t.kind & kindMask
+ if kind != v.kind&kindMask {
+ return false
+ }
+ if t._string != v._string {
+ return false
+ }
+ ut := t.uncommon()
+ uv := v.uncommon()
+ if ut != nil || uv != nil {
+ if ut == nil || uv == nil {
+ return false
+ }
+ if !pkgPathEqual(ut.pkgpath, uv.pkgpath) {
+ return false
+ }
+ }
+ if kindBool <= kind && kind <= kindComplex128 {
+ return true
+ }
+ switch kind {
+ case kindString, kindUnsafePointer:
+ return true
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(t))
+ av := (*arraytype)(unsafe.Pointer(v))
+ return typesEqual(at.elem, av.elem) && at.len == av.len
+ case kindChan:
+ ct := (*chantype)(unsafe.Pointer(t))
+ cv := (*chantype)(unsafe.Pointer(v))
+ return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem)
+ case kindFunc:
+ ft := (*functype)(unsafe.Pointer(t))
+ fv := (*functype)(unsafe.Pointer(v))
+ if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
+ return false
+ }
+ tin, vin := ft.in(), fv.in()
+ for i := 0; i < len(tin); i++ {
+ if !typesEqual(tin[i], vin[i]) {
+ return false
+ }
+ }
+ tout, vout := ft.out(), fv.out()
+ for i := 0; i < len(tout); i++ {
+ if !typesEqual(tout[i], vout[i]) {
+ return false
+ }
+ }
+ return true
+ case kindInterface:
+ it := (*interfacetype)(unsafe.Pointer(t))
+ iv := (*interfacetype)(unsafe.Pointer(v))
+ if !pkgPathEqual(it.pkgpath, iv.pkgpath) {
+ return false
+ }
+ if len(it.mhdr) != len(iv.mhdr) {
+ return false
+ }
+ for i := range it.mhdr {
+ tm := &it.mhdr[i]
+ vm := &iv.mhdr[i]
+ if tm.name.name() != vm.name.name() {
+ return false
+ }
+ if !pkgPathEqual(tm.name.pkgPath(), vm.name.pkgPath()) {
+ return false
+ }
+ if !typesEqual(tm._type, vm._type) {
+ return false
+ }
+ }
+ return true
+ case kindMap:
+ mt := (*maptype)(unsafe.Pointer(t))
+ mv := (*maptype)(unsafe.Pointer(v))
+ return typesEqual(mt.key, mv.key) && typesEqual(mt.elem, mv.elem)
+ case kindPtr:
+ pt := (*ptrtype)(unsafe.Pointer(t))
+ pv := (*ptrtype)(unsafe.Pointer(v))
+ return typesEqual(pt.elem, pv.elem)
+ case kindSlice:
+ st := (*slicetype)(unsafe.Pointer(t))
+ sv := (*slicetype)(unsafe.Pointer(v))
+ return typesEqual(st.elem, sv.elem)
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t))
+ sv := (*structtype)(unsafe.Pointer(v))
+ if len(st.fields) != len(sv.fields) {
+ return false
+ }
+ for i := range st.fields {
+ tf := &st.fields[i]
+ vf := &sv.fields[i]
+ if tf.name.name() != vf.name.name() {
+ return false
+ }
+ if !pkgPathEqual(tf.name.pkgPath(), vf.name.pkgPath()) {
+ return false
+ }
+ if !typesEqual(tf.typ, vf.typ) {
+ return false
+ }
+ if tf.name.tag() != vf.name.tag() {
+ return false
+ }
+ if tf.offset != vf.offset {
+ return false
+ }
+ }
+ return true
+ default:
+ println("runtime: impossible type kind", kind)
+ throw("runtime: impossible type kind")
+ return false
+ }
+}
+
+func pkgPathEqual(p, q *string) bool {
+ if p == q {
+ return true
+ }
+ if p == nil || q == nil {
+ return false
+ }
+ return *p == *q
+}