// It panics if i is not in the range [0, NumOut()).
Out(i int) Type
- common() *rtype
+ common() *abi.Type
uncommon() *uncommonType
}
// Ptr is the old name for the Pointer kind.
const Ptr = Pointer
+// uncommonType is present only for defined types or types with methods
+// (if T is a defined type, the uncommonTypes for T and *T have methods).
+// Using a pointer to this struct reduces the overall size required
+// to describe a non-defined type with no methods.
+type uncommonType = abi.UncommonType
+
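For context, uncommonType is what backs method lookup through the public API: a defined type carries one, an unnamed composite type does not. A minimal sketch (type name ours):

	type MyInt int           // defined type, so it gets an uncommonType
	func (MyInt) Double() {} // recorded in MyInt's uncommonType
	// reflect.TypeOf(MyInt(0)).NumMethod()   == 1
	// reflect.TypeOf([]int(nil)).NumMethod() == 0 (non-defined, no methods)
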
+// Embed this type to get common/uncommon methods.
+type common struct {
+ abi.Type
+}
+
// rtype is the common implementation of most values.
// It is embedded in other struct types.
type rtype struct {
t abi.Type
}
-type nameOff = abi.NameOff
-type typeOff = abi.TypeOff
-type textOff = abi.TextOff
+func (t *rtype) common() *abi.Type {
+ return &t.t
+}
-// uncommonType is present only for defined types or types with methods
-// (if T is a defined type, the uncommonTypes for T and *T have methods).
-// Using a pointer to this struct reduces the overall size required
-// to describe a non-defined type with no methods.
-type uncommonType = abi.UncommonType
+func (t *rtype) uncommon() *abi.UncommonType {
+ return t.t.Uncommon()
+}
+
+type aNameOff = abi.NameOff
+type aTypeOff = abi.TypeOff
+type aTextOff = abi.TextOff
// ChanDir represents a channel type's direction.
type ChanDir int
// interfaceType represents an interface type.
type interfaceType struct {
- rtype
- PkgPath abi.Name // import path
- Methods []abi.Imethod // sorted by hash
+ abi.InterfaceType // can be embedded directly because interfaceType is not a public type.
+}
+
+func (t *interfaceType) nameOff(off aNameOff) abi.Name {
+ return toRType(&t.Type).nameOff(off)
+}
+
+func nameOffFor(t *abi.Type, off aNameOff) abi.Name {
+ return toRType(t).nameOff(off)
+}
+
+func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type {
+ return toRType(t).typeOff(off)
+}
+
+func (t *interfaceType) typeOff(off aTypeOff) *abi.Type {
+ return toRType(&t.Type).typeOff(off)
+}
+
+func (t *interfaceType) common() *abi.Type {
+ return &t.Type
+}
+
+func (t *interfaceType) uncommon() *abi.UncommonType {
+ return t.Uncommon()
}
// mapType represents a map type.
type mapType struct {
- rtype
- Key *rtype // map key type
- Elem *rtype // map element (value) type
- Bucket *rtype // internal bucket structure
- // function for hashing keys (ptr to key, seed) -> hash
- Hasher func(unsafe.Pointer, uintptr) uintptr
- Keysize uint8 // size of key slot
- Valuesize uint8 // size of value slot
- Bucketsize uint16 // size of bucket
- Flags uint32
+ abi.MapType
}
// ptrType represents a pointer type.
type ptrType struct {
- rtype
- Elem *rtype // pointer element (pointed at) type
+ abi.PtrType
}
// sliceType represents a slice type.
type sliceType struct {
- rtype
- Elem *rtype // slice element type
+ abi.SliceType
}
// Struct field
-type structField struct {
- Name abi.Name // name is always non-empty
- Typ *rtype // type of field
- Offset uintptr // byte offset of field
-}
-
-func (f *structField) embedded() bool {
- return f.Name.IsEmbedded()
-}
+type structField = abi.StructField
// structType represents a struct type.
type structType struct {
- rtype
- PkgPath abi.Name
- Fields []structField // sorted by offset
+ abi.StructType
}
func pkgPath(n abi.Name) string {
// resolveReflectName adds a name to the reflection lookup map in the runtime.
// It returns a new nameOff that can be used to refer to the pointer.
-func resolveReflectName(n abi.Name) nameOff {
- return nameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
+func resolveReflectName(n abi.Name) aNameOff {
+ return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
}
// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
// It returns a new typeOff that can be used to refer to the pointer.
-func resolveReflectType(t *rtype) typeOff {
- return typeOff(addReflectOff(unsafe.Pointer(t)))
+func resolveReflectType(t *abi.Type) aTypeOff {
+ return aTypeOff(addReflectOff(unsafe.Pointer(t)))
}
// resolveReflectText adds a function pointer to the reflection lookup map in
// the runtime. It returns a new textOff that can be used to refer to the
// pointer.
-func resolveReflectText(ptr unsafe.Pointer) textOff {
- return textOff(addReflectOff(ptr))
+func resolveReflectText(ptr unsafe.Pointer) aTextOff {
+ return aTextOff(addReflectOff(ptr))
}
-func (t *rtype) nameOff(off nameOff) abi.Name {
+func (t *rtype) nameOff(off aNameOff) abi.Name {
return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
}
-func (t *rtype) typeOff(off typeOff) *rtype {
- return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+func (t *rtype) typeOff(off aTypeOff) *abi.Type {
+ return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
}
-func (t *rtype) textOff(off textOff) unsafe.Pointer {
+func (t *rtype) textOff(off aTextOff) unsafe.Pointer {
return resolveTextOff(unsafe.Pointer(t), int32(off))
}
-func (t *rtype) uncommon() *uncommonType {
- return t.t.Uncommon()
+func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer {
+ return toRType(t).textOff(off)
}
func (t *rtype) String() string {
func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) }
-func (t *rtype) pointers() bool { return t.t.PtrBytes != 0 }
-
-func (t *rtype) common() *rtype { return t }
-
func (t *rtype) exportedMethods() []abi.Method {
ut := t.uncommon()
if ut == nil {
m.Type = mt
tfn := t.textOff(p.Tfn)
fn := unsafe.Pointer(&tfn)
- m.Func = Value{mt.(*rtype), fn, fl}
+ m.Func = Value{&mt.(*rtype).t, fn, fl}
m.Index = i
return m
return t.nameOff(ut.PkgPath).Name()
}
-func (t *rtype) hasName() bool {
- return t.t.TFlag&abi.TFlagNamed != 0
+func pkgPathFor(t *abi.Type) string {
+ return toRType(t).PkgPath()
}
func (t *rtype) Name() string {
- if !t.hasName() {
+ if !t.t.HasName() {
return ""
}
s := t.String()
return s[i+1:]
}
+func nameFor(t *abi.Type) string {
+ return toRType(t).Name()
+}
+
func (t *rtype) ChanDir() ChanDir {
if t.Kind() != Chan {
panic("reflect: ChanDir of non-chan type " + t.String())
return (*rtype)(unsafe.Pointer(t))
}
-func (t *rtype) Elem() Type {
- switch t.Kind() {
- case Array:
- tt := (*arrayType)(unsafe.Pointer(t))
- return toType(toRType(tt.Elem))
- case Chan:
- tt := (*chanType)(unsafe.Pointer(t))
- return toType(toRType(tt.Elem))
- case Map:
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.Elem)
- case Pointer:
- tt := (*ptrType)(unsafe.Pointer(t))
- return toType(tt.Elem)
- case Slice:
- tt := (*sliceType)(unsafe.Pointer(t))
- return toType(tt.Elem)
+func elem(t *abi.Type) *abi.Type {
+ et := t.Elem()
+ if et != nil {
+ return et
}
- panic("reflect: Elem of invalid type " + t.String())
+ panic("reflect: Elem of invalid type " + stringFor(t))
+}
+
+func (t *rtype) Elem() Type {
+ return toType(elem(t.common()))
}
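Through the public API, Elem now works uniformly for the five element-bearing kinds and panics otherwise (illustrative):

	reflect.TypeOf(map[int]bool{}).Elem()     // bool
	reflect.TypeOf(make(chan string)).Elem()  // string
	reflect.TypeOf([]byte(nil)).Elem()        // uint8
	// reflect.TypeOf(42).Elem() panics: reflect: Elem of invalid type int
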
func (t *rtype) Field(i int) StructField {
panic("reflect: In of non-func type " + t.String())
}
tt := (*abi.FuncType)(unsafe.Pointer(t))
- return toType(toRType(tt.InSlice()[i]))
+ return toType(tt.InSlice()[i])
}
func (t *rtype) NumIn() int {
panic("reflect: Out of non-func type " + t.String())
}
tt := (*abi.FuncType)(unsafe.Pointer(t))
- return toType(toRType(tt.OutSlice()[i]))
+ return toType(tt.OutSlice()[i])
}
func (t *rtype) IsVariadic() bool {
p := &t.Fields[i]
f.Type = toType(p.Typ)
f.Name = p.Name.Name()
- f.Anonymous = p.embedded()
+ f.Anonymous = p.Embedded()
if !p.Name.IsExported() {
f.PkgPath = t.PkgPath.Name()
}
// FieldByIndex returns the nested field corresponding to index.
func (t *structType) FieldByIndex(index []int) (f StructField) {
- f.Type = toType(&t.rtype)
+ f.Type = toType(&t.Type)
for i, x := range index {
if i > 0 {
ft := f.Type
f := &t.Fields[i]
// Find name and (for embedded field) type for field f.
fname := f.Name.Name()
- var ntyp *rtype
- if f.embedded() {
+ var ntyp *abi.Type
+ if f.Embedded() {
// Embedded field of type T or *T.
ntyp = f.Typ
- if ntyp.Kind() == Pointer {
- ntyp = ntyp.Elem().common()
+ if ntyp.Kind() == abi.Pointer {
+ ntyp = ntyp.Elem()
}
}
// Queue embedded struct fields for processing with next level,
// but only if we haven't seen a match yet at this level and only
// if the embedded types haven't already been queued.
- if ok || ntyp == nil || ntyp.Kind() != Struct {
+ if ok || ntyp == nil || ntyp.Kind() != abi.Struct {
continue
}
styp := (*structType)(unsafe.Pointer(ntyp))
if tf.Name.Name() == name {
return t.Field(i), true
}
- if tf.embedded() {
+ if tf.Embedded() {
hasEmbeds = true
}
}
}
-// rtypeOf directly extracts the *rtype of the provided value.
+// rtypeOf directly extracts the *abi.Type of the provided value.
-func rtypeOf(i any) *rtype {
+func rtypeOf(i any) *abi.Type {
eface := *(*emptyInterface)(unsafe.Pointer(&i))
return eface.typ
}
// PointerTo returns the pointer type with element t.
// For example, if t represents type Foo, PointerTo(t) represents *Foo.
func PointerTo(t Type) Type {
- return t.(*rtype).ptrTo()
+ return toRType(t.(*rtype).ptrTo())
}
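Because ptrTo consults PtrToThis, the known-types list, and ptrMap before synthesizing anything, repeated calls yield the identical type object (illustrative sketch):

	pt := reflect.PointerTo(reflect.TypeOf(0))         // *int
	same := pt == reflect.PointerTo(reflect.TypeOf(0)) // true: cached, not re-created
	_ = same
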
-func (t *rtype) ptrTo() *rtype {
- if t.t.PtrToThis != 0 {
- return t.typeOff(t.t.PtrToThis)
+func (t *rtype) ptrTo() *abi.Type {
+ at := &t.t
+ if at.PtrToThis != 0 {
+ return t.typeOff(at.PtrToThis)
}
// Check the cache.
if pi, ok := ptrMap.Load(t); ok {
- return &pi.(*ptrType).rtype
+ return &pi.(*ptrType).Type
}
// Look in known types.
s := "*" + t.String()
for _, tt := range typesByString(s) {
p := (*ptrType)(unsafe.Pointer(tt))
- if p.Elem != t {
+ if p.Elem != &t.t {
continue
}
pi, _ := ptrMap.LoadOrStore(t, p)
- return &pi.(*ptrType).rtype
+ return &pi.(*ptrType).Type
}
// Create a new ptrType starting with the description
prototype := *(**ptrType)(unsafe.Pointer(&iptr))
pp := *prototype
- pp.t.Str = resolveReflectName(newName(s, "", false, false))
- pp.t.PtrToThis = 0
+ pp.Str = resolveReflectName(newName(s, "", false, false))
+ pp.PtrToThis = 0
// For the type structures linked into the binary, the
// compiler provides a good hash of the string.
// Create a good hash for the new string by using
// the FNV-1 hash's mixing function to combine the
// old hash and the new "*".
- pp.t.Hash = fnv1(t.t.Hash, '*')
+ pp.Hash = fnv1(t.t.Hash, '*')
- pp.Elem = t
+ pp.Elem = at
pi, _ := ptrMap.LoadOrStore(t, &pp)
- return &pi.(*ptrType).rtype
+ return &pi.(*ptrType).Type
+}
+
+func ptrTo(t *abi.Type) *abi.Type {
+ return toRType(t).ptrTo()
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
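The body is elided from this hunk; for reference, 32-bit FNV-1 mixing multiplies by the prime 16777619 and then XORs in each byte, roughly:

	func fnv1(x uint32, list ...byte) uint32 {
		for _, b := range list {
			x = x*16777619 ^ uint32(b)
		}
		return x
	}
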
if u.Kind() != Interface {
panic("reflect: non-interface type passed to Type.Implements")
}
- return implements(u.(*rtype), t)
+ return implements(u.common(), t.common())
}
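Through the public API (illustrative; note bytes.Buffer's methods are on the pointer receiver):

	rw := reflect.TypeOf((*io.ReadWriter)(nil)).Elem()
	reflect.TypeOf(&bytes.Buffer{}).Implements(rw) // true
	reflect.TypeOf(bytes.Buffer{}).Implements(rw)  // false: Read/Write need *Buffer
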
func (t *rtype) AssignableTo(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.AssignableTo")
}
- uu := u.(*rtype)
- return directlyAssignable(uu, t) || implements(uu, t)
+ uu := u.common()
+ return directlyAssignable(uu, t.common()) || implements(uu, t.common())
}
func (t *rtype) ConvertibleTo(u Type) bool {
if u == nil {
panic("reflect: nil type passed to Type.ConvertibleTo")
}
- uu := u.(*rtype)
- return convertOp(uu, t) != nil
+ return convertOp(u.common(), t.common()) != nil
}
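The distinction in practice (illustrative sketch, type name ours):

	type Celsius float64
	c := reflect.TypeOf(Celsius(0))
	f := reflect.TypeOf(float64(0))
	c.AssignableTo(f)  // false: two distinct defined types
	c.ConvertibleTo(f) // true: identical underlying type
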
func (t *rtype) Comparable() bool {
}
// implements reports whether the type V implements the interface type T.
-func implements(T, V *rtype) bool {
- if T.Kind() != Interface {
+func implements(T, V *abi.Type) bool {
+ if T.Kind() != abi.Interface {
return false
}
t := (*interfaceType)(unsafe.Pointer(T))
// This lets us run the scan in overall linear time instead of
// the quadratic time a naive search would require.
// See also ../runtime/iface.go.
- if V.Kind() == Interface {
+ if V.Kind() == abi.Interface {
v := (*interfaceType)(unsafe.Pointer(V))
i := 0
for j := 0; j < len(v.Methods); j++ {
tm := &t.Methods[i]
tmName := t.nameOff(tm.Name)
vm := &v.Methods[j]
- vmName := V.nameOff(vm.Name)
- if vmName.Name() == tmName.Name() && V.typeOff(vm.Typ) == t.typeOff(tm.Typ) {
+ vmName := nameOffFor(V, vm.Name)
+ if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
if !tmName.IsExported() {
tmPkgPath := pkgPath(tmName)
if tmPkgPath == "" {
return false
}
- v := V.uncommon()
+ v := V.Uncommon()
if v == nil {
return false
}
tm := &t.Methods[i]
tmName := t.nameOff(tm.Name)
vm := vmethods[j]
- vmName := V.nameOff(vm.Name)
- if vmName.Name() == tmName.Name() && V.typeOff(vm.Mtyp) == t.typeOff(tm.Typ) {
+ vmName := nameOffFor(V, vm.Name)
+ if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
if !tmName.IsExported() {
tmPkgPath := pkgPath(tmName)
if tmPkgPath == "" {
}
vmPkgPath := pkgPath(vmName)
if vmPkgPath == "" {
- vmPkgPath = V.nameOff(v.PkgPath).Name()
+ vmPkgPath = nameOffFor(V, v.PkgPath).Name()
}
if tmPkgPath != vmPkgPath {
continue
// can be directly assigned (using memmove) to another channel type T.
// https://golang.org/doc/go_spec.html#Assignability
-// T and V must be both of Chan kind.
+// T and V must both be of Chan kind.
-func specialChannelAssignability(T, V *rtype) bool {
+func specialChannelAssignability(T, V *abi.Type) bool {
// Special case:
// x is a bidirectional channel value, T is a channel type,
// x's type V and T have identical element types,
// and at least one of V or T is not a defined type.
- return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
+ return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
}
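This is the rule that lets a bidirectional channel value flow into a directional channel variable (illustrative sketch):

	bidi := reflect.TypeOf(make(chan int))                     // chan int
	recv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0)) // <-chan int
	bidi.AssignableTo(recv) // true: bidirectional, same element type, <-chan int is not defined
	recv.AssignableTo(bidi) // false
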
// directlyAssignable reports whether a value x of type V can be directly
// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
-func directlyAssignable(T, V *rtype) bool {
+func directlyAssignable(T, V *abi.Type) bool {
// x's type V is identical to T?
if T == V {
return true
// Otherwise at least one of T and V must not be defined
// and they must have the same kind.
- if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
+ if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
return false
}
- if T.Kind() == Chan && specialChannelAssignability(T, V) {
+ if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
return true
}
return haveIdenticalUnderlyingType(T, V, true)
}
-func haveIdenticalType(T, V Type, cmpTags bool) bool {
+func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
if cmpTags {
return T == V
}
- if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
+ if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
return false
}
- return haveIdenticalUnderlyingType(T.common(), V.common(), false)
+ return haveIdenticalUnderlyingType(T, V, false)
}
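cmpTags=false is what makes struct conversions ignore tags while assignability does not (illustrative sketch, type names ours):

	type A struct { X int `json:"x"` }
	type B struct { X int }
	reflect.TypeOf(A{}).ConvertibleTo(reflect.TypeOf(B{})) // true: tags ignored when converting
	reflect.TypeOf(A{}).AssignableTo(reflect.TypeOf(B{}))  // false: distinct defined types
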
-func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
+func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
if T == V {
return true
}
- kind := T.Kind()
- if kind != V.Kind() {
+ kind := Kind(T.Kind())
+ if kind != Kind(V.Kind()) {
return false
}
return false
}
for i := 0; i < t.NumIn(); i++ {
- if !haveIdenticalType(toRType(t.In(i)), toRType(v.In(i)), cmpTags) {
+ if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
return false
}
}
for i := 0; i < t.NumOut(); i++ {
- if !haveIdenticalType(toRType(t.Out(i)), toRType(v.Out(i)), cmpTags) {
+ if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
return false
}
}
if tf.Offset != vf.Offset {
return false
}
- if tf.embedded() != vf.embedded() {
+ if tf.Embedded() != vf.Embedded() {
return false
}
}
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)
-func rtypeOff(section unsafe.Pointer, off int32) *rtype {
- return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
+func rtypeOff(section unsafe.Pointer, off int32) *abi.Type {
+ return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}
// typesByString returns the subslice of typelinks() whose elements have
// the given string representation.
// It may be empty (no known types with that string) or may have
// multiple elements (multiple types with that string).
-func typesByString(s string) []*rtype {
+func typesByString(s string) []*abi.Type {
sections, offset := typelinks()
- var ret []*rtype
+ var ret []*abi.Type
for offsI, offs := range offset {
section := sections[offsI]
for i < j {
h := i + (j-i)>>1 // avoid overflow when computing h
// i ≤ h < j
- if !(rtypeOff(section, offs[h]).String() >= s) {
+ if !(stringFor(rtypeOff(section, offs[h])) >= s) {
i = h + 1 // preserves f(i-1) == false
} else {
j = h // preserves f(j) == true
// to do a linear scan anyway.
for j := i; j < len(offs); j++ {
typ := rtypeOff(section, offs[j])
- if typ.String() != s {
+ if stringFor(typ) != s {
break
}
ret = append(ret, typ)
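The loop above is the standard first-true binary search; the same scan expressed with the stdlib, as a self-contained sketch:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		names := []string{"[]int", "chan int", "map[int]bool"} // sorted, like typelinks
		s := "chan int"
		i := sort.Search(len(names), func(h int) bool { return names[h] >= s })
		fmt.Println(i, i < len(names) && names[i] == s) // 1 true
	}
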
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
kind Kind
- t1 *rtype
- t2 *rtype
+ t1 *abi.Type
+ t2 *abi.Type
extra uintptr
}
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
func ChanOf(dir ChanDir, t Type) Type {
- typ := t.(*rtype)
+ typ := t.common()
// Look in cache.
ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
}
// This restriction is imposed by the gc compiler and the runtime.
- if typ.t.Size_ >= 1<<16 {
+ if typ.Size_ >= 1<<16 {
panic("reflect.ChanOf: element size too large")
}
default:
panic("reflect.ChanOf: invalid dir")
case SendDir:
- s = "chan<- " + typ.String()
+ s = "chan<- " + stringFor(typ)
case RecvDir:
- s = "<-chan " + typ.String()
+ s = "<-chan " + stringFor(typ)
case BothDir:
- typeStr := typ.String()
+ typeStr := stringFor(typ)
if typeStr[0] == '<' {
// typ is recv chan, need parentheses as "<-" associates with leftmost
// chan possible, see:
}
for _, tt := range typesByString(s) {
ch := (*chanType)(unsafe.Pointer(tt))
- if ch.Elem == &typ.t && ch.Dir == abi.ChanDir(dir) {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) {
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
}
}
ch.TFlag = abi.TFlagRegularMemory
ch.Dir = abi.ChanDir(dir)
ch.Str = resolveReflectName(newName(s, "", false, false))
- ch.Hash = fnv1(typ.t.Hash, 'c', byte(dir))
- ch.Elem = &typ.t
+ ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
+ ch.Elem = typ
ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type))
return ti.(Type)
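The parenthesization branch above is observable through the public constructor (illustrative):

	inner := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0)) // <-chan int
	outer := reflect.ChanOf(reflect.BothDir, inner)
	fmt.Println(outer) // chan (<-chan int): parentheses keep <- bound to the inner chan
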
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
- ktyp := key.(*rtype)
- etyp := elem.(*rtype)
+ ktyp := key.common()
+ etyp := elem.common()
- if ktyp.t.Equal == nil {
- panic("reflect.MapOf: invalid key type " + ktyp.String())
+ if ktyp.Equal == nil {
+ panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
}
// Look in cache.
}
// Look in known types.
- s := "map[" + ktyp.String() + "]" + etyp.String()
+ s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
for _, tt := range typesByString(s) {
mt := (*mapType)(unsafe.Pointer(tt))
if mt.Key == ktyp && mt.Elem == etyp {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
}
}
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**mapType)(unsafe.Pointer(&imap))
- mt.t.Str = resolveReflectName(newName(s, "", false, false))
- mt.t.TFlag = 0
- mt.t.Hash = fnv1(etyp.t.Hash, 'm', byte(ktyp.t.Hash>>24), byte(ktyp.t.Hash>>16), byte(ktyp.t.Hash>>8), byte(ktyp.t.Hash))
+ mt.Str = resolveReflectName(newName(s, "", false, false))
+ mt.TFlag = 0
+ mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
mt.Key = ktyp
mt.Elem = etyp
mt.Bucket = bucketOf(ktyp, etyp)
return typehash(ktyp, p, seed)
}
mt.Flags = 0
- if ktyp.t.Size_ > maxKeySize {
- mt.Keysize = uint8(goarch.PtrSize)
+ if ktyp.Size_ > maxKeySize {
+ mt.KeySize = uint8(goarch.PtrSize)
mt.Flags |= 1 // indirect key
} else {
- mt.Keysize = uint8(ktyp.t.Size_)
+ mt.KeySize = uint8(ktyp.Size_)
}
- if etyp.t.Size_ > maxValSize {
- mt.Valuesize = uint8(goarch.PtrSize)
+ if etyp.Size_ > maxValSize {
+ mt.ValueSize = uint8(goarch.PtrSize)
mt.Flags |= 2 // indirect value
} else {
- mt.Valuesize = uint8(etyp.t.Size_)
+ mt.ValueSize = uint8(etyp.Size_)
}
- mt.Bucketsize = uint16(mt.Bucket.t.Size_)
+ mt.BucketSize = uint16(mt.Bucket.Size_)
if isReflexive(ktyp) {
mt.Flags |= 4
}
if hashMightPanic(ktyp) {
mt.Flags |= 16
}
- mt.t.PtrToThis = 0
+ mt.PtrToThis = 0
- ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
return ti.(Type)
}
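Public-API behavior backed by this function (illustrative sketch):

	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)) // map[string]int
	m := reflect.MakeMap(mt)
	m.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
	// reflect.MapOf(reflect.TypeOf([]int(nil)), mt.Elem()) would panic:
	// []int has no == operator, so its Equal is nil and the key is rejected.
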
// Look in cache.
if ts, ok := funcLookupCache.m.Load(hash); ok {
- for _, t := range ts.([]*rtype) {
- if haveIdenticalUnderlyingType(toRType(&ft.Type), t, true) {
- return t
+ for _, t := range ts.([]*abi.Type) {
+ if haveIdenticalUnderlyingType(&ft.Type, t, true) {
+ return toRType(t)
}
}
}
funcLookupCache.Lock()
defer funcLookupCache.Unlock()
if ts, ok := funcLookupCache.m.Load(hash); ok {
- for _, t := range ts.([]*rtype) {
- if haveIdenticalUnderlyingType(toRType(&ft.Type), t, true) {
- return t
+ for _, t := range ts.([]*abi.Type) {
+ if haveIdenticalUnderlyingType(&ft.Type, t, true) {
+ return toRType(t)
}
}
}
- addToCache := func(tt *rtype) Type {
- var rts []*rtype
+ addToCache := func(tt *abi.Type) Type {
+ var rts []*abi.Type
if rti, ok := funcLookupCache.m.Load(hash); ok {
- rts = rti.([]*rtype)
+ rts = rti.([]*abi.Type)
}
funcLookupCache.m.Store(hash, append(rts, tt))
- return tt
+ return toType(tt)
}
// Look in known types for the same string representation.
str := funcStr(ft)
for _, tt := range typesByString(str) {
- if haveIdenticalUnderlyingType(toRType(&ft.Type), tt, true) {
+ if haveIdenticalUnderlyingType(&ft.Type, tt, true) {
return addToCache(tt)
}
}
// Populate the remaining fields of ft and store in cache.
ft.Str = resolveReflectName(newName(str, "", false, false))
ft.PtrToThis = 0
- return addToCache(toRType(&ft.Type))
+ return addToCache(&ft.Type)
}
func stringFor(t *abi.Type) string {
return toRType(t).String()
}
if ft.IsVariadic() && i == int(ft.InCount)-1 {
repr = append(repr, "..."...)
- repr = append(repr, (*sliceType)(unsafe.Pointer(t)).Elem.String()...)
+ repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...)
} else {
repr = append(repr, stringFor(t)...)
}
// isReflexive reports whether the == operation on the type is reflexive.
// That is, x == x for all values x of type t.
-func isReflexive(t *rtype) bool {
- switch t.Kind() {
+func isReflexive(t *abi.Type) bool {
+ switch Kind(t.Kind()) {
case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
return true
case Float32, Float64, Complex64, Complex128, Interface:
return false
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
- return isReflexive(toRType(tt.Elem))
+ return isReflexive(tt.Elem)
case Struct:
tt := (*structType)(unsafe.Pointer(t))
for _, f := range tt.Fields {
return true
default:
// Func, Map, Slice, Invalid
- panic("isReflexive called on non-key type " + t.String())
+ panic("isReflexive called on non-key type " + stringFor(t))
}
}
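Floats are the canonical non-reflexive case, since NaN != NaN (runnable sketch):

	m := map[float64]bool{}
	nan := math.NaN()
	m[nan] = true
	fmt.Println(len(m), m[nan]) // 1 false: the stored key never equals the lookup key
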
// needKeyUpdate reports whether map overwrites require the key to be copied.
-func needKeyUpdate(t *rtype) bool {
- switch t.Kind() {
+func needKeyUpdate(t *abi.Type) bool {
+ switch Kind(t.Kind()) {
case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
return false
case Float32, Float64, Complex64, Complex128, Interface, String:
return true
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
- return needKeyUpdate(toRType(tt.Elem))
+ return needKeyUpdate(tt.Elem)
case Struct:
tt := (*structType)(unsafe.Pointer(t))
for _, f := range tt.Fields {
return false
default:
// Func, Map, Slice, Invalid
- panic("needKeyUpdate called on non-key type " + t.String())
+ panic("needKeyUpdate called on non-key type " + stringFor(t))
}
}
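Float keys show why overwrites must rewrite the key: +0 and -0 compare equal but have distinct bit patterns (runnable sketch):

	m := map[float64]string{}
	m[0.0] = "a"
	m[math.Copysign(0, -1)] = "b" // same entry, but the stored key is updated to -0
	for k := range m {
		fmt.Println(math.Signbit(k), m[k]) // true b
	}
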
// hashMightPanic reports whether the hash of a map key of type t might panic.
-func hashMightPanic(t *rtype) bool {
- switch t.Kind() {
+func hashMightPanic(t *abi.Type) bool {
+ switch Kind(t.Kind()) {
case Interface:
return true
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
- return hashMightPanic(toRType(tt.Elem))
+ return hashMightPanic(tt.Elem)
case Struct:
tt := (*structType)(unsafe.Pointer(t))
for _, f := range tt.Fields {
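Interface keys defer comparability to run time, which is the panic this function guards against (runnable sketch):

	defer func() { fmt.Println("recovered:", recover()) }()
	m := map[any]bool{}
	m[[]int{1}] = true // panics at run time: []int is an unhashable dynamic type
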
maxValSize uintptr = abi.MapMaxElemBytes
)
-func bucketOf(ktyp, etyp *rtype) *rtype {
- if ktyp.t.Size_ > maxKeySize {
- ktyp = PointerTo(ktyp).(*rtype)
+func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
+ if ktyp.Size_ > maxKeySize {
+ ktyp = ptrTo(ktyp)
}
- if etyp.t.Size_ > maxValSize {
- etyp = PointerTo(etyp).(*rtype)
+ if etyp.Size_ > maxValSize {
+ etyp = ptrTo(etyp)
}
// Prepare GC data if any.
var gcdata *byte
var ptrdata uintptr
- size := bucketSize*(1+ktyp.t.Size_+etyp.t.Size_) + goarch.PtrSize
- if size&uintptr(ktyp.t.Align_-1) != 0 || size&uintptr(etyp.t.Align_-1) != 0 {
+ size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
+ if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
- if ktyp.t.PtrBytes != 0 || etyp.t.PtrBytes != 0 {
- nptr := (bucketSize*(1+ktyp.t.Size_+etyp.t.Size_) + goarch.PtrSize) / goarch.PtrSize
+ if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
+ nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
n := (nptr + 7) / 8
// Runtime needs pointer masks to be a multiple of uintptr in size.
mask := make([]byte, n)
base := bucketSize / goarch.PtrSize
- if ktyp.t.PtrBytes != 0 {
+ if ktyp.PtrBytes != 0 {
emitGCMask(mask, base, ktyp, bucketSize)
}
- base += bucketSize * ktyp.t.Size_ / goarch.PtrSize
+ base += bucketSize * ktyp.Size_ / goarch.PtrSize
- if etyp.t.PtrBytes != 0 {
+ if etyp.PtrBytes != 0 {
emitGCMask(mask, base, etyp, bucketSize)
}
- base += bucketSize * etyp.t.Size_ / goarch.PtrSize
+ base += bucketSize * etyp.Size_ / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
}
}
- b := &rtype{abi.Type{
+ b := &abi.Type{
Align_: goarch.PtrSize,
Size_: size,
Kind_: uint8(Struct),
PtrBytes: ptrdata,
GCData: gcdata,
- }}
- s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
- b.t.Str = resolveReflectName(newName(s, "", false, false))
+ }
+ s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
+ b.Str = resolveReflectName(newName(s, "", false, false))
return b
}
// emitGCMask writes the GC mask for [n]typ into out, starting at bit
// offset base.
-func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
- if typ.t.Kind_&kindGCProg != 0 {
+func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
+ if typ.Kind_&kindGCProg != 0 {
panic("reflect: unexpected GC program")
}
- ptrs := typ.t.PtrBytes / goarch.PtrSize
- words := typ.t.Size_ / goarch.PtrSize
- mask := typ.gcSlice(0, (ptrs+7)/8)
+ ptrs := typ.PtrBytes / goarch.PtrSize
+ words := typ.Size_ / goarch.PtrSize
+ mask := typ.GcSlice(0, (ptrs+7)/8)
for j := uintptr(0); j < ptrs; j++ {
if (mask[j/8]>>(j%8))&1 != 0 {
for i := uintptr(0); i < n; i++ {
// appendGCProg appends the GC program for the first ptrdata bytes of
// typ to dst and returns the extended slice.
-func appendGCProg(dst []byte, typ *rtype) []byte {
- if typ.t.Kind_&kindGCProg != 0 {
+func appendGCProg(dst []byte, typ *abi.Type) []byte {
+ if typ.Kind_&kindGCProg != 0 {
// Element has GC program; emit one element.
- n := uintptr(*(*uint32)(unsafe.Pointer(typ.t.GCData)))
- prog := typ.gcSlice(4, 4+n-1)
+ n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData)))
+ prog := typ.GcSlice(4, 4+n-1)
return append(dst, prog...)
}
// Element is small with pointer mask; use as literal bits.
- ptrs := typ.t.PtrBytes / goarch.PtrSize
- mask := typ.gcSlice(0, (ptrs+7)/8)
+ ptrs := typ.PtrBytes / goarch.PtrSize
+ mask := typ.GcSlice(0, (ptrs+7)/8)
// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
for ; ptrs > 120; ptrs -= 120 {
// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
- typ := t.(*rtype)
+ typ := t.common()
// Look in cache.
ckey := cacheKey{Slice, typ, nil, 0}
}
// Look in known types.
- s := "[]" + typ.String()
+ s := "[]" + stringFor(typ)
for _, tt := range typesByString(s) {
slice := (*sliceType)(unsafe.Pointer(tt))
if slice.Elem == typ {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
}
}
var islice any = ([]unsafe.Pointer)(nil)
prototype := *(**sliceType)(unsafe.Pointer(&islice))
slice := *prototype
- slice.t.TFlag = 0
- slice.t.Str = resolveReflectName(newName(s, "", false, false))
- slice.t.Hash = fnv1(typ.t.Hash, '[')
+ slice.TFlag = 0
+ slice.Str = resolveReflectName(newName(s, "", false, false))
+ slice.Hash = fnv1(typ.Hash, '[')
slice.Elem = typ
- slice.t.PtrToThis = 0
+ slice.PtrToThis = 0
- ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type))
return ti.(Type)
}
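Backed by this constructor (illustrative):

	st := reflect.SliceOf(reflect.TypeOf(int64(0))) // []int64
	s := reflect.MakeSlice(st, 0, 4)
	s = reflect.Append(s, reflect.ValueOf(int64(7)))
	fmt.Println(st, s.Len()) // []int64 1
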
}
f, fpkgpath := runtimeStructField(field)
ft := f.Typ
- if ft.t.Kind_&kindGCProg != 0 {
+ if ft.Kind_&kindGCProg != 0 {
hasGCProg = true
}
if fpkgpath != "" {
name := f.Name.Name()
hash = fnv1(hash, []byte(name)...)
repr = append(repr, (" " + name)...)
- if f.embedded() {
+ if f.Embedded() {
// Embedded field
- if f.Typ.Kind() == Pointer {
+ if f.Typ.Kind() == abi.Pointer {
// Embedded ** and *interface{} are illegal
elem := ft.Elem()
- if k := elem.Kind(); k == Pointer || k == Interface {
- panic("reflect.StructOf: illegal embedded field type " + ft.String())
+ if k := elem.Kind(); k == abi.Pointer || k == abi.Interface {
+ panic("reflect.StructOf: illegal embedded field type " + stringFor(ft))
}
}
- switch f.Typ.Kind() {
+ switch Kind(f.Typ.Kind()) {
case Interface:
ift := (*interfaceType)(unsafe.Pointer(ft))
for im, m := range ift.Methods {
tfn Value
)
- if ft.t.Kind_&kindDirectIface != 0 {
- tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ if ft.Kind_&kindDirectIface != 0 {
+ tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
var args []Value
var recv = in[0]
if len(in) > 1 {
}
return recv.Field(ifield).Method(imethod).Call(args)
})
- ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
var args []Value
var recv = in[0]
if len(in) > 1 {
return recv.Field(ifield).Method(imethod).Call(args)
})
} else {
- tfn = MakeFunc(mtyp, func(in []Value) []Value {
+ tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
var args []Value
var recv = in[0]
if len(in) > 1 {
}
return recv.Field(ifield).Method(imethod).Call(args)
})
- ifn = MakeFunc(mtyp, func(in []Value) []Value {
+ ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
var args []Value
var recv = Indirect(in[0])
if len(in) > 1 {
}
case Pointer:
ptr := (*ptrType)(unsafe.Pointer(ft))
- if unt := ptr.uncommon(); unt != nil {
+ if unt := ptr.Uncommon(); unt != nil {
if i > 0 && unt.Mcount > 0 {
// Issue 15924.
panic("reflect: embedded type with methods not implemented if type is not first field")
panic("reflect: embedded type with methods not implemented if there is more than one field")
}
for _, m := range unt.Methods() {
- mname := ptr.nameOff(m.Name)
+ mname := nameOffFor(ft, m.Name)
if pkgPath(mname) != "" {
// TODO(sbinet).
// Issue 15924.
}
methods = append(methods, abi.Method{
Name: resolveReflectName(mname),
- Mtyp: resolveReflectType(ptr.typeOff(m.Mtyp)),
- Ifn: resolveReflectText(ptr.textOff(m.Ifn)),
- Tfn: resolveReflectText(ptr.textOff(m.Tfn)),
+ Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
+ Ifn: resolveReflectText(textOffFor(ft, m.Ifn)),
+ Tfn: resolveReflectText(textOffFor(ft, m.Tfn)),
})
}
}
- if unt := ptr.Elem.uncommon(); unt != nil {
+ if unt := ptr.Elem.Uncommon(); unt != nil {
for _, m := range unt.Methods() {
- mname := ptr.nameOff(m.Name)
+ mname := nameOffFor(ft, m.Name)
if pkgPath(mname) != "" {
// TODO(sbinet)
// Issue 15924.
}
methods = append(methods, abi.Method{
Name: resolveReflectName(mname),
- Mtyp: resolveReflectType(ptr.Elem.typeOff(m.Mtyp)),
- Ifn: resolveReflectText(ptr.Elem.textOff(m.Ifn)),
- Tfn: resolveReflectText(ptr.Elem.textOff(m.Tfn)),
+ Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)),
+ Ifn: resolveReflectText(textOffFor(ptr.Elem, m.Ifn)),
+ Tfn: resolveReflectText(textOffFor(ptr.Elem, m.Tfn)),
})
}
}
default:
- if unt := ft.uncommon(); unt != nil {
+ if unt := ft.Uncommon(); unt != nil {
if i > 0 && unt.Mcount > 0 {
// Issue 15924.
panic("reflect: embedded type with methods not implemented if type is not first field")
}
- if len(fields) > 1 && ft.t.Kind_&kindDirectIface != 0 {
+ if len(fields) > 1 && ft.Kind_&kindDirectIface != 0 {
panic("reflect: embedded type with methods not implemented for non-pointer type")
}
for _, m := range unt.Methods() {
- mname := ft.nameOff(m.Name)
+ mname := nameOffFor(ft, m.Name)
if pkgPath(mname) != "" {
// TODO(sbinet)
// Issue 15924.
}
methods = append(methods, abi.Method{
Name: resolveReflectName(mname),
- Mtyp: resolveReflectType(ft.typeOff(m.Mtyp)),
- Ifn: resolveReflectText(ft.textOff(m.Ifn)),
- Tfn: resolveReflectText(ft.textOff(m.Tfn)),
+ Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
+ Ifn: resolveReflectText(textOffFor(ft, m.Ifn)),
+ Tfn: resolveReflectText(textOffFor(ft, m.Tfn)),
})
}
}
fset[name] = struct{}{}
- hash = fnv1(hash, byte(ft.t.Hash>>24), byte(ft.t.Hash>>16), byte(ft.t.Hash>>8), byte(ft.t.Hash))
+ hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash))
- repr = append(repr, (" " + ft.String())...)
+ repr = append(repr, (" " + stringFor(ft))...)
if f.Name.HasTag() {
hash = fnv1(hash, []byte(f.Name.Tag())...)
repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...)
repr = append(repr, ';')
}
- comparable = comparable && (ft.t.Equal != nil)
+ comparable = comparable && (ft.Equal != nil)
- offset := align(size, uintptr(ft.t.Align_))
+ offset := align(size, uintptr(ft.Align_))
if offset < size {
panic("reflect.StructOf: struct size would exceed virtual address space")
}
- if ft.t.Align_ > typalign {
- typalign = ft.t.Align_
+ if ft.Align_ > typalign {
+ typalign = ft.Align_
}
- size = offset + ft.t.Size_
+ size = offset + ft.Size_
if size < offset {
panic("reflect.StructOf: struct size would exceed virtual address space")
}
f.Offset = offset
- if ft.t.Size_ == 0 {
+ if ft.Size_ == 0 {
lastzero = size
}
if ts, ok := structLookupCache.m.Load(hash); ok {
for _, st := range ts.([]Type) {
t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
+ if haveIdenticalUnderlyingType(&typ.Type, t, true) {
+ return toType(t)
}
}
}
if ts, ok := structLookupCache.m.Load(hash); ok {
for _, st := range ts.([]Type) {
t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
+ if haveIdenticalUnderlyingType(&typ.Type, t, true) {
+ return toType(t)
}
}
}
// Look in known types.
for _, t := range typesByString(str) {
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ if haveIdenticalUnderlyingType(&typ.Type, t, true) {
// even if 't' wasn't a structType with methods, we should be ok
// as the 'u uncommonType' field won't be accessed except when
// tflag&abi.TFlagUncommon is set.
- return addToCache(t)
+ return addToCache(toType(t))
}
}
- typ.t.Str = resolveReflectName(newName(str, "", false, false))
- typ.t.TFlag = 0 // TODO: set tflagRegularMemory
- typ.t.Hash = hash
- typ.t.Size_ = size
- typ.t.PtrBytes = typeptrdata(typ.common())
- typ.t.Align_ = typalign
- typ.t.FieldAlign_ = typalign
- typ.t.PtrToThis = 0
+ typ.Str = resolveReflectName(newName(str, "", false, false))
+ typ.TFlag = 0 // TODO: set tflagRegularMemory
+ typ.Hash = hash
+ typ.Size_ = size
+ typ.PtrBytes = typeptrdata(&typ.Type)
+ typ.Align_ = typalign
+ typ.FieldAlign_ = typalign
+ typ.PtrToThis = 0
if len(methods) > 0 {
- typ.t.TFlag |= abi.TFlagUncommon
+ typ.TFlag |= abi.TFlagUncommon
}
if hasGCProg {
lastPtrField := 0
for i, ft := range fs {
- if ft.Typ.pointers() {
+ if ft.Typ.Pointers() {
lastPtrField = i
}
}
// the last field that contains pointer data
break
}
- if !ft.Typ.pointers() {
+ if !ft.Typ.Pointers() {
// Ignore pointerless fields.
continue
}
}
prog = appendGCProg(prog, ft.Typ)
- off += ft.Typ.t.PtrBytes
+ off += ft.Typ.PtrBytes
}
prog = append(prog, 0)
*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
- typ.t.Kind_ |= kindGCProg
- typ.t.GCData = &prog[0]
+ typ.Kind_ |= kindGCProg
+ typ.GCData = &prog[0]
} else {
- typ.t.Kind_ &^= kindGCProg
+ typ.Kind_ &^= kindGCProg
bv := new(bitVector)
- addTypeBits(bv, 0, typ.common())
+ addTypeBits(bv, 0, &typ.Type)
if len(bv.data) > 0 {
- typ.t.GCData = &bv.data[0]
+ typ.GCData = &bv.data[0]
}
}
- typ.t.Equal = nil
+ typ.Equal = nil
if comparable {
- typ.t.Equal = func(p, q unsafe.Pointer) bool {
+ typ.Equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.Fields {
pi := add(p, ft.Offset, "&x.field safe")
qi := add(q, ft.Offset, "&x.field safe")
- if !ft.Typ.t.Equal(pi, qi) {
+ if !ft.Typ.Equal(pi, qi) {
return false
}
}
switch {
case len(fs) == 1 && !ifaceIndir(fs[0].Typ):
// structs of 1 direct iface type can be direct
- typ.t.Kind_ |= kindDirectIface
+ typ.Kind_ |= kindDirectIface
default:
- typ.t.Kind_ &^= kindDirectIface
+ typ.Kind_ &^= kindDirectIface
}
- return addToCache(&typ.rtype)
+ return addToCache(toType(&typ.Type))
}
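A minimal public-API exercise of this path (illustrative sketch; field names ours):

	t := reflect.StructOf([]reflect.StructField{
		{Name: "X", Type: reflect.TypeOf(0), Tag: `json:"x"`},
		{Name: "Y", Type: reflect.TypeOf("")},
	})
	v := reflect.New(t).Elem()
	v.Field(0).SetInt(7)
	fmt.Println(t.NumField(), v.Field(0).Int()) // 2 7
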
// runtimeStructField takes a StructField value passed to StructOf and
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
-func typeptrdata(t *rtype) uintptr {
+func typeptrdata(t *abi.Type) uintptr {
switch t.Kind() {
- case Struct:
+ case abi.Struct:
st := (*structType)(unsafe.Pointer(t))
// find the last field that has pointers.
field := -1
for i := range st.Fields {
ft := st.Fields[i].Typ
- if ft.pointers() {
+ if ft.Pointers() {
field = i
}
}
return 0
}
f := st.Fields[field]
- return f.Offset + f.Typ.t.PtrBytes
+ return f.Offset + f.Typ.PtrBytes
default:
- panic("reflect.typeptrdata: unexpected type, " + t.String())
+ panic("reflect.typeptrdata: unexpected type, " + stringFor(t))
}
}
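A worked example of the Struct case, assuming 64-bit words:

	type S struct {
		P *int // last pointer-bearing field, offset 0
		N int  // scalar tail, not scanned by the GC
	}
	// typeptrdata(S) = offsetof(P) + PtrBytes(*int) = 0 + 8 = 8 bytes,
	// even though S itself occupies 16 bytes.
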
panic("reflect: negative length passed to ArrayOf")
}
- typ := elem.(*rtype)
+ typ := elem.common()
// Look in cache.
ckey := cacheKey{Array, typ, nil, uintptr(length)}
}
// Look in known types.
- s := "[" + strconv.Itoa(length) + "]" + typ.String()
+ s := "[" + strconv.Itoa(length) + "]" + stringFor(typ)
for _, tt := range typesByString(s) {
array := (*arrayType)(unsafe.Pointer(tt))
- if toRType(array.Elem) == typ {
- ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ if array.Elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
}
}
var iarray any = [1]unsafe.Pointer{}
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := *prototype
- array.TFlag = typ.t.TFlag & abi.TFlagRegularMemory
+ array.TFlag = typ.TFlag & abi.TFlagRegularMemory
array.Str = resolveReflectName(newName(s, "", false, false))
- array.Hash = fnv1(typ.t.Hash, '[')
+ array.Hash = fnv1(typ.Hash, '[')
for n := uint32(length); n > 0; n >>= 8 {
array.Hash = fnv1(array.Hash, byte(n))
}
array.Hash = fnv1(array.Hash, ']')
- array.Elem = &(typ.t)
+ array.Elem = typ
array.PtrToThis = 0
- if typ.t.Size_ > 0 {
- max := ^uintptr(0) / typ.t.Size_
+ if typ.Size_ > 0 {
+ max := ^uintptr(0) / typ.Size_
if uintptr(length) > max {
panic("reflect.ArrayOf: array size would exceed virtual address space")
}
}
- array.Size_ = typ.t.Size_ * uintptr(length)
- if length > 0 && typ.t.PtrBytes != 0 {
- array.PtrBytes = typ.t.Size_*uintptr(length-1) + typ.t.PtrBytes
+ array.Size_ = typ.Size_ * uintptr(length)
+ if length > 0 && typ.PtrBytes != 0 {
+ array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
}
- array.Align_ = typ.t.Align_
- array.FieldAlign_ = typ.t.FieldAlign_
+ array.Align_ = typ.Align_
+ array.FieldAlign_ = typ.FieldAlign_
array.Len = uintptr(length)
array.Slice = &(SliceOf(elem).(*rtype).t)
switch {
- case typ.t.PtrBytes == 0 || array.Size_ == 0:
+ case typ.PtrBytes == 0 || array.Size_ == 0:
// No pointers.
array.GCData = nil
array.PtrBytes = 0
case length == 1:
// In memory, 1-element array looks just like the element.
- array.Kind_ |= typ.t.Kind_ & kindGCProg
- array.GCData = typ.t.GCData
- array.PtrBytes = typ.t.PtrBytes
+ array.Kind_ |= typ.Kind_ & kindGCProg
+ array.GCData = typ.GCData
+ array.PtrBytes = typ.PtrBytes
- case typ.t.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
+ case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
// Element is small with pointer mask; array is still small.
// Create direct pointer mask by turning each 1 bit in elem
// into length 1 bits in larger mask.
prog := []byte{0, 0, 0, 0} // will be length of prog
prog = appendGCProg(prog, typ)
// Pad from ptrdata to size.
- elemPtrs := typ.t.PtrBytes / goarch.PtrSize
- elemWords := typ.t.Size_ / goarch.PtrSize
+ elemPtrs := typ.PtrBytes / goarch.PtrSize
+ elemWords := typ.Size_ / goarch.PtrSize
if elemPtrs < elemWords {
// Emit literal 0 bit, then repeat as needed.
prog = append(prog, 0x01, 0x00)
array.PtrBytes = array.Size_ // overestimate but ok; must match program
}
- etyp := typ.common()
+ etyp := typ
esize := etyp.Size()
array.Equal = nil
- if eequal := etyp.t.Equal; eequal != nil {
+ if eequal := etyp.Equal; eequal != nil {
array.Equal = func(p, q unsafe.Pointer) bool {
for i := 0; i < length; i++ {
pi := arrayAt(p, i, esize, "i < length")
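Backed by this constructor (illustrative):

	at := reflect.ArrayOf(4, reflect.TypeOf(byte(0)))
	fmt.Println(at, at.Len(), at.Size()) // [4]uint8 4 4
	// lengths whose total size would overflow the address space panic instead.
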
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.
-func toType(t *rtype) Type {
+func toType(t *abi.Type) Type {
if t == nil {
return nil
}
- return t
+ return toRType(t)
}
type layoutKey struct {
ftyp *funcType // function signature
- rcvr *rtype // receiver type, or nil if none
+ rcvr *abi.Type // receiver type, or nil if none
}
type layoutType struct {
- t *rtype
+ t *abi.Type
framePool *sync.Pool
abid abiDesc
}
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
-func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abid abiDesc) {
+func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) {
if t.Kind() != abi.Func {
panic("reflect: funcLayout of non-func type " + stringFor(&t.Type))
}
- if rcvr != nil && rcvr.Kind() == Interface {
- panic("reflect: funcLayout with interface receiver " + rcvr.String())
+ if rcvr != nil && rcvr.Kind() == abi.Interface {
+ panic("reflect: funcLayout with interface receiver " + stringFor(rcvr))
}
k := layoutKey{t, rcvr}
if lti, ok := layoutCache.Load(k); ok {
abid = newAbiDesc(t, rcvr)
// build dummy rtype holding gc program
- x := &rtype{abi.Type{
+ x := &abi.Type{
Align_: goarch.PtrSize,
// Don't add spill space here; it's only necessary in
// reflectcall's frame, not in the allocated frame.
// spill space in the frame is no longer required.
Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
- }}
+ }
if abid.stackPtrs.n > 0 {
- x.t.GCData = &abid.stackPtrs.data[0]
+ x.GCData = &abid.stackPtrs.data[0]
}
var s string
if rcvr != nil {
- s = "methodargs(" + rcvr.String() + ")(" + stringFor(&t.Type) + ")"
+ s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")"
} else {
s = "funcargs(" + stringFor(&t.Type) + ")"
}
- x.t.Str = resolveReflectName(newName(s, "", false, false))
+ x.Str = resolveReflectName(newName(s, "", false, false))
// cache result for future callers
framePool = &sync.Pool{New: func() any {
}
// ifaceIndir reports whether t is stored indirectly in an interface value.
-func ifaceIndir(t *rtype) bool {
- return t.t.Kind_&kindDirectIface == 0
+func ifaceIndir(t *abi.Type) bool {
+ return t.Kind_&kindDirectIface == 0
}
// Note: this type must agree with runtime.bitvector.
bv.n++
}
-func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
- if t.t.PtrBytes == 0 {
+func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
+ if t.PtrBytes == 0 {
return
}
- switch Kind(t.t.Kind_ & kindMask) {
+ switch Kind(t.Kind_ & kindMask) {
case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
// 1 pointer at start of representation
for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
// repeat inner type
tt := (*arrayType)(unsafe.Pointer(t))
for i := 0; i < int(tt.Len); i++ {
- addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, toRType(tt.Elem))
+ addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)
}
case Struct:
// they represent.
type Value struct {
// typ holds the type of the value represented by a Value.
- typ *rtype
+ typ *abi.Type
// Pointer-valued data or, if flagIndir is set, pointer to data.
-// Valid when either flagIndir is set or typ.pointers() is true.
+// Valid when either flagIndir is set or typ.Pointers() is true.
// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer
// if v.Kind() == Pointer, the base type must not be not-in-heap.
func (v Value) pointer() unsafe.Pointer {
- if v.typ.Size() != goarch.PtrSize || !v.typ.pointers() {
+ if v.typ.Size() != goarch.PtrSize || !v.typ.Pointers() {
panic("can't call pointer on a non-pointer Value")
}
if v.flag&flagIndir != 0 {
e := (*emptyInterface)(unsafe.Pointer(&i))
// First, fill in the data portion of the interface.
switch {
- case ifaceIndir(t):
+ case t.IfaceIndir():
if v.flag&flagIndir == 0 {
panic("bad indir")
}
return Value{}
}
f := flag(t.Kind())
- if ifaceIndir(t) {
+ if t.IfaceIndir() {
f |= flagIndir
}
return Value{t, e.word, f}
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
- typ *rtype
+ typ *abi.Type
word unsafe.Pointer
}
type nonEmptyInterface struct {
// see ../runtime/iface.go:/Itab
itab *struct {
- ityp *rtype // static interface type
- typ *rtype // dynamic concrete type
- hash uint32 // copy of typ.hash
+ ityp *abi.Type // static interface type
+ typ *abi.Type // dynamic concrete type
+ hash uint32 // copy of typ.hash
_ [4]byte
fun [100000]unsafe.Pointer // method table
}
// Preserve flagRO instead of using v.flag.ro() so that
// v.Addr().Elem() is equivalent to v (#32772)
fl := v.flag & flagRO
- return Value{v.typ.ptrTo(), v.ptr, fl | flag(Pointer)}
+ return Value{ptrTo(v.typ), v.ptr, fl | flag(Pointer)}
}
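Preserving only flagRO is what keeps the #32772 equivalence (runnable sketch):

	x := 4
	v := reflect.ValueOf(&x).Elem()
	v.Addr().Elem().SetInt(7) // v.Addr().Elem() behaves exactly like v
	fmt.Println(x) // 7
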
// Bool returns v's underlying value.
func (v Value) bytesSlow() []byte {
switch v.kind() {
case Slice:
- if v.typ.Elem().Kind() != Uint8 {
+ if v.typ.Elem().Kind() != abi.Uint8 {
panic("reflect.Value.Bytes of non-byte slice")
}
// Slice is always bigger than a word; assume flagIndir.
return *(*[]byte)(v.ptr)
case Array:
- if v.typ.Elem().Kind() != Uint8 {
+ if v.typ.Elem().Kind() != abi.Uint8 {
panic("reflect.Value.Bytes of non-byte array")
}
if !v.CanAddr() {
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) runes() []rune {
v.mustBe(Slice)
- if v.typ.Elem().Kind() != Int32 {
+ if v.typ.Elem().Kind() != abi.Int32 {
panic("reflect.Value.Bytes of non-rune slice")
}
// Slice is always bigger than a word; assume flagIndir.
var (
fn unsafe.Pointer
rcvr Value
- rcvrtype *rtype
+ rcvrtype *abi.Type
)
if v.flag&flagMethod != 0 {
rcvr = v
// TODO(mknyszek): Figure out if it's possible to get some
// scratch space for this assignment check. Previously, it
// was possible to use space in the argument frame.
- v = v.assignTo("reflect.Value.Call", targ, nil)
+ v = v.assignTo("reflect.Value.Call", &targ.t, nil)
stepsLoop:
for _, st := range abid.call.stepsForValue(i + inStart) {
switch st.kind {
// Copy values to the "stack."
addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
if v.flag&flagIndir != 0 {
- typedmemmove(targ, addr, v.ptr)
+ typedmemmove(&targ.t, addr, v.ptr)
} else {
*(*unsafe.Pointer)(addr) = v.ptr
}
// allocated, the entire value is according to the ABI. So
// just make an indirection into the allocated frame.
fl := flagIndir | flag(tv.Kind())
- ret[i] = Value{toRType(tv), add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
+ ret[i] = Value{tv, add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
// Note: this does introduce false sharing between results -
// if any result is live, they are all live.
// (And the space for the args is live as well, but as we've
}
// Handle pointers passed in registers.
- if !ifaceIndir(toRType(tv)) {
+ if !ifaceIndir(tv) {
// Pointer-valued data gets put directly
// into v.ptr.
if steps[0].kind != abiStepPointer {
print("kind=", steps[0].kind, ", type=", stringFor(tv), "\n")
panic("mismatch between ABI description and types")
}
- ret[i] = Value{toRType(tv), regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
+ ret[i] = Value{tv, regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
continue
}
// additional space to the allocated stack frame and storing the
// register-allocated return values into the allocated stack frame and
// referring there in the resulting Value.
- s := unsafe_New(toRType(tv))
+ s := unsafe_New(tv)
for _, st := range steps {
switch st.kind {
case abiStepIntReg:
panic("unknown ABI part kind")
}
}
- ret[i] = Value{toRType(tv), s, flagIndir | flag(tv.Kind())}
+ ret[i] = Value{tv, s, flagIndir | flag(tv.Kind())}
}
}
ptr := frame
in := make([]Value, 0, int(ftyp.InCount))
for i, typ := range ftyp.InSlice() {
- typ := toRType(typ) // FIXME cleanup this loop body
if typ.Size() == 0 {
- in = append(in, Zero(typ))
+ in = append(in, Zero(toRType(typ)))
continue
}
v := Value{typ, nil, flag(typ.Kind())}
// Pointer-valued data gets put directly
// into v.ptr.
if steps[0].kind != abiStepPointer {
- print("kind=", steps[0].kind, ", type=", typ.String(), "\n")
+ print("kind=", steps[0].kind, ", type=", stringFor(typ), "\n")
panic("mismatch between ABI description and types")
}
v.ptr = regs.Ptrs[steps[0].ireg]
// We must clear the destination before calling assignTo,
// in case assignTo writes (with memory barriers) to the
// target location used as scratch space. See issue 39541.
- v = v.assignTo("reflect.MakeFunc", toRType(typ), nil)
+ v = v.assignTo("reflect.MakeFunc", typ, nil)
stepsLoop:
for _, st := range abid.ret.stepsForValue(i) {
switch st.kind {
// The return value rcvrtype gives the method's actual receiver type.
// The return value t gives the method type signature (without the receiver).
// The return value fn is a pointer to the method code.
-func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) {
+func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *abi.Type, t *funcType, fn unsafe.Pointer) {
i := methodIndex
- if v.typ.Kind() == Interface {
+ if v.typ.Kind() == abi.Interface {
tt := (*interfaceType)(unsafe.Pointer(v.typ))
if uint(i) >= uint(len(tt.Methods)) {
panic("reflect: internal error: invalid method index")
t = (*funcType)(unsafe.Pointer(tt.typeOff(m.Typ)))
} else {
rcvrtype = v.typ
- ms := v.typ.exportedMethods()
+ ms := v.typ.ExportedMethods()
if uint(i) >= uint(len(ms)) {
panic("reflect: internal error: invalid method index")
}
m := ms[i]
- if !v.typ.nameOff(m.Name).IsExported() {
+ if !nameOffFor(v.typ, m.Name).IsExported() {
panic("reflect: " + op + " of unexported method")
}
- ifn := v.typ.textOff(m.Ifn)
+ ifn := textOffFor(v.typ, m.Ifn)
fn = unsafe.Pointer(&ifn)
- t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.Mtyp)))
+ t = (*funcType)(unsafe.Pointer(typeOffFor(v.typ, m.Mtyp)))
}
return
}
// methods, which always uses one word to record the receiver.
func storeRcvr(v Value, p unsafe.Pointer) {
t := v.typ
- if t.Kind() == Interface {
+ if t.Kind() == abi.Interface {
// the interface data word becomes the receiver word
iface := (*nonEmptyInterface)(v.ptr)
*(*unsafe.Pointer)(p) = iface.word
if vStep.size != mStep.size {
panic("method ABI and value ABI do not align")
}
- typedmemmove(toRType(t),
+ typedmemmove(t,
add(methodFrame, mStep.stkOff, "precomputed stack offset"),
add(valueFrame, vStep.stkOff, "precomputed stack offset"))
continue
case Chan:
return chancap(v.pointer())
case Ptr:
- if v.typ.Elem().Kind() == Array {
+ if v.typ.Elem().Kind() == abi.Array {
return v.typ.Elem().Len()
}
panic("reflect: call of reflect.Value.Cap on ptr to non-array Value")
fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
// Using an unexported field forces flagRO.
if !field.Name.IsExported() {
- if field.embedded() {
+ if field.Embedded() {
fl |= flagEmbedRO
} else {
fl |= flagStickyRO
v.mustBe(Struct)
for i, x := range index {
if i > 0 {
- if v.Kind() == Pointer && v.typ.Elem().Kind() == Struct {
+ if v.Kind() == Pointer && v.typ.Elem().Kind() == abi.Struct {
if v.IsNil() {
panic("reflect: indirection through nil pointer to embedded struct")
}
v.mustBe(Struct)
for i, x := range index {
if i > 0 {
- if v.Kind() == Ptr && v.typ.Elem().Kind() == Struct {
+ if v.Kind() == Ptr && v.typ.Elem().Kind() == abi.Struct {
if v.IsNil() {
- return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + v.typ.Elem().Name())
+ return Value{}, errors.New("reflect: indirection through nil pointer to embedded struct field " + nameFor(v.typ.Elem()))
}
v = v.Elem()
}
// It panics if v's Kind is not struct.
func (v Value) FieldByName(name string) Value {
v.mustBe(Struct)
- if f, ok := v.typ.FieldByName(name); ok {
+ if f, ok := toRType(v.typ).FieldByName(name); ok {
return v.FieldByIndex(f.Index)
}
return Value{}
// It panics if v's Kind is not struct.
// It returns the zero Value if no field was found.
func (v Value) FieldByNameFunc(match func(string) bool) Value {
- if f, ok := v.typ.FieldByNameFunc(match); ok {
+ if f, ok := toRType(v.typ).FieldByNameFunc(match); ok {
return v.FieldByIndex(f.Index)
}
return Value{}
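Both lookups return the zero Value when nothing matches (illustrative, type name ours):

	type User struct{ Name string }
	v := reflect.ValueOf(User{Name: "gopher"})
	fmt.Println(v.FieldByName("Name"))           // gopher
	fmt.Println(v.FieldByName("nope").IsValid()) // false
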
if uint(i) >= uint(tt.Len) {
panic("reflect: array index out of range")
}
- typ := toRType(tt.Elem)
+ typ := tt.Elem
offset := uintptr(i) * typ.Size()
// Either flagIndir is set and v.ptr points at array,
return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
case Array:
// If the type is comparable, then compare directly with zero.
- if v.typ.t.Equal != nil && v.typ.Size() <= maxZero {
+ if v.typ.Equal != nil && v.typ.Size() <= maxZero {
if v.flag&flagIndir == 0 {
return v.ptr == nil
}
- return v.typ.t.Equal(v.ptr, unsafe.Pointer(&zeroVal[0]))
+ return v.typ.Equal(v.ptr, unsafe.Pointer(&zeroVal[0]))
}
n := v.Len()
return v.Len() == 0
case Struct:
// If the type is comparable, then compare directly with zero.
- if v.typ.t.Equal != nil && v.typ.Size() <= maxZero {
+ if v.typ.Equal != nil && v.typ.Size() <= maxZero {
if v.flag&flagIndir == 0 {
return v.ptr == nil
}
- return v.typ.t.Equal(v.ptr, unsafe.Pointer(&zeroVal[0]))
+ return v.typ.Equal(v.ptr, unsafe.Pointer(&zeroVal[0]))
}
n := v.NumField()
// String is bigger than a word; assume flagIndir.
return (*unsafeheader.String)(v.ptr).Len
case Ptr:
- if v.typ.Elem().Kind() == Array {
+ if v.typ.Elem().Kind() == abi.Array {
return v.typ.Elem().Len()
}
panic("reflect: call of reflect.Value.Len on ptr to non-array Value")
// copyVal returns a Value containing the map key or value at ptr,
// allocating a new variable as needed.
-func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value {
- if ifaceIndir(typ) {
+func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value {
+ if typ.IfaceIndir() {
// Copy result so future changes to the map
// won't change the underlying value.
c := unsafe_New(typ)
if v.typ == nil {
panic(&ValueError{"reflect.Value.Method", Invalid})
}
- if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
+ if v.flag&flagMethod != 0 || uint(i) >= uint(toRType(v.typ).NumMethod()) {
panic("reflect: Method index out of range")
}
- if v.typ.Kind() == Interface && v.IsNil() {
+ if v.typ.Kind() == abi.Interface && v.IsNil() {
panic("reflect: Method on nil interface value")
}
fl := v.flag.ro() | (v.flag & flagIndir)
if v.flag&flagMethod != 0 {
return 0
}
- return v.typ.NumMethod()
+ return toRType(v.typ).NumMethod()
}
// MethodByName returns a function value corresponding to the method
if v.flag&flagMethod != 0 {
return Value{}
}
- m, ok := v.typ.MethodByName(name)
+ m, ok := toRType(v.typ).MethodByName(name)
if !ok {
return Value{}
}
k := v.kind()
switch k {
case Pointer:
- if v.typ.t.PtrBytes == 0 {
+ if v.typ.PtrBytes == 0 {
val := *(*uintptr)(v.ptr)
// Since it is a not-in-heap pointer, all pointers to the heap are
// forbidden! See comment in Value.Elem and issue #48399.
panic("reflect: recv on send-only channel")
}
t := tt.Elem
- rt := toRType(t)
- val = Value{rt, nil, flag(t.Kind())}
+ val = Value{t, nil, flag(t.Kind())}
var p unsafe.Pointer
- if ifaceIndir(rt) {
- p = unsafe_New(rt)
+ if ifaceIndir(t) {
+ p = unsafe_New(t)
val.ptr = p
val.flag |= flagIndir
} else {
panic("reflect: send on recv-only channel")
}
x.mustBeExported()
- x = x.assignTo("reflect.Value.Send", toRType(tt.Elem), nil)
+ x = x.assignTo("reflect.Value.Send", tt.Elem, nil)
var p unsafe.Pointer
if x.flag&flagIndir != 0 {
p = x.ptr
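// Editor's sketch, not part of this change: Send and Recv on a reflected
// channel; Recv allocates through unsafe_New when the element type is
// stored indirectly, as shown above. Assumes "fmt" and "reflect" imports.
func ExampleValue_Recv() {
	ch := make(chan int, 1)
	cv := reflect.ValueOf(ch)
	cv.Send(reflect.ValueOf(7))
	x, ok := cv.Recv()
	fmt.Println(x.Int(), ok)
	// Output: 7 true
}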
func (v Value) SetBytes(x []byte) {
v.mustBeAssignable()
v.mustBe(Slice)
- if v.typ.Elem().Kind() != Uint8 {
+ if toRType(v.typ).Elem().Kind() != Uint8 { // TODO: add an Elem method to *abi.Type; fix mustBe(Slice) to return the slice type.
panic("reflect.Value.SetBytes of non-byte slice")
}
*(*[]byte)(v.ptr) = x
func (v Value) setRunes(x []rune) {
v.mustBeAssignable()
v.mustBe(Slice)
- if v.typ.Elem().Kind() != Int32 {
+ if v.typ.Elem().Kind() != abi.Int32 {
panic("reflect.Value.setRunes of non-rune slice")
}
*(*[]rune)(v.ptr) = x
}
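// Editor's sketch, not part of this change: SetBytes requires an addressable
// []byte value; setRunes is the unexported []rune analogue. Assumes "fmt"
// and "reflect" imports.
func ExampleValue_SetBytes() {
	b := []byte("old")
	reflect.ValueOf(&b).Elem().SetBytes([]byte("new"))
	fmt.Println(string(b))
	// Output: new
}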
fl := v.flag.ro() | flagIndir | flag(Slice)
- return Value{typ.common(), unsafe.Pointer(&x), fl}
+ return Value{typ.Common(), unsafe.Pointer(&x), fl}
}
// Slice3 is the 3-index form of the slice operation: it returns v[i:j:k].
}
fl := v.flag.ro() | flagIndir | flag(Slice)
- return Value{typ.common(), unsafe.Pointer(&x), fl}
+ return Value{typ.Common(), unsafe.Pointer(&x), fl}
}
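// Editor's sketch, not part of this change: Slice and Slice3 build the result
// header on the stack and wrap it with typ.Common(). Assumes "fmt" and
// "reflect" imports.
func ExampleValue_Slice3() {
	v := reflect.ValueOf([]int{0, 1, 2, 3, 4})
	s := v.Slice3(1, 3, 4) // v[1:3:4]
	fmt.Println(s.Len(), s.Cap())
	// Output: 2 3
}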
// String returns the string v's underlying value, as a string.
// Type returns v's type.
func (v Value) Type() Type {
if v.flag != 0 && v.flag&flagMethod == 0 {
- return v.typ
+ return (*rtype)(unsafe.Pointer(v.typ)) // inline of toRType(v.typ), so this method itself stays inlinable (checked by the inlining test)
}
return v.typeSlow()
}
panic(&ValueError{"reflect.Value.Type", Invalid})
}
if v.flag&flagMethod == 0 {
- return v.typ
+ return toRType(v.typ)
}
// Method value.
// v.typ describes the receiver, not the method type.
i := int(v.flag) >> flagMethodShift
- if v.typ.Kind() == Interface {
+ if v.typ.Kind() == abi.Interface {
// Method on interface.
tt := (*interfaceType)(unsafe.Pointer(v.typ))
if uint(i) >= uint(len(tt.Methods)) {
panic("reflect: internal error: invalid method index")
}
m := &tt.Methods[i]
- return v.typ.typeOff(m.Typ)
+ return toRType(typeOffFor(v.typ, m.Typ))
}
// Method on concrete type.
- ms := v.typ.exportedMethods()
+ ms := v.typ.ExportedMethods()
if uint(i) >= uint(len(ms)) {
panic("reflect: internal error: invalid method index")
}
m := ms[i]
- return v.typ.typeOff(m.Mtyp)
+ return toRType(typeOffFor(v.typ, m.Mtyp))
}
// CanUint reports whether Uint can be used without panicking.
k := v.kind()
switch k {
case Pointer:
- if v.typ.t.PtrBytes == 0 {
+ if v.typ.PtrBytes == 0 {
// Since it is a not-in-heap pointer, all pointers to the heap are
// forbidden! See comment in Value.Elem and issue #48399.
if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) {
case p.Len+n < 0:
panic("reflect.Value.Grow: slice overflow")
case p.Len+n > p.Cap:
- t := v.typ.Elem().(*rtype)
+ t := v.typ.Elem()
*p = growslice(t, *p, n)
}
}
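// Editor's sketch, not part of this change: Grow now feeds the element's
// *abi.Type straight to runtime growslice. Assumes "fmt" and "reflect"
// imports.
func ExampleValue_Grow() {
	s := []int{1}
	v := reflect.ValueOf(&s).Elem()
	v.Grow(4) // capacity for at least 4 more elements
	fmt.Println(v.Len(), v.Cap() >= 5)
	// Output: 1 true
}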
sk := src.kind()
var stringCopy bool
if sk != Array && sk != Slice {
- stringCopy = sk == String && dst.typ.Elem().Kind() == Uint8
+ stringCopy = sk == String && dst.typ.Elem().Kind() == abi.Uint8
if !stringCopy {
panic(&ValueError{"reflect.Copy", sk})
}
de := dst.typ.Elem()
if !stringCopy {
se := src.typ.Elem()
- typesMustMatch("reflect.Copy", de, se)
+ typesMustMatch("reflect.Copy", toType(de), toType(se))
}
var ds, ss unsafeheader.Slice
ss.Cap = sh.Len
}
- return typedslicecopy(de.common(), ds, ss)
+ return typedslicecopy(de.Common(), ds, ss)
}
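// Editor's sketch, not part of this change: the stringCopy branch above is
// what lets Copy read from a string into a []byte destination. Assumes "fmt"
// and "reflect" imports.
func ExampleCopy() {
	dst := make([]byte, 3)
	n := reflect.Copy(reflect.ValueOf(dst), reflect.ValueOf("hey"))
	fmt.Println(n, string(dst))
	// Output: 3 hey
}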
// A runtimeSelect is a single case passed to rselect.
panic("reflect.Select: SendDir case missing Send value")
}
v.mustBeExported()
- v = v.assignTo("reflect.Select", toRType(tt.Elem), nil)
+ v = v.assignTo("reflect.Select", tt.Elem, nil)
if v.flag&flagIndir != 0 {
rc.val = v.ptr
} else {
}
rc.ch = ch.pointer()
rc.typ = toRType(&tt.Type)
- rc.val = unsafe_New(toRType(tt.Elem))
+ rc.val = unsafe_New(tt.Elem)
}
}
if runcases[chosen].dir == SelectRecv {
tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
t := tt.Elem
- rt := toRType(t)
p := runcases[chosen].val
fl := flag(t.Kind())
- if ifaceIndir(rt) {
- recv = Value{rt, p, fl | flagIndir}
+ if t.IfaceIndir() {
+ recv = Value{t, p, fl | flagIndir}
} else {
- recv = Value{rt, *(*unsafe.Pointer)(p), fl}
+ recv = Value{t, *(*unsafe.Pointer)(p), fl}
}
}
return chosen, recv, recvOK
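// Editor's sketch, not part of this change: a one-case Select exercising the
// SelectRecv path, where recv is rebuilt from the channel's *abi.Type element
// as shown above. Assumes "fmt" and "reflect" imports.
func ExampleSelect() {
	ch := make(chan int, 1)
	ch <- 9
	cases := []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}}
	chosen, v, ok := reflect.Select(cases)
	fmt.Println(chosen, v.Int(), ok)
	// Output: 0 9 true
}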
*/
// implemented in package runtime
-func unsafe_New(*rtype) unsafe.Pointer
-func unsafe_NewArray(*rtype, int) unsafe.Pointer
+func unsafe_New(*abi.Type) unsafe.Pointer
+func unsafe_NewArray(*abi.Type, int) unsafe.Pointer
// MakeSlice creates a new zero-initialized slice value
// for the specified slice type, length, and capacity.
panic("reflect.MakeSlice: len > cap")
}
- s := unsafeheader.Slice{Data: unsafe_NewArray(typ.Elem().(*rtype), cap), Len: len, Cap: cap}
- return Value{typ.(*rtype), unsafe.Pointer(&s), flagIndir | flag(Slice)}
+ s := unsafeheader.Slice{Data: unsafe_NewArray(&(typ.Elem().(*rtype).t), cap), Len: len, Cap: cap}
+ return Value{&typ.(*rtype).t, unsafe.Pointer(&s), flagIndir | flag(Slice)}
}
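// Editor's sketch, not part of this change: MakeSlice still asserts to
// *rtype, now reaching through to the embedded abi.Type for the runtime
// allocation. Assumes "fmt" and "reflect" imports.
func ExampleMakeSlice() {
	s := reflect.MakeSlice(reflect.TypeOf([]int(nil)), 3, 8)
	s.Index(0).SetInt(42)
	fmt.Println(s.Len(), s.Cap(), s.Index(0).Int())
	// Output: 3 8 42
}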
// MakeChan creates a new channel with the specified type and buffer size.
if typ.ChanDir() != BothDir {
panic("reflect.MakeChan: unidirectional channel type")
}
- t := typ.(*rtype)
+ t := typ.common()
ch := makechan(t, buffer)
return Value{t, ch, flag(Chan)}
}
if typ.Kind() != Map {
panic("reflect.MakeMapWithSize of non-map type")
}
- t := typ.(*rtype)
+ t := typ.common()
m := makemap(t, n)
return Value{t, m, flag(Map)}
}
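// Editor's sketch, not part of this change: MakeChan and MakeMapWithSize now
// pass typ.common() (an *abi.Type) to makechan/makemap; the public behavior
// is unchanged. Assumes "fmt" and "reflect" imports.
func ExampleMakeMapWithSize() {
	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
	m := reflect.MakeMapWithSize(mt, 8)
	m.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(1))
	fmt.Println(m.MapIndex(reflect.ValueOf("k")).Int())
	// Output: 1
}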
if typ == nil {
panic("reflect: Zero(nil)")
}
- t := typ.(*rtype)
+ t := &typ.(*rtype).t
fl := flag(t.Kind())
- if ifaceIndir(t) {
+ if t.IfaceIndir() {
var p unsafe.Pointer
if t.Size() <= maxZero {
p = unsafe.Pointer(&zeroVal[0])
if typ == nil {
panic("reflect: New(nil)")
}
- t := typ.(*rtype)
- pt := t.ptrTo()
+ t := &typ.(*rtype).t
+ pt := ptrTo(t)
if ifaceIndir(pt) {
// This is a pointer to a not-in-heap type.
panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)")
// For a conversion to an interface type, target, if not nil,
// is a suggested scratch space to use.
// target must be initialized memory (or nil).
-func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
+func (v Value) assignTo(context string, dst *abi.Type, target unsafe.Pointer) Value {
if v.flag&flagMethod != 0 {
v = makeMethodValue(context, v)
}
}
// Failed.
- panic(context + ": value of type " + v.typ.String() + " is not assignable to type " + dst.String())
+ panic(context + ": value of type " + stringFor(v.typ) + " is not assignable to type " + stringFor(dst))
}
// Convert returns the value v converted to type t.
}
op := convertOp(t.common(), v.typ)
if op == nil {
- panic("reflect.Value.Convert: value of type " + v.typ.String() + " cannot be converted to type " + t.String())
+ panic("reflect.Value.Convert: value of type " + stringFor(v.typ) + " cannot be converted to type " + t.String())
}
return op(v, t)
}
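// Editor's sketch, not part of this change: Convert dispatches through
// convertOp below; here the int-to-float path widens an int into a float64.
// Assumes "fmt" and "reflect" imports.
func ExampleValue_Convert() {
	f := reflect.ValueOf(3).Convert(reflect.TypeOf(float64(0)))
	fmt.Println(f.Float())
	// Output: 3
}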
// convertOp returns the function to convert a value of type src
// to a value of type dst. If the conversion is illegal, convertOp returns nil.
-func convertOp(dst, src *rtype) func(Value, Type) Value {
- switch src.Kind() {
+func convertOp(dst, src *abi.Type) func(Value, Type) Value {
+ switch Kind(src.Kind()) {
case Int, Int8, Int16, Int32, Int64:
- switch dst.Kind() {
+ switch Kind(dst.Kind()) {
case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
return cvtInt
case Float32, Float64:
}
case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
- switch dst.Kind() {
+ switch Kind(dst.Kind()) {
case Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
return cvtUint
case Float32, Float64:
}
case Float32, Float64:
- switch dst.Kind() {
+ switch Kind(dst.Kind()) {
case Int, Int8, Int16, Int32, Int64:
return cvtFloatInt
case Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
}
case Complex64, Complex128:
- switch dst.Kind() {
+ switch Kind(dst.Kind()) {
case Complex64, Complex128:
return cvtComplex
}
case String:
- if dst.Kind() == Slice && dst.Elem().PkgPath() == "" {
- switch dst.Elem().Kind() {
+ if dst.Kind() == abi.Slice && pkgPathFor(dst.Elem()) == "" {
+ switch Kind(dst.Elem().Kind()) {
case Uint8:
return cvtStringBytes
case Int32:
}
case Slice:
- if dst.Kind() == String && src.Elem().PkgPath() == "" {
- switch src.Elem().Kind() {
+ if dst.Kind() == abi.String && pkgPathFor(src.Elem()) == "" {
+ switch Kind(src.Elem().Kind()) {
case Uint8:
return cvtBytesString
case Int32:
}
// "x is a slice, T is a pointer-to-array type,
// and the slice and array types have identical element types."
- if dst.Kind() == Pointer && dst.Elem().Kind() == Array && src.Elem() == dst.Elem().Elem() {
+ if dst.Kind() == abi.Pointer && dst.Elem().Kind() == abi.Array && src.Elem() == dst.Elem().Elem() {
return cvtSliceArrayPtr
}
// "x is a slice, T is an array type,
// and the slice and array types have identical element types."
- if dst.Kind() == Array && src.Elem() == dst.Elem() {
+ if dst.Kind() == abi.Array && src.Elem() == dst.Elem() {
return cvtSliceArray
}
case Chan:
- if dst.Kind() == Chan && specialChannelAssignability(dst, src) {
+ if dst.Kind() == abi.Chan && specialChannelAssignability(dst, src) {
return cvtDirect
}
}
}
// dst and src are non-defined pointer types with same underlying base type.
- if dst.Kind() == Pointer && dst.Name() == "" &&
- src.Kind() == Pointer && src.Name() == "" &&
- haveIdenticalUnderlyingType(dst.Elem().common(), src.Elem().common(), false) {
+ if dst.Kind() == abi.Pointer && nameFor(dst) == "" &&
+ src.Kind() == abi.Pointer && nameFor(src) == "" &&
+ haveIdenticalUnderlyingType(elem(dst), elem(src), false) {
return cvtDirect
}
if implements(dst, src) {
- if src.Kind() == Interface {
+ if src.Kind() == abi.Interface {
return cvtI2I
}
return cvtT2I
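// Editor's sketch, not part of this change: the String and Slice cases above
// cover the special conversions, including slice-to-array (Go 1.20 and
// later). Assumes "fmt" and "reflect" imports.
func ExampleValue_Convert_slices() {
	b := reflect.ValueOf("héllo").Convert(reflect.TypeOf([]byte(nil)))
	a := reflect.ValueOf([]int{1, 2}).Convert(reflect.TypeOf([2]int{}))
	fmt.Println(b.Len(), a.Index(1).Int())
	// Output: 6 2
}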
if typ.NumMethod() == 0 {
*(*any)(target) = x
} else {
- ifaceE2I(typ.(*rtype), x, target)
+ ifaceE2I(typ.common(), x, target)
}
return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)}
}
//go:noescape
func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
-func makechan(typ *rtype, size int) (ch unsafe.Pointer)
-func makemap(t *rtype, cap int) (m unsafe.Pointer)
+func makechan(typ *abi.Type, size int) (ch unsafe.Pointer)
+func makemap(t *abi.Type, cap int) (m unsafe.Pointer)
//go:noescape
-func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+func mapaccess(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
//go:noescape
-func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
+func mapaccess_faststr(t *abi.Type, m unsafe.Pointer, key string) (val unsafe.Pointer)
//go:noescape
-func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
+func mapassign(t *abi.Type, m unsafe.Pointer, key, val unsafe.Pointer)
//go:noescape
-func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
+func mapassign_faststr(t *abi.Type, m unsafe.Pointer, key string, val unsafe.Pointer)
//go:noescape
-func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
+func mapdelete(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer)
//go:noescape
-func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
+func mapdelete_faststr(t *abi.Type, m unsafe.Pointer, key string)
//go:noescape
-func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
+func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter)
//go:noescape
func mapiterkey(it *hiter) (key unsafe.Pointer)
//go:noescape
func maplen(m unsafe.Pointer) int
-func mapclear(t *rtype, m unsafe.Pointer)
+func mapclear(t *abi.Type, m unsafe.Pointer)
// call calls fn with "stackArgsSize" bytes of stack arguments laid out
// at stackArgs and register arguments laid out in regArgs. frameSize is
//
//go:noescape
//go:linkname call runtime.reflectcall
-func call(stackArgsType *rtype, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
+func call(stackArgsType *abi.Type, f, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
-func ifaceE2I(t *rtype, src any, dst unsafe.Pointer)
+func ifaceE2I(t *abi.Type, src any, dst unsafe.Pointer)
// memmove copies size bytes to dst from src. No write barriers are used.
//
// typedmemmove copies a value of type t to dst from src.
//
//go:noescape
-func typedmemmove(t *rtype, dst, src unsafe.Pointer)
+func typedmemmove(t *abi.Type, dst, src unsafe.Pointer)
// typedmemclr zeros the value at ptr of type t.
//
//go:noescape
-func typedmemclr(t *rtype, ptr unsafe.Pointer)
+func typedmemclr(t *abi.Type, ptr unsafe.Pointer)
// typedmemclrpartial is like typedmemclr but assumes that
// dst points off bytes into the value and only clears size bytes.
//
//go:noescape
-func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
+func typedmemclrpartial(t *abi.Type, ptr unsafe.Pointer, off, size uintptr)
// typedslicecopy copies a slice of elemType values from src to dst,
// returning the number of elements copied.
//
//go:noescape
-func typedslicecopy(elemType *rtype, dst, src unsafeheader.Slice) int
+func typedslicecopy(elemType *abi.Type, dst, src unsafeheader.Slice) int
// typedarrayclear zeroes the value at ptr of an array of elemType,
// only clears len elem.
//
//go:noescape
-func typedarrayclear(elemType *rtype, ptr unsafe.Pointer, len int)
+func typedarrayclear(elemType *abi.Type, ptr unsafe.Pointer, len int)
//go:noescape
-func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+func typehash(t *abi.Type, p unsafe.Pointer, h uintptr) uintptr
func verifyNotInHeapPtr(p uintptr) bool
//go:noescape
-func growslice(t *rtype, old unsafeheader.Slice, num int) unsafeheader.Slice
+func growslice(t *abi.Type, old unsafeheader.Slice, num int) unsafeheader.Slice
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that