func (t *rtype) common() *rtype { return t }
-var methodCache struct {
- sync.RWMutex
- m map[*rtype][]method
-}
+var methodCache sync.Map // map[*rtype][]method
func (t *rtype) exportedMethods() []method {
- methodCache.RLock()
- methods, found := methodCache.m[t]
- methodCache.RUnlock()
-
+ methodsi, found := methodCache.Load(t)
if found {
- return methods
+ return methodsi.([]method)
}
ut := t.uncommon()
break
}
}
+ var methods []method
if allExported {
methods = allm
} else {
methods = methods[:len(methods):len(methods)]
}
- methodCache.Lock()
- if methodCache.m == nil {
- methodCache.m = make(map[*rtype][]method)
- }
- methodCache.m[t] = methods
- methodCache.Unlock()
-
- return methods
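+	// Racing goroutines may each build a slice, but LoadOrStore keeps only
+	// the first one stored, so all callers share one canonical []method.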
+ methodsi, _ = methodCache.LoadOrStore(t, methods)
+ return methodsi.([]method)
}
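The Load-then-LoadOrStore sequence above is the idiom this change uses for every simple cache: reads are lock-free, racing writers may each build a candidate value, and LoadOrStore discards all but the first one stored. A minimal self-contained sketch of the idiom, with illustrative names (cache, cachedLen) that are not part of this patch:

package main

import (
	"fmt"
	"sync"
)

// cache is a sketch only; it is not a reflect internal.
var cache sync.Map // map[string]int

func cachedLen(key string) int {
	// Fast path: lock-free read, a hit on every call but the first.
	if v, ok := cache.Load(key); ok {
		return v.(int)
	}
	v := len(key) // stands in for an expensive computation
	// If another goroutine stored a value first, LoadOrStore returns it,
	// so every caller shares one canonical cached value per key.
	vi, _ := cache.LoadOrStore(key, v)
	return vi.(int)
}

func main() {
	fmt.Println(cachedLen("reflect")) // 7
}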
func (t *rtype) NumMethod() int {
return tt.NumMethod()
}
if t.tflag&tflagUncommon == 0 {
- return 0 // avoid methodCache lock in zero case
+ return 0 // avoid methodCache synchronization
}
return len(t.exportedMethods())
}
}
// ptrMap is the cache for PtrTo.
-var ptrMap struct {
- sync.RWMutex
- m map[*rtype]*ptrType
-}
+var ptrMap sync.Map // map[*rtype]*ptrType
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
}
// Check the cache.
- ptrMap.RLock()
- if m := ptrMap.m; m != nil {
- if p := m[t]; p != nil {
- ptrMap.RUnlock()
- return &p.rtype
- }
- }
- ptrMap.RUnlock()
-
- ptrMap.Lock()
- if ptrMap.m == nil {
- ptrMap.m = make(map[*rtype]*ptrType)
- }
- p := ptrMap.m[t]
- if p != nil {
- // some other goroutine won the race and created it
- ptrMap.Unlock()
- return &p.rtype
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
}
// Look in known types.
s := "*" + t.String()
for _, tt := range typesByString(s) {
- p = (*ptrType)(unsafe.Pointer(tt))
- if p.elem == t {
- ptrMap.m[t] = p
- ptrMap.Unlock()
- return &p.rtype
+ p := (*ptrType)(unsafe.Pointer(tt))
+ if p.elem != t {
+ continue
}
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// Create a new ptrType starting with the description
pp.elem = t
- ptrMap.m[t] = &pp
- ptrMap.Unlock()
- return &pp.rtype
+ pi, _ := ptrMap.LoadOrStore(t, &pp)
+ return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
}
// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
-var lookupCache struct {
- sync.RWMutex
- m map[cacheKey]*rtype
-}
+var lookupCache sync.Map // map[cacheKey]*rtype
// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
extra uintptr
}
-// cacheGet looks for a type under the key k in the lookupCache.
-// If it finds one, it returns that type.
-// If not, it returns nil with the cache locked.
-// The caller is expected to use cachePut to unlock the cache.
-func cacheGet(k cacheKey) Type {
- lookupCache.RLock()
- t := lookupCache.m[k]
- lookupCache.RUnlock()
- if t != nil {
- return t
- }
-
- lookupCache.Lock()
- t = lookupCache.m[k]
- if t != nil {
- lookupCache.Unlock()
- return t
- }
-
- if lookupCache.m == nil {
- lookupCache.m = make(map[cacheKey]*rtype)
- }
-
- return nil
-}
-
-// cachePut stores the given type in the cache, unlocks the cache,
-// and returns the type. It is expected that the cache is locked
-// because cacheGet returned nil.
-func cachePut(k cacheKey, t *rtype) Type {
- lookupCache.m[k] = t
- lookupCache.Unlock()
- return t
-}
-
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
- sync.RWMutex
- m map[uint32][]*rtype // keyed by hash calculated in FuncOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
}
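funcLookupCache layers a Mutex over a sync.Map because FuncOf must append to a per-hash slice rather than store a single value: loads stay lock-free, the Mutex serializes writers, and slices are replaced wholesale instead of mutated in place. A self-contained sketch of that pattern under assumed names (bucketCache, lookup, insert), not code from this patch:

package main

import (
	"fmt"
	"sync"
)

var bucketCache struct {
	sync.Mutex // Guards stores (but not loads) on m.
	m          sync.Map // map[uint32][]string, values are append-only
}

// lookup scans the bucket for hash without taking the Mutex.
func lookup(hash uint32, want string) bool {
	if vs, ok := bucketCache.m.Load(hash); ok {
		for _, s := range vs.([]string) {
			if s == want {
				return true
			}
		}
	}
	return false
}

// insert appends s to the bucket for hash. Elements below the old length
// are never overwritten, and a reader never indexes past the length of the
// slice header it loaded, so unlocked lookups stay safe.
func insert(hash uint32, s string) {
	bucketCache.Lock()
	defer bucketCache.Unlock()
	var vs []string
	if vi, ok := bucketCache.m.Load(hash); ok {
		vs = vi.([]string)
	}
	bucketCache.m.Store(hash, append(vs, s))
}

func main() {
	insert(7, "chan int")
	fmt.Println(lookup(7, "chan int")) // true
}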
// ChanOf returns the channel type with the given direction and element type.
// Look in cache.
ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
- if ch := cacheGet(ckey); ch != nil {
- return ch
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
}
// This restriction is imposed by the gc compiler and the runtime.
if typ.size >= 1<<16 {
- lookupCache.Unlock()
panic("reflect.ChanOf: element size too large")
}
var s string
switch dir {
default:
- lookupCache.Unlock()
panic("reflect.ChanOf: invalid dir")
case SendDir:
s = "chan<- " + typ.String()
for _, tt := range typesByString(s) {
ch := (*chanType)(unsafe.Pointer(tt))
if ch.elem == typ && ch.dir == uintptr(dir) {
- return cachePut(ckey, tt)
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
}
}
ch.hash = fnv1(typ.hash, 'c', byte(dir))
ch.elem = typ
- return cachePut(ckey, &ch.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
}
func ismapkey(*rtype) bool // implemented in runtime
// Look in cache.
ckey := cacheKey{Map, ktyp, etyp, 0}
- if mt := cacheGet(ckey); mt != nil {
- return mt
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
}
// Look in known types.
for _, tt := range typesByString(s) {
mt := (*mapType)(unsafe.Pointer(tt))
if mt.key == ktyp && mt.elem == etyp {
- return cachePut(ckey, tt)
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
}
}
mt.needkeyupdate = needKeyUpdate(ktyp)
mt.ptrToThis = 0
- return cachePut(ckey, &mt.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
}
type funcTypeFixed4 struct {
}
// Look in cache.
- funcLookupCache.RLock()
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- funcLookupCache.RUnlock()
- return t
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
}
}
- funcLookupCache.RUnlock()
// Not in cache, lock and retry.
funcLookupCache.Lock()
defer funcLookupCache.Unlock()
- if funcLookupCache.m == nil {
- funcLookupCache.m = make(map[uint32][]*rtype)
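+	// Re-check the cache under the lock: an identical type may have been
+	// stored between the lock-free load above and acquiring the Mutex.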
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- return t
+
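+	// addToCache appends tt to the bucket for hash and returns it. Writers
+	// are serialized by funcLookupCache.Mutex, and elements already visible
+	// to readers are never overwritten, so unlocked loads remain safe.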
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
}
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
}
// Look in known types for the same string representation.
str := funcStr(ft)
for _, tt := range typesByString(str) {
if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
- funcLookupCache.m[hash] = append(funcLookupCache.m[hash], tt)
- return tt
+ return addToCache(tt)
}
}
// Populate the remaining fields of ft and store in cache.
ft.str = resolveReflectName(newName(str, "", "", false))
ft.ptrToThis = 0
- funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
-
- return &ft.rtype
+ return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType.
// Look in cache.
ckey := cacheKey{Slice, typ, nil, 0}
- if slice := cacheGet(ckey); slice != nil {
- return slice
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
}
// Look in known types.
for _, tt := range typesByString(s) {
slice := (*sliceType)(unsafe.Pointer(tt))
if slice.elem == typ {
- return cachePut(ckey, tt)
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
}
}
slice.elem = typ
slice.ptrToThis = 0
- return cachePut(ckey, &slice.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
- sync.RWMutex
- m map[uint32][]interface {
- common() *rtype
- } // keyed by hash calculated in StructOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+	// Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
}
type structTypeUncommon struct {
var typ *structType
var ut *uncommonType
- var typPin interface {
- common() *rtype
- } // structTypeFixedN
switch {
case len(methods) == 0:
t := new(structTypeUncommon)
typ = &t.structType
ut = &t.u
- typPin = t
case len(methods) <= 4:
t := new(structTypeFixed4)
typ = &t.structType
ut = &t.u
copy(t.m[:], methods)
- typPin = t
case len(methods) <= 8:
t := new(structTypeFixed8)
typ = &t.structType
ut = &t.u
copy(t.m[:], methods)
- typPin = t
case len(methods) <= 16:
t := new(structTypeFixed16)
typ = &t.structType
ut = &t.u
copy(t.m[:], methods)
- typPin = t
case len(methods) <= 32:
t := new(structTypeFixed32)
typ = &t.structType
ut = &t.u
copy(t.m[:], methods)
- typPin = t
default:
panic("reflect.StructOf: too many methods")
}
*typ = *prototype
typ.fields = fs
- // Look in cache
- structLookupCache.RLock()
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- structLookupCache.RUnlock()
- return t
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
}
}
- structLookupCache.RUnlock()
- // not in cache, lock and retry
+ // Not in cache, lock and retry.
structLookupCache.Lock()
defer structLookupCache.Unlock()
- if structLookupCache.m == nil {
- structLookupCache.m = make(map[uint32][]interface {
- common() *rtype
- })
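+	// As in FuncOf, re-check the cache under the lock before building the
+	// type: another goroutine may have stored an identical one meanwhile.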
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
+
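+	// addToCache appends t to the bucket for hash while holding
+	// structLookupCache.Mutex. Storing the Type value also keeps the
+	// underlying structTypeFixedN allocation reachable, which provides
+	// the pinning this cache exists for.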
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
}
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
}
// Look in known types.
// even if 't' wasn't a structType with methods, we should be ok
// as the 'u uncommonType' field won't be accessed except when
// tflag&tflagUncommon is set.
- structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
- return t
+ return addToCache(t)
}
}
typ.kind &^= kindDirectIface
}
- structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin)
- return &typ.rtype
+ return addToCache(&typ.rtype)
}
func runtimeStructField(field StructField) structField {
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
typ := elem.(*rtype)
- // call SliceOf here as it calls cacheGet/cachePut.
- // ArrayOf also calls cacheGet/cachePut and thus may modify the state of
- // the lookupCache mutex.
- slice := SliceOf(elem)
// Look in cache.
ckey := cacheKey{Array, typ, nil, uintptr(count)}
- if array := cacheGet(ckey); array != nil {
- return array
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
}
// Look in known types.
for _, tt := range typesByString(s) {
array := (*arrayType)(unsafe.Pointer(tt))
if array.elem == typ {
- return cachePut(ckey, tt)
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
}
}
array.align = typ.align
array.fieldAlign = typ.fieldAlign
array.len = uintptr(count)
- array.slice = slice.(*rtype)
+ array.slice = SliceOf(elem).(*rtype)
array.kind &^= kindNoPointers
switch {
array.kind &^= kindDirectIface
}
- return cachePut(ckey, &array.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
}
func appendVarint(x []byte, v uintptr) []byte {
framePool *sync.Pool
}
-var layoutCache struct {
- sync.RWMutex
- m map[layoutKey]layoutType
-}
+var layoutCache sync.Map // map[layoutKey]layoutType
// funcLayout computes a struct type representing the layout of the
// function arguments and return values for the function type t.
panic("reflect: funcLayout with interface receiver " + rcvr.String())
}
k := layoutKey{t, rcvr}
- layoutCache.RLock()
- if x := layoutCache.m[k]; x.t != nil {
- layoutCache.RUnlock()
- return x.t, x.argSize, x.retOffset, x.stack, x.framePool
- }
- layoutCache.RUnlock()
- layoutCache.Lock()
- if x := layoutCache.m[k]; x.t != nil {
- layoutCache.Unlock()
- return x.t, x.argSize, x.retOffset, x.stack, x.framePool
+ if lti, ok := layoutCache.Load(k); ok {
+ lt := lti.(layoutType)
+ return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
}
tt := (*funcType)(unsafe.Pointer(t))
x.str = resolveReflectName(newName(s, "", "", false))
// cache result for future callers
- if layoutCache.m == nil {
- layoutCache.m = make(map[layoutKey]layoutType)
- }
framePool = &sync.Pool{New: func() interface{} {
return unsafe_New(x)
}}
- layoutCache.m[k] = layoutType{
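+	// Racing goroutines may both build a layout, but LoadOrStore keeps the
+	// first one stored; returning fields of the stored value below ensures
+	// that all callers share a single framePool per key.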
+ lti, _ := layoutCache.LoadOrStore(k, layoutType{
t: x,
argSize: argSize,
retOffset: retOffset,
stack: ptrmap,
framePool: framePool,
- }
- layoutCache.Unlock()
- return x, argSize, retOffset, ptrmap, framePool
+ })
+ lt := lti.(layoutType)
+ return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
}
// ifaceIndir reports whether t is stored indirectly in an interface value.