p.uncommonType = nil
p.ptrToThis = nil
- p.zero = unsafe.Pointer(&make([]byte, p.size)[0])
p.elem = t
ptrMap.m[t] = p
ch.elem = typ
ch.uncommonType = nil
ch.ptrToThis = nil
- ch.zero = unsafe.Pointer(&make([]byte, ch.size)[0])
return cachePut(ckey, &ch.rtype)
}
mt.reflexivekey = isReflexive(ktyp)
mt.uncommonType = nil
mt.ptrToThis = nil
- mt.zero = unsafe.Pointer(&make([]byte, mt.size)[0])
return cachePut(ckey, &mt.rtype)
}
ft.string = &str
ft.uncommonType = nil
ft.ptrToThis = nil
- ft.zero = unsafe.Pointer(&make([]byte, ft.size)[0])
funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
return ft
slice.elem = typ
slice.uncommonType = nil
slice.ptrToThis = nil
- slice.zero = unsafe.Pointer(&make([]byte, slice.size)[0])
return cachePut(ckey, &slice.rtype)
}
array.fieldAlign = typ.fieldAlign
array.uncommonType = nil
array.ptrToThis = nil
- if array.size > 0 {
- zero := make([]byte, array.size)
- array.zero = unsafe.Pointer(&zero[0])
- }
array.len = uintptr(count)
array.slice = slice.(*rtype)
throw("need padding in bucket (value)")
}
+ // make sure zero of element type is available.
+ mapzero(t.elem)
+
// find size parameter which will hold the requested # of elements
B := uint8(0)
for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
func reflect_ismapkey(t *_type) bool {
return ismapkey(t)
}
+
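+// zerobuf holds a lazily grown, persistently allocated run of zero bytes,
+// shared as the zero value by reflect-created element types too large for zerotiny.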
+var zerobuf struct {
+	lock mutex
+	p    *byte
+	size uintptr
+}
+
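+// zerotiny is a statically allocated zero value shared by all element types
+// no larger than 1024 bytes.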
+var zerotiny [1024]byte
+
+// mapzero ensures that t.zero points at a zero value for type t.
+// Types known to the compiler are in read-only memory and all point
+// to a single zero in the bss of a large enough size.
+// Types allocated by package reflect are in writable memory and
+// start out with zero set to nil; we initialize those on demand.
+func mapzero(t *_type) {
+	// Already done?
+	// Check without lock, so must use atomicload to sync with atomicstore in allocation case below.
+	if atomicloadp(unsafe.Pointer(&t.zero)) != nil {
+		return
+	}
+
+	// Small enough for static buffer?
+	if t.size <= uintptr(len(zerotiny)) {
+		atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(&zerotiny[0]))
+		return
+	}
+
+	// Use allocated buffer.
+	lock(&zerobuf.lock)
+	if zerobuf.size < t.size {
+		if zerobuf.size == 0 {
+			zerobuf.size = 4 * 1024
+		}
+		for zerobuf.size < t.size {
+			zerobuf.size *= 2
+			if zerobuf.size == 0 {
+				// need >2GB zero on 32-bit machine
+				throw("map element too large")
+			}
+		}
+		zerobuf.p = (*byte)(persistentalloc(zerobuf.size, 64, &memstats.other_sys))
+	}
+	atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(zerobuf.p))
+	unlock(&zerobuf.lock)
+}