var Disable_checknil int
-var zerosize int64
-
type Flow struct {
Prog *obj.Prog // actual instruction
P1 *Flow // predecessors of this instruction: p1,
dumpglobls()
externdcl = tmp
- zero := Pkglookup("zerovalue", Runtimepkg)
- ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
-
dumpdata()
obj.Writeobjdirect(Ctxt, bout)
sptr = weaktypesym(tptr)
}
- // All (non-reflect-allocated) Types share the same zero object.
- // Each place in the compiler where a pointer to the zero object
- // might be returned by a runtime call (map access return value,
- // 2-arg type cast) declares the size of the zerovalue it needs.
- // The linker magically takes the max of all the sizes.
- zero := Pkglookup("zerovalue", Runtimepkg)
-
gcsym, useGCProg, ptrdata := dgcsym(t)
// We use size 0 here so we get the pointer to the zero value,
ot += Widthptr
ot = dsymptr(s, ot, sptr, 0) // ptrto type
- ot = dsymptr(s, ot, zero, 0) // ptr to zero value
+ ot = duintptr(s, ot, 0) // ptr to zero value (unused)
return ot
}
typecheck(&n, Etop)
walkexpr(&n, init)
- // mapaccess needs a zero value to be at least this big.
- if zerosize < t.Type.Width {
- zerosize = t.Type.Width
- }
-
// TODO: ptr is always non-nil, so disable nil check for this OIND op.
goto ret
n.Type = t.Type
n.Typecheck = 1
- // mapaccess needs a zero value to be at least this big.
- if zerosize < t.Type.Width {
- zerosize = t.Type.Width
- }
goto ret
case ORECV:
string *string // string form; unnecessary but undeniably useful
*uncommonType // (relatively) uncommon fields
ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
- zero unsafe.Pointer // pointer to zero value
+ zero unsafe.Pointer // unused
}
// a copy of runtime.typeAlg
throw("need padding in bucket (value)")
}
- // make sure zero of element type is available.
+ // make sure zeroptr is large enough
mapzero(t.elem)
// find size parameter which will hold the requested # of elements
raceReadObjectPC(t.key, key, callerpc, pc)
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
}
}
raceReadObjectPC(t.key, key, callerpc, pc)
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
}
}
return ismapkey(t)
}
-var zerobuf struct {
- lock mutex
- p *byte
- size uintptr
-}
+var zerolock mutex
-var zerotiny [1024]byte
+const initialZeroSize = 1024
-// mapzero ensures that t.zero points at a zero value for type t.
-// Types known to the compiler are in read-only memory and all point
-// to a single zero in the bss of a large enough size.
-// Types allocated by package reflect are in writable memory and
-// start out with zero set to nil; we initialize those on demand.
-func mapzero(t *_type) {
- // On ARM, atomicloadp is implemented as xadd(p, 0),
- // so we cannot use atomicloadp on read-only memory.
- // Check whether the pointer is in the heap; if not, it's not writable
- // so the zero value must already be set.
- if GOARCH == "arm" && !inheap(uintptr(unsafe.Pointer(t))) {
- if t.zero == nil {
- print("runtime: map element ", *t._string, " missing zero value\n")
- throw("mapzero")
- }
- return
- }
+var zeroinitial [initialZeroSize]byte
- // Already done?
- // Check without lock, so must use atomicload to sync with atomicstore in allocation case below.
- if atomicloadp(unsafe.Pointer(&t.zero)) != nil {
- return
- }
+// All accesses to zeroptr and zerosize must be atomic so that they
+// can be accessed without locks in the common case.
+var zeroptr unsafe.Pointer = unsafe.Pointer(&zeroinitial)
+var zerosize uintptr = initialZeroSize
- // Small enough for static buffer?
- if t.size <= uintptr(len(zerotiny)) {
- atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(&zerotiny[0]))
+// mapzero ensures that zeroptr points to a buffer large enough to
+// serve as the zero value for t.
+func mapzero(t *_type) {
+	// Is the type small enough for the existing buffer?
+	cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	if t.size <= cursize {
 		return
 	}
-	// Use allocated buffer.
-	lock(&zerobuf.lock)
-	if zerobuf.size < t.size {
-		if zerobuf.size == 0 {
-			zerobuf.size = 4 * 1024
-		}
-		for zerobuf.size < t.size {
-			zerobuf.size *= 2
-			if zerobuf.size == 0 {
+	// Allocate a new buffer.
+	lock(&zerolock)
+	// Reload under the lock: another goroutine may already have grown it.
+	cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	if cursize < t.size {
+		for cursize < t.size {
+			cursize *= 2
+			if cursize == 0 {
 			// need >2GB zero on 32-bit machine
 			throw("map element too large")
 		}
 	}
-		zerobuf.p = (*byte)(persistentalloc(zerobuf.size, 64, &memstats.other_sys))
+		// Publish the new buffer before the new size, so a reader that
+		// observes the larger size also observes a buffer that big.
+		atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
+		atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(cursize))
 	}
-	atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(zerobuf.p))
-	unlock(&zerobuf.lock)
+	unlock(&zerolock)
 }
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
var b *bmap
if h.B == 0 {
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
key := (*stringStruct)(unsafe.Pointer(&ky))
if h.B == 0 {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
}
}
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
}
}
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero)
+ return atomicloadp(unsafe.Pointer(&zeroptr))
}
}
}
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
key := (*stringStruct)(unsafe.Pointer(&ky))
if h.B == 0 {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
}
}
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
}
}
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
}
b = b.overflow(t)
if b == nil {
- return unsafe.Pointer(t.elem.zero), false
+ return atomicloadp(unsafe.Pointer(&zeroptr)), false
}
}
}
_string *string
x *uncommontype
ptrto *_type
- zero *byte // ptr to the zero value for this type
+ zero *byte // unused
}
type method struct {