import (
"fmt"
+ "internal/abi"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
- // Check runtime/map.go:maxElemSize before changing.
- if t.Elem().Size() > 128 {
+ if t.Elem().Size() > abi.MapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {
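Note (not part of the patch): the point of this hunk is that the compiler only selects a mapfast* runtime routine when the element is small enough to live inline in the bucket; the cutoff now comes from internal/abi instead of a hard-coded 128. A minimal sketch of that cutoff, with the abi value mirrored locally (internal/abi is not importable outside the standard library) and fastEligible a hypothetical helper name:

package sketch

// mapMaxElemBytes mirrors abi.MapMaxElemBytes (128).
const mapMaxElemBytes = 128

// fastEligible reports whether a map element of the given size could use one
// of the mapfast* runtime routines. Larger elements are stored behind a
// pointer, which the fast routines do not handle, so the compiler falls back
// to the generic (mapslow) calls. Illustration only; the real mapfast also
// switches on the key's algorithm type.
func fastEligible(elemSize int64) bool {
    return elemSize <= mapMaxElemBytes
}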
// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
+ // Maximum number of key/elem pairs a bucket can hold.
MapBucketCountBits = 3 // log2 of number of elements in a bucket.
MapBucketCount = 1 << MapBucketCountBits
- MapMaxKeyBytes = 128 // Must fit in a uint8.
- MapMaxElemBytes = 128 // Must fit in a uint8.
+
+ // Maximum key or elem size to keep inline (instead of mallocing per element).
+ // Must fit in a uint8.
+ // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
+ MapMaxKeyBytes = 128
+ MapMaxElemBytes = 128
)
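Note (not part of the patch): these constants pin down the bucket shape that the compiler, the runtime, and the gdb pretty-printer all have to agree on. A rough sketch of what they imply for a bucket's footprint, assuming 8-byte pointers and ignoring alignment padding; the constants are mirrored locally and bucketBytes is a hypothetical helper:

package sketch

import "fmt"

const (
    mapBucketCountBits = 3                      // abi.MapBucketCountBits
    mapBucketCount     = 1 << mapBucketCountBits // 8 key/elem pairs per bucket
)

// bucketBytes sketches a bucket's footprint for a given key/elem size:
// one tophash byte per slot, all keys, all elems, then one overflow pointer.
func bucketBytes(keySize, elemSize uintptr) uintptr {
    const ptrSize = 8
    return mapBucketCount + mapBucketCount*keySize + mapBucketCount*elemSize + ptrSize
}

func main() {
    fmt.Println(bucketBytes(8, 8)) // 144 bytes for a map[int64]int64 bucket
}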
// ZeroValSize is the size in bytes of runtime.zeroVal.
b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
n := 0
for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < bucketCnt; i++ {
+ for i := 0; i < abi.MapBucketCount; i++ {
if b.tophash[i] != emptyRest {
n++
}
}
k := 0
for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < bucketCnt; i++ {
+ for i := 0; i < abi.MapBucketCount; i++ {
if k < n && b.tophash[i] == emptyRest {
panic("early emptyRest")
}
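Note (not part of the patch): the "early emptyRest" panic checks the tophash sentinel invariant: emptyRest means this cell and every later cell in the bucket and its overflow chain are empty, so it may not appear before all occupied cells have been seen. For reference, the sentinel values as defined in runtime/map.go, mirrored here:

package sketch

// Mirror of the tophash sentinel values from runtime/map.go, for reference.
const (
    emptyRest      = 0 // cell is empty, and so are all higher cells and overflow buckets
    emptyOne       = 1 // cell is empty
    evacuatedX     = 2 // key/elem valid; entry evacuated to the first half of the larger table
    evacuatedY     = 3 // key/elem valid; entry evacuated to the second half of the larger table
    evacuatedEmpty = 4 // cell is empty; bucket has been evacuated
    minTopHash     = 5 // minimum tophash value for a normal, filled cell
)

// isEmpty mirrors the runtime helper: both empty states compare below evacuatedX.
func isEmpty(x uint8) bool { return x <= emptyOne }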
const (
// Maximum number of key/elem pairs a bucket can hold.
bucketCntBits = abi.MapBucketCountBits
- bucketCnt = abi.MapBucketCount
- // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
- // Because of minimum alignment rules, bucketCnt is known to be at least 8.
+ // Maximum average load of a bucket that triggers growth is abi.MapBucketCount*13/16 (about 80% full).
+ // Because of minimum alignment rules, abi.MapBucketCount is known to be at least 8.
// Represent as loadFactorNum/loadFactorDen, to allow integer math.
loadFactorDen = 2
- loadFactorNum = loadFactorDen * bucketCnt * 13 / 16
-
- // Maximum key or elem size to keep inline (instead of mallocing per element).
- // Must fit in a uint8.
- // Fast versions cannot handle big elems - the cutoff size for
- // fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
- maxKeySize = abi.MapMaxKeyBytes
- maxElemSize = abi.MapMaxElemBytes
+ loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16
// data offset should be the size of the bmap struct, but needs to be
// aligned correctly. For amd64p32 this means 64-bit alignment
// tophash generally contains the top byte of the hash value
// for each key in this bucket. If tophash[0] < minTopHash,
// tophash[0] is a bucket evacuation state instead.
- tophash [bucketCnt]uint8
+ tophash [abi.MapBucketCount]uint8
- // Followed by bucketCnt keys and then bucketCnt elems.
+ // Followed by abi.MapBucketCount keys and then abi.MapBucketCount elems.
// NOTE: packing all the keys together and then all the elems together makes the
// code a bit more complicated than alternating key/elem/key/elem/... but it allows
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
k = *((*unsafe.Pointer)(k))
}
if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
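Note (not part of the patch): the elem address computed above comes straight from the bucket layout described earlier (tophash header, then all keys, then all elems). A small sketch of that addressing, with the 8-slot count written out and the hmap/maptype plumbing omitted; keyAddr and elemAddr are hypothetical helpers:

package sketch

import "unsafe"

// dataOffset stands in for the size of the bmap header (the tophash array,
// suitably aligned).
const bucketCount = 8 // abi.MapBucketCount

// keyAddr returns the address of key i: keys are packed right after the header.
func keyAddr(bucket unsafe.Pointer, dataOffset, keySize, i uintptr) unsafe.Pointer {
    return unsafe.Add(bucket, dataOffset+i*keySize)
}

// elemAddr returns the address of elem i: elems are packed after all bucketCount
// keys, which is why the expressions in this patch read
// dataOffset + abi.MapBucketCount*KeySize + i*ValueSize.
func elemAddr(bucket unsafe.Pointer, dataOffset, keySize, elemSize, i uintptr) unsafe.Pointer {
    return unsafe.Add(bucket, dataOffset+bucketCount*keySize+i*elemSize)
}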
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
k = *((*unsafe.Pointer)(k))
}
if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
k = *((*unsafe.Pointer)(k))
}
if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
var elem unsafe.Pointer
bucketloop:
for {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
}
if b.tophash[i] == emptyRest {
break bucketloop
if t.NeedKeyUpdate() {
typedmemmove(t.Key, k, key)
}
- elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
goto done
}
ovf := b.overflow(t)
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
- elem = add(insertk, bucketCnt*uintptr(t.KeySize))
+ elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
}
// store new key/elem at insert position
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break search
} else if t.Key.PtrBytes != 0 {
memclrHasPointers(k, t.Key.Size_)
}
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
*(*unsafe.Pointer)(e) = nil
} else if t.Elem.PtrBytes != 0 {
// change those to emptyRest states.
// It would be nice to make this a separate function, but
// for loops are not currently inlineable.
- if i == bucketCnt-1 {
+ if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
- i = bucketCnt - 1
+ i = abi.MapBucketCount - 1
} else {
i--
}
// decide where to start
r := uintptr(rand())
it.startBucket = r & bucketMask(h.B)
- it.offset = uint8(r >> h.B & (bucketCnt - 1))
+ it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))
// iterator state
it.bucket = it.startBucket
}
i = 0
}
- for ; i < bucketCnt; i++ {
- offi := (i + it.offset) & (bucketCnt - 1)
+ for ; i < abi.MapBucketCount; i++ {
+ offi := (i + it.offset) & (abi.MapBucketCount - 1)
if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
// TODO: emptyRest is hard to use here, as we start iterating
// in the middle of a bucket. It's feasible, just tricky.
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
if checkBucket != noCheck && !h.sameSizeGrow() {
// Special case: iterator was started during a grow to a larger size
// and the grow is not done yet. We're working on a bucket whose
for i := uintptr(0); i <= mask; i++ {
b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
b.tophash[i] = emptyRest
}
}
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
- return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
+ return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
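Note (not part of the patch): with abi.MapBucketCount = 8, loadFactorNum works out to 2*8*13/16 = 13, so the threshold is 13/2 = 6.5 entries per bucket on average. A self-contained mirror of the check, with a worked example:

package sketch

import "fmt"

const (
    bucketCount   = 8 // abi.MapBucketCount
    loadFactorDen = 2
    loadFactorNum = loadFactorDen * bucketCount * 13 / 16 // 13, i.e. 6.5 entries per bucket
)

// overLoad mirrors overLoadFactor above: growth triggers only once the map holds
// more than one bucket's worth of entries and the average bucket load exceeds 6.5.
func overLoad(count int, B uint8) bool {
    return count > bucketCount && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
}

func main() {
    fmt.Println(overLoad(208, 5)) // false: 208 == 6.5 * 32 buckets, exactly at the threshold
    fmt.Println(overLoad(209, 5)) // true: one entry over triggers a grow
}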
// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
+ x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
+ y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
}
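Note (not part of the patch): x and y are the two evacuation destinations for a doubling grow; a same-size grow only ever uses x, which is why the y pointers are computed conditionally. A sketch of how the destination is chosen further down in evacuate, assuming the normal case where the key's hash is reproducible (the real code special-cases keys such as NaNs); evacDest is a hypothetical helper:

package sketch

// evacDest sketches the X/Y choice for a doubling grow: the hash bit that newly
// becomes part of the bucket index (newbit) selects the destination.
func evacDest(hash, oldbucket, newbit uintptr) (newIndex uintptr, useY bool) {
    if hash&newbit != 0 {
        return oldbucket + newbit, true // Y: upper half of the grown table
    }
    return oldbucket, false // X: same index as before
}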
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*uintptr(t.KeySize))
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
+ e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
+ for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
dst := &xy[useY] // evacuation destination
- if dst.i == bucketCnt {
+ if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
+ dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
}
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
if t.IndirectKey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else {
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
- t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
+ if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
+ t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
throw("key size wrong")
}
- if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
- t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
+ if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
+ t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
throw("elem size wrong")
}
- if t.Key.Align_ > bucketCnt {
+ if t.Key.Align_ > abi.MapBucketCount {
throw("key align too big")
}
- if t.Elem.Align_ > bucketCnt {
+ if t.Elem.Align_ > abi.MapBucketCount {
throw("elem align too big")
}
if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
throw("elem size not a multiple of elem align")
}
- if bucketCnt < 8 {
+ if abi.MapBucketCount < 8 {
throw("bucketsize too small for proper alignment")
}
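Note (not part of the patch): these sanity checks encode the indirect-storage rule behind abi.MapMaxKeyBytes and abi.MapMaxElemBytes: an oversized key or elem is stored behind a pointer, so its recorded slot size must be the pointer size, while an inline one must match the type's size exactly. A sketch of that rule, with the limit mirrored locally and slotSize a hypothetical helper:

package sketch

import "unsafe"

const mapMaxKeyBytes = 128 // abi.MapMaxKeyBytes (abi.MapMaxElemBytes is the same value)

// slotSize sketches the rule the checks above enforce for keys and elems alike.
func slotSize(typeSize, maxBytes uintptr) (size uintptr, indirect bool) {
    if typeSize > maxBytes {
        return unsafe.Sizeof(uintptr(0)), true // stored as a pointer
    }
    return typeSize, false // stored inline
}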
if dataOffset%uintptr(t.Key.Align_) != 0 {
- // moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows
- // and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket.
+ // moveToBmap moves a bucket from src to dst. It returns the destination bucket (or a new destination bucket if dst overflows)
+ // and the pos at which the next key/value will be written; pos == abi.MapBucketCount means the next write must go to an overflow bucket.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
- for i := 0; i < bucketCnt; i++ {
+ for i := 0; i < abi.MapBucketCount; i++ {
if isEmpty(src.tophash[i]) {
continue
}
- for ; pos < bucketCnt; pos++ {
+ for ; pos < abi.MapBucketCount; pos++ {
if isEmpty(dst.tophash[pos]) {
break
}
}
- if pos == bucketCnt {
+ if pos == abi.MapBucketCount {
dst = h.newoverflow(t, dst)
pos = 0
}
srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
- srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
+ srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
- dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
+ dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
dst.tophash[pos] = src.tophash[i]
if t.IndirectKey() {
// Process entries one at a time.
for srcBmap != nil {
- // move from oldBlucket to new bucket
+ // move from the old bucket to the new bucket
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(srcBmap.tophash[i]) {
continue
}
srcK = *((*unsafe.Pointer)(srcK))
}
- srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+ srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
srcEle = *((*unsafe.Pointer)(srcEle))
}
}
s := (*slice)(p)
r := int(rand())
- offset := uint8(r >> h.B & (bucketCnt - 1))
+ offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
if h.B == 0 {
copyKeys(t, h, (*bmap)(h.buckets), s, offset)
return
func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
for b != nil {
- for i := uintptr(0); i < bucketCnt; i++ {
- offi := (i + uintptr(offset)) & (bucketCnt - 1)
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
+ offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
if isEmpty(b.tophash[offi]) {
continue
}
}
s := (*slice)(p)
r := int(rand())
- offset := uint8(r >> h.B & (bucketCnt - 1))
+ offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
if h.B == 0 {
copyValues(t, h, (*bmap)(h.buckets), s, offset)
return
func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
for b != nil {
- for i := uintptr(0); i < bucketCnt; i++ {
- offi := (i + uintptr(offset)) & (bucketCnt - 1)
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
+ offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
if isEmpty(b.tophash[offi]) {
continue
}
fatal("concurrent map read and map write")
}
- ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
+ ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
if t.IndirectElem() {
ele = *((*unsafe.Pointer)(ele))
}
}
}
for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+ for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
}
}
}
}
}
for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+ for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
}
}
}
bucketloop:
for {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
inserti = i
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+ insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
bucketloop:
for {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
inserti = i
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+ insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+ for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
continue
}
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
}
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
- if i == bucketCnt-1 {
+ if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
- i = bucketCnt - 1
+ i = abi.MapBucketCount - 1
} else {
i--
}
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*4)
+ x.e = add(x.k, abi.MapBucketCount*4)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*4)
+ y.e = add(y.k, abi.MapBucketCount*4)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*4)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
+ e := add(k, abi.MapBucketCount*4)
+ for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
- if dst.i == bucketCnt {
+ if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*4)
+ dst.e = add(dst.k, abi.MapBucketCount*4)
}
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
}
}
for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
+ for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
}
}
}
}
}
for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
+ for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
}
}
}
bucketloop:
for {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
insertb = b
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+ insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
bucketloop:
for {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
insertb = b
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+ insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
+ for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
continue
}
memclrHasPointers(k, 8)
}
}
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
- if i == bucketCnt-1 {
+ if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
- i = bucketCnt - 1
+ i = abi.MapBucketCount - 1
} else {
i--
}
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*8)
+ x.e = add(x.k, abi.MapBucketCount*8)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*8)
+ y.e = add(y.k, abi.MapBucketCount*8)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*8)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
+ e := add(k, abi.MapBucketCount*8)
+ for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
- if dst.i == bucketCnt {
+ if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*8)
+ dst.e = add(dst.k, abi.MapBucketCount*8)
}
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
if t.Key.PtrBytes != 0 && writeBarrier.enabled {
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
- keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ keymaybe := uintptr(abi.MapBucketCount)
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
- if keymaybe != bucketCnt {
+ if keymaybe != abi.MapBucketCount {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
- if keymaybe != bucketCnt {
+ if keymaybe != abi.MapBucketCount {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
}
}
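Note (not part of the patch): the long-key path above avoids full memequal calls where it can: it filters candidates by length, by pointer equality of the string data, and by the first and last 4 bytes, remembers at most one survivor in keymaybe, and falls back to the hash-based loop if a second survivor appears. A sketch of just the byte-level filter, assuming keys of at least 4 bytes (guaranteed by the >= 32-byte branch); quickReject is a hypothetical helper:

package sketch

// quickReject reports whether a candidate key can be rejected without a full
// byte-by-byte comparison: mismatched length, or mismatched first/last 4 bytes.
func quickReject(key, candidate string) bool {
    if len(candidate) != len(key) {
        return true
    }
    if key[:4] != candidate[:4] {
        return true
    }
    if key[len(key)-4:] != candidate[len(candidate)-4:] {
        return true
    }
    return false // plausible match; a full comparison is still required
}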
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
- keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ keymaybe := uintptr(abi.MapBucketCount)
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
- if keymaybe != bucketCnt {
+ if keymaybe != abi.MapBucketCount {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
- if keymaybe != bucketCnt {
+ if keymaybe != abi.MapBucketCount {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
+ return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
}
}
bucketloop:
for {
- for i := uintptr(0); i < bucketCnt; i++ {
+ for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && insertb == nil {
insertb = b
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
- insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
+ insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
// store new key at insert position
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
// Clear key's pointer.
k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+ e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
- if i == bucketCnt-1 {
+ if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
- i = bucketCnt - 1
+ i = abi.MapBucketCount - 1
} else {
i--
}
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
+ x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
+ y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*2*goarch.PtrSize)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
+ e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
+ for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
- if dst.i == bucketCnt {
+ if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
+ dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
}
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
*(*string)(dst.k) = *(*string)(k)