again:
bucket := hash & bucketMask(h.B)
if h.growing() {
- growWork(t, h, bucket)
+ growWork_fast32(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := tophash(hash)
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
- growWork(t, h, bucket)
+ growWork_fast64(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := tophash(hash)
again:
bucket := hash & bucketMask(h.B)
if h.growing() {
- growWork(t, h, bucket)
+ growWork_faststr(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := tophash(hash)
bucket := hash & bucketMask(h.B)
if h.growing() {
- growWork(t, h, bucket)
+ growWork_fast32(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
bucket := hash & bucketMask(h.B)
if h.growing() {
- growWork(t, h, bucket)
+ growWork_fast64(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
bucket := hash & bucketMask(h.B)
if h.growing() {
- growWork(t, h, bucket)
+ growWork_faststr(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
top := tophash(hash)
}
h.flags &^= hashWriting
}
+
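+// growWork_fast32 performs incremental growth work for maps with 32-bit
+// keys: it evacuates the old bucket that the pending write hashes to,
+// plus one more old bucket if the map is still growing.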
+func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate_fast32(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate_fast32(t, h, h.nevacuate)
+ }
+}
+
+func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
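+ // newbit is the number of old buckets; because it is a power of two,
+ // hash&newbit below picks the bit that steers a key to the x (low)
+ // or y (high) half of the grown table.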
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+ }
+
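+ // Walk the old bucket and its overflow chain, moving every occupied
+ // slot to its x or y destination.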
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ v := add(k, bucketCnt*uintptr(t.keysize))
+ for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+ top := b.tophash[i]
+ if top == empty {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
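+ // Entries other than empty must carry a real tophash; reserved
+ // values below minTopHash mark evacuation states, which cannot
+ // appear in a bucket that has not been evacuated yet.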
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/value to bucket x or bucket y).
+ hash := t.key.alg.hash(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
+ // If key != key (NaNs), then the hash could be (and probably
+ // will be) entirely different from the old hash. Moreover,
+ // it isn't reproducible. Reproducibility is required in the
+ // presence of iterators, as our evacuation decision must
+ // match whatever decision the iterator made.
+ // Fortunately, we have the freedom to send these keys either
+ // way. Also, tophash is meaningless for these kinds of keys.
+ // We let the low bit of tophash drive the evacuation decision.
+ // We recompute a new random tophash for the next level so
+ // these keys will get evenly distributed across all buckets
+ // after multiple grows.
+ useY = top & 1
+ top = tophash(hash)
+ } else {
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+ dst := &xy[useY] // evacuation destination
+
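+ // The current destination bucket is full; chain on a new overflow bucket.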
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ if t.indirectkey {
+ *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+ } else {
+ typedmemmove(t.key, dst.k, k) // copy value
+ }
+ if t.indirectvalue {
+ *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
+ } else {
+ typedmemmove(t.elem, dst.v, v)
+ }
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or value arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, uintptr(t.keysize))
+ dst.v = add(dst.v, uintptr(t.valuesize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/value to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
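+ // If we just evacuated the bucket the evacuation mark points at,
+ // advance the mark (and finish the grow once all buckets are done).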
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
+
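+// growWork_fast64 is the 64-bit key counterpart of growWork_fast32.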
+func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate_fast64(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate_fast64(t, h, h.nevacuate)
+ }
+}
+
+func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
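+ // newbit is the number of old buckets; hash&newbit below selects
+ // between the x and y halves of the grown table.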
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+ }
+
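+ // Walk the old bucket and its overflow chain, as in evacuate_fast32.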
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ v := add(k, bucketCnt*uintptr(t.keysize))
+ for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+ top := b.tophash[i]
+ if top == empty {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/value to bucket x or bucket y).
+ hash := t.key.alg.hash(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
+ // If key != key (NaNs), then the hash could be (and probably
+ // will be) entirely different from the old hash. Moreover,
+ // it isn't reproducible. Reproducibility is required in the
+ // presence of iterators, as our evacuation decision must
+ // match whatever decision the iterator made.
+ // Fortunately, we have the freedom to send these keys either
+ // way. Also, tophash is meaningless for these kinds of keys.
+ // We let the low bit of tophash drive the evacuation decision.
+ // We recompute a new random tophash for the next level so
+ // these keys will get evenly distributed across all buckets
+ // after multiple grows.
+ useY = top & 1
+ top = tophash(hash)
+ } else {
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+ dst := &xy[useY] // evacuation destination
+
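+ // Destination full: chain on a new overflow bucket.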
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ if t.indirectkey {
+ *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+ } else {
+ typedmemmove(t.key, dst.k, k) // copy value
+ }
+ if t.indirectvalue {
+ *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
+ } else {
+ typedmemmove(t.elem, dst.v, v)
+ }
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or value arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, uintptr(t.keysize))
+ dst.v = add(dst.v, uintptr(t.valuesize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/value to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}
+
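+// growWork_faststr is the string-key counterpart of growWork_fast32.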
+func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate_faststr(t, h, bucket&h.oldbucketmask())
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.growing() {
+ evacuate_faststr(t, h, h.nevacuate)
+ }
+}
+
+func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
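+ // As above: newbit is the old bucket count, doubling as the x/y
+ // selector bit for hash&newbit below.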
+ newbit := h.noldbuckets()
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ // xy contains the x and y (low and high) evacuation destinations.
+ var xy [2]evacDst
+ x := &xy[0]
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.k = add(unsafe.Pointer(x.b), dataOffset)
+ x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+
+ if !h.sameSizeGrow() {
+ // Only calculate y pointers if we're growing bigger.
+ // Otherwise GC can see bad pointers.
+ y := &xy[1]
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.k = add(unsafe.Pointer(y.b), dataOffset)
+ y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+ }
+
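+ // Walk the old bucket and its overflow chain.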
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ v := add(k, bucketCnt*uintptr(t.keysize))
+ for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+ top := b.tophash[i]
+ if top == empty {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ var useY uint8
+ if !h.sameSizeGrow() {
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/value to bucket x or bucket y).
+ hash := t.key.alg.hash(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
+ // If key != key (NaNs), then the hash could be (and probably
+ // will be) entirely different from the old hash. Moreover,
+ // it isn't reproducible. Reproducibility is required in the
+ // presence of iterators, as our evacuation decision must
+ // match whatever decision the iterator made.
+ // Fortunately, we have the freedom to send these keys either
+ // way. Also, tophash is meaningless for these kinds of keys.
+ // We let the low bit of tophash drive the evacuation decision.
+ // We recompute a new random tophash for the next level so
+ // these keys will get evenly distributed across all buckets
+ // after multiple grows.
+ useY = top & 1
+ top = tophash(hash)
+ } else {
+ if hash&newbit != 0 {
+ useY = 1
+ }
+ }
+ }
+
+ b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+ dst := &xy[useY] // evacuation destination
+
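+ // Grab a fresh overflow bucket if the destination is full.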
+ if dst.i == bucketCnt {
+ dst.b = h.newoverflow(t, dst.b)
+ dst.i = 0
+ dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+ dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+ }
+ dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ if t.indirectkey {
+ *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+ } else {
+ typedmemmove(t.key, dst.k, k) // copy value
+ }
+ if t.indirectvalue {
+ *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
+ } else {
+ typedmemmove(t.elem, dst.v, v)
+ }
+ dst.i++
+ // These updates might push these pointers past the end of the
+ // key or value arrays. That's ok, as we have the overflow pointer
+ // at the end of the bucket to protect against pointing past the
+ // end of the bucket.
+ dst.k = add(dst.k, uintptr(t.keysize))
+ dst.v = add(dst.v, uintptr(t.valuesize))
+ }
+ }
+ // Unlink the overflow buckets & clear key/value to help GC.
+ if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ // Preserve b.tophash because the evacuation
+ // state is maintained there.
+ ptr := add(b, dataOffset)
+ n := uintptr(t.bucketsize) - dataOffset
+ memclrHasPointers(ptr, n)
+ }
+ }
+
+ if oldbucket == h.nevacuate {
+ advanceEvacuationMark(h, t, newbit)
+ }
+}