t.Errorf("DeleteFunc result = %v, want %v", mc, want)
}
}
+
+// n sinks Clone's result so the benchmarked call cannot be optimized away.
+var n map[int]int
+
+// BenchmarkMapClone measures Clone on a map holding one million entries.
+func BenchmarkMapClone(b *testing.B) {
+	src := make(map[int]int)
+	for k := 0; k < 1000000; k++ {
+		src[k] = k
+	}
+	b.ResetTimer()
+	for j := 0; j < b.N; j++ {
+		n = Clone(src)
+	}
+}
+
+// TestCloneWithDelete verifies that cloning a map from which most entries
+// were deleted copies exactly the surviving key/value pairs (the emptied
+// bucket slots must be skipped, not copied).
+func TestCloneWithDelete(t *testing.T) {
+	var m = make(map[int]int)
+	for i := 0; i < 32; i++ {
+		m[i] = i
+	}
+	// Remove 24 of the 32 entries, leaving keys 0..7.
+	for i := 8; i < 32; i++ {
+		delete(m, i)
+	}
+	m2 := Clone(m)
+	if len(m2) != 8 {
+		t.Errorf("len(m2) = %d, want %d", len(m2), 8)
+	}
+	for i := 0; i < 8; i++ {
+		if m2[i] != m[i] {
+			t.Errorf("m2[%d] = %d, want %d", i, m2[i], m[i])
+		}
+	}
+}
+
+// TestCloneWithMapAssign verifies that a clone of a small, incrementally
+// filled map contains every key/value pair of the original.
+func TestCloneWithMapAssign(t *testing.T) {
+	var m = make(map[int]int)
+	const N = 25
+	for i := 0; i < N; i++ {
+		m[i] = i
+	}
+	m2 := Clone(m)
+	if len(m2) != N {
+		t.Errorf("len(m2) = %d, want %d", len(m2), N)
+	}
+	for i := 0; i < N; i++ {
+		if m2[i] != m[i] {
+			t.Errorf("m2[%d] = %d, want %d", i, m2[i], m[i])
+		}
+	}
+}
// map init function to this symbol. Defined in assembly so as to avoid
// complications with instrumentation (coverage, etc).
func mapinitnoop()
+
+// mapclone implements maps.Clone: it is reached from the maps package via
+// the linkname below. It takes the map boxed in an any, clones the
+// underlying hmap, and returns a new any with the same dynamic type.
+//
+//go:linkname mapclone maps.clone
+func mapclone(m any) any {
+	e := efaceOf(&m)
+	// Swap the eface's data word for the cloned hmap; the type word is left
+	// untouched so the result has the caller's map type.
+	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
+	return m
+}
+
+// moveToBmap copies every occupied cell of the src bucket into dst, writing
+// from cell index pos onward and chaining new overflow buckets off dst as
+// needed. It increments h.count once per entry copied. It returns the bucket
+// the last entry was written to (dst itself or a newly chained overflow
+// bucket) and the cell index at which the next key/value should be written;
+// a returned pos == bucketCnt means the next write needs an overflow bucket.
+func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
+	for i := 0; i < bucketCnt; i++ {
+		// Skip cells that hold no entry.
+		if isEmpty(src.tophash[i]) {
+			continue
+		}
+
+		// Advance pos to the next free cell in dst.
+		for ; pos < bucketCnt; pos++ {
+			if isEmpty(dst.tophash[pos]) {
+				break
+			}
+		}
+
+		// dst is full: chain a fresh overflow bucket and restart at cell 0.
+		if pos == bucketCnt {
+			dst = h.newoverflow(t, dst)
+			pos = 0
+		}
+
+		// Raw addresses of the key/elem cells; elems start after bucketCnt keys.
+		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.keysize))
+		srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(i)*uintptr(t.elemsize))
+		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.keysize))
+		dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(pos)*uintptr(t.elemsize))
+
+		dst.tophash[pos] = src.tophash[i]
+		// For indirect keys/elems the cell holds a pointer; copy the pointer
+		// word itself rather than the pointed-to data.
+		// NOTE(review): this aliases the boxed key/elem storage between the
+		// two maps — confirm that is intended for maps.Clone semantics.
+		if t.indirectkey() {
+			*(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
+		} else {
+			typedmemmove(t.key, dstK, srcK)
+		}
+		if t.indirectelem() {
+			*(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
+		} else {
+			typedmemmove(t.elem, dstEle, srcEle)
+		}
+		pos++
+		h.count++
+	}
+	return dst, pos
+}
+
+// mapclone2 returns a complete copy of src. It copies bucket-by-bucket when
+// a source bucket's destination can be derived from its index, and falls
+// back to mapassign (re-hashing each key) for unevacuated old buckets whose
+// entries split across multiple destination buckets.
+//
+// The caller must ensure src is not being written to concurrently.
+func mapclone2(t *maptype, src *hmap) *hmap {
+	dst := makemap(t, src.count, nil)
+	dst.hash0 = src.hash0
+	dst.nevacuate = 0
+	//flags do not need to be copied here, just like a new map has no flags.
+
+	if src.count == 0 {
+		return dst
+	}
+
+	if src.flags&hashWriting != 0 {
+		fatal("concurrent map clone and map write")
+	}
+
+	// Single-bucket source (B == 0): clone with one typed bulk copy.
+	if src.B == 0 {
+		dst.buckets = newobject(t.bucket)
+		dst.count = src.count
+		typedmemmove(t.bucket, dst.buckets, src.buckets)
+		return dst
+	}
+
+	//src.B != 0
+	if dst.B == 0 {
+		dst.buckets = newobject(t.bucket)
+	}
+	dstArraySize := int(bucketShift(dst.B))
+	srcArraySize := int(bucketShift(src.B))
+	for i := 0; i < dstArraySize; i++ {
+		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.bucketsize))))
+		pos := 0
+		// dst.B <= dst's table size here, and dstArraySize divides
+		// srcArraySize, so every src bucket congruent to i modulo
+		// dstArraySize lands in dst bucket i.
+		for j := 0; j < srcArraySize; j += dstArraySize {
+			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.bucketsize))))
+			for srcBmap != nil {
+				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
+				srcBmap = srcBmap.overflow(t)
+			}
+		}
+	}
+
+	if src.oldbuckets == nil {
+		return dst
+	}
+
+	// src is mid-grow: entries not yet evacuated still live only in
+	// oldbuckets and must be copied as well.
+	oldB := src.B
+	srcOldbuckets := src.oldbuckets
+	if !src.sameSizeGrow() {
+		oldB--
+	}
+	oldSrcArraySize := int(bucketShift(oldB))
+
+	for i := 0; i < oldSrcArraySize; i++ {
+		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.bucketsize))))
+		// Evacuated buckets were already covered via src.buckets above.
+		if evacuated(srcBmap) {
+			continue
+		}
+
+		if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src
+			// The whole old bucket maps onto one dst bucket: append its
+			// entries at the tail of that bucket's overflow chain.
+			// Fix: the bucket index must be scaled by t.bucketsize to form a
+			// byte offset, matching every other bucket address computation in
+			// this function.
+			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.bucketsize)))
+			for dstBmap.overflow(t) != nil {
+				dstBmap = dstBmap.overflow(t)
+			}
+			pos := 0
+			for srcBmap != nil {
+				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
+				srcBmap = srcBmap.overflow(t)
+			}
+			continue
+		}
+
+		// Entries in this old bucket split across several dst buckets; let
+		// mapassign re-hash each key into its proper destination.
+		for srcBmap != nil {
+			// move from old bucket to new bucket
+			for i := uintptr(0); i < bucketCnt; i++ {
+				if isEmpty(srcBmap.tophash[i]) {
+					continue
+				}
+
+				if src.flags&hashWriting != 0 {
+					fatal("concurrent map clone and map write")
+				}
+
+				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.keysize))
+				if t.indirectkey() {
+					srcK = *((*unsafe.Pointer)(srcK))
+				}
+
+				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+				if t.indirectelem() {
+					srcEle = *((*unsafe.Pointer)(srcEle))
+				}
+				dstEle := mapassign(t, dst, srcK)
+				typedmemmove(t.elem, dstEle, srcEle)
+			}
+			srcBmap = srcBmap.overflow(t)
+		}
+	}
+	return dst
+}