Cypherpunks repositories - gostls13.git/commitdiff
all: create swissmap experiment and fork files
author     Michael Pratt <mpratt@google.com>
           Fri, 19 Apr 2024 15:46:54 +0000 (11:46 -0400)
committer  Gopher Robot <gobot@golang.org>
           Fri, 2 Aug 2024 15:34:37 +0000 (15:34 +0000)
The _swiss.go files are identical to the originals (except build tag).
Later CLs will change them.

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap
Change-Id: I9943e2d6f1cfa227ffbf27c9ddc9ce853695d225
Reviewed-on: https://go-review.googlesource.com/c/go/+/580778
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
Auto-Submit: Michael Pratt <mpratt@google.com>
Commit-Queue: Michael Pratt <mpratt@google.com>
Reviewed-by: Keith Randall <khr@google.com>
15 files changed:
src/internal/goexperiment/exp_swissmap_off.go [new file with mode: 0644]
src/internal/goexperiment/exp_swissmap_on.go [new file with mode: 0644]
src/internal/goexperiment/flags.go
src/reflect/map_noswiss.go [new file with mode: 0644]
src/reflect/map_swiss.go [new file with mode: 0644]
src/reflect/type.go
src/reflect/value.go
src/runtime/map_fast32_noswiss.go [moved from src/runtime/map_fast32.go with 99% similarity]
src/runtime/map_fast32_swiss.go [new file with mode: 0644]
src/runtime/map_fast64_noswiss.go [moved from src/runtime/map_fast64.go with 99% similarity]
src/runtime/map_fast64_swiss.go [new file with mode: 0644]
src/runtime/map_faststr_noswiss.go [moved from src/runtime/map_faststr.go with 99% similarity]
src/runtime/map_faststr_swiss.go [new file with mode: 0644]
src/runtime/map_noswiss.go [moved from src/runtime/map.go with 99% similarity]
src/runtime/map_swiss.go [new file with mode: 0644]

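For orientation, the forking pattern used throughout this CL is the standard GOEXPERIMENT one: each pair of forked files carries a goexperiment.swissmap build constraint, positive in the _swiss.go copy and negated in the _noswiss.go copy, so exactly one implementation is compiled into any given build. A minimal sketch of that pattern, using hypothetical file, package, and function names (not part of the patch):

// File impl_noswiss.go (hypothetical): compiled when the experiment is off.

//go:build !goexperiment.swissmap

package demo

func mapImplName() string { return "hashmap (buckets)" }

// File impl_swiss.go (hypothetical): compiled only when the experiment is on.

//go:build goexperiment.swissmap

package demo

func mapImplName() string { return "swisstable" }

Exactly one of the pair satisfies the constraint for any build, so callers of mapImplName need no conditional logic; building with GOEXPERIMENT=swissmap flips which file is used.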
diff --git a/src/internal/goexperiment/exp_swissmap_off.go b/src/internal/goexperiment/exp_swissmap_off.go
new file mode 100644 (file)
index 0000000..2af40aa
--- /dev/null
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.swissmap
+
+package goexperiment
+
+const SwissMap = false
+const SwissMapInt = 0
diff --git a/src/internal/goexperiment/exp_swissmap_on.go b/src/internal/goexperiment/exp_swissmap_on.go
new file mode 100644 (file)
index 0000000..73be49b
--- /dev/null
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.swissmap
+
+package goexperiment
+
+const SwissMap = true
+const SwissMapInt = 1
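The generated constants above also let standard-library code branch on the experiment without build tags, since the constant folds at compile time and the dead branch is eliminated. A sketch of how such a check might look inside the runtime or reflect (internal/goexperiment is only importable from within the standard library; the helper name here is hypothetical):

package demo

import "internal/goexperiment"

// mapImplementation reports which map implementation this binary was
// built with. The if is resolved at compile time.
func mapImplementation() string {
	if goexperiment.SwissMap {
		return "swisstable"
	}
	return "hashmap (buckets)"
}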
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index 3f9c5af68e36ebf2693ffc0aea9bff5b923830c2..33a24433cbd6b665591450b89acde703a8e65cc1 100644 (file)
@@ -115,4 +115,7 @@ type Flags struct {
        // Requires that gotypesalias=1 is set with GODEBUG.
        // This flag will be removed with Go 1.24.
        AliasTypeParams bool
+
+       // SwissMap enables the SwissTable-based map implementation.
+       SwissMap bool
 }
diff --git a/src/reflect/map_noswiss.go b/src/reflect/map_noswiss.go
new file mode 100644 (file)
index 0000000..de92e76
--- /dev/null
@@ -0,0 +1,473 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.swissmap
+
+package reflect
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       "unsafe"
+)
+
+// mapType represents a map type.
+type mapType struct {
+       abi.MapType
+}
+
+func (t *rtype) Key() Type {
+       if t.Kind() != Map {
+               panic("reflect: Key of non-map type " + t.String())
+       }
+       tt := (*mapType)(unsafe.Pointer(t))
+       return toType(tt.Key)
+}
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics.
+func MapOf(key, elem Type) Type {
+       ktyp := key.common()
+       etyp := elem.common()
+
+       if ktyp.Equal == nil {
+               panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
+       }
+
+       // Look in cache.
+       ckey := cacheKey{Map, ktyp, etyp, 0}
+       if mt, ok := lookupCache.Load(ckey); ok {
+               return mt.(Type)
+       }
+
+       // Look in known types.
+       s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
+       for _, tt := range typesByString(s) {
+               mt := (*mapType)(unsafe.Pointer(tt))
+               if mt.Key == ktyp && mt.Elem == etyp {
+                       ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
+                       return ti.(Type)
+               }
+       }
+
+       // Make a map type.
+       // Note: flag values must match those used in the TMAP case
+       // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+       var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+       mt := **(**mapType)(unsafe.Pointer(&imap))
+       mt.Str = resolveReflectName(newName(s, "", false, false))
+       mt.TFlag = 0
+       mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
+       mt.Key = ktyp
+       mt.Elem = etyp
+       mt.Bucket = bucketOf(ktyp, etyp)
+       mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+               return typehash(ktyp, p, seed)
+       }
+       mt.Flags = 0
+       if ktyp.Size_ > abi.MapMaxKeyBytes {
+               mt.KeySize = uint8(goarch.PtrSize)
+               mt.Flags |= 1 // indirect key
+       } else {
+               mt.KeySize = uint8(ktyp.Size_)
+       }
+       if etyp.Size_ > abi.MapMaxElemBytes {
+               mt.ValueSize = uint8(goarch.PtrSize)
+               mt.Flags |= 2 // indirect value
+       } else {
+               mt.ValueSize = uint8(etyp.Size_)
+       }
+       mt.BucketSize = uint16(mt.Bucket.Size_)
+       if isReflexive(ktyp) {
+               mt.Flags |= 4
+       }
+       if needKeyUpdate(ktyp) {
+               mt.Flags |= 8
+       }
+       if hashMightPanic(ktyp) {
+               mt.Flags |= 16
+       }
+       mt.PtrToThis = 0
+
+       ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
+       return ti.(Type)
+}
+
+func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
+       if ktyp.Size_ > abi.MapMaxKeyBytes {
+               ktyp = ptrTo(ktyp)
+       }
+       if etyp.Size_ > abi.MapMaxElemBytes {
+               etyp = ptrTo(etyp)
+       }
+
+       // Prepare GC data if any.
+       // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
+       // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
+       // Note that since the key and value are known to be <= 128 bytes,
+       // they're guaranteed to have bitmaps instead of GC programs.
+       var gcdata *byte
+       var ptrdata uintptr
+
+       size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
+       if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
+               panic("reflect: bad size computation in MapOf")
+       }
+
+       if ktyp.Pointers() || etyp.Pointers() {
+               nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
+               n := (nptr + 7) / 8
+
+               // Runtime needs pointer masks to be a multiple of uintptr in size.
+               n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
+               mask := make([]byte, n)
+               base := uintptr(abi.MapBucketCount / goarch.PtrSize)
+
+               if ktyp.Pointers() {
+                       emitGCMask(mask, base, ktyp, abi.MapBucketCount)
+               }
+               base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
+
+               if etyp.Pointers() {
+                       emitGCMask(mask, base, etyp, abi.MapBucketCount)
+               }
+               base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
+
+               word := base
+               mask[word/8] |= 1 << (word % 8)
+               gcdata = &mask[0]
+               ptrdata = (word + 1) * goarch.PtrSize
+
+               // overflow word must be last
+               if ptrdata != size {
+                       panic("reflect: bad layout computation in MapOf")
+               }
+       }
+
+       b := &abi.Type{
+               Align_:   goarch.PtrSize,
+               Size_:    size,
+               Kind_:    abi.Struct,
+               PtrBytes: ptrdata,
+               GCData:   gcdata,
+       }
+       s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
+       b.Str = resolveReflectName(newName(s, "", false, false))
+       return b
+}
+
+var stringType = rtypeOf("")
+
+// MapIndex returns the value associated with key in the map v.
+// It panics if v's Kind is not [Map].
+// It returns the zero Value if key is not found in the map or if v represents a nil map.
+// As in Go, the key's value must be assignable to the map's key type.
+func (v Value) MapIndex(key Value) Value {
+       v.mustBe(Map)
+       tt := (*mapType)(unsafe.Pointer(v.typ()))
+
+       // Do not require key to be exported, so that DeepEqual
+       // and other programs can use all the keys returned by
+       // MapKeys as arguments to MapIndex. If either the map
+       // or the key is unexported, though, the result will be
+       // considered unexported. This is consistent with the
+       // behavior for structs, which allow read but not write
+       // of unexported fields.
+
+       var e unsafe.Pointer
+       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+               k := *(*string)(key.ptr)
+               e = mapaccess_faststr(v.typ(), v.pointer(), k)
+       } else {
+               key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
+               var k unsafe.Pointer
+               if key.flag&flagIndir != 0 {
+                       k = key.ptr
+               } else {
+                       k = unsafe.Pointer(&key.ptr)
+               }
+               e = mapaccess(v.typ(), v.pointer(), k)
+       }
+       if e == nil {
+               return Value{}
+       }
+       typ := tt.Elem
+       fl := (v.flag | key.flag).ro()
+       fl |= flag(typ.Kind())
+       return copyVal(typ, fl, e)
+}
+
+// MapKeys returns a slice containing all the keys present in the map,
+// in unspecified order.
+// It panics if v's Kind is not [Map].
+// It returns an empty slice if v represents a nil map.
+func (v Value) MapKeys() []Value {
+       v.mustBe(Map)
+       tt := (*mapType)(unsafe.Pointer(v.typ()))
+       keyType := tt.Key
+
+       fl := v.flag.ro() | flag(keyType.Kind())
+
+       m := v.pointer()
+       mlen := int(0)
+       if m != nil {
+               mlen = maplen(m)
+       }
+       var it hiter
+       mapiterinit(v.typ(), m, &it)
+       a := make([]Value, mlen)
+       var i int
+       for i = 0; i < len(a); i++ {
+               key := mapiterkey(&it)
+               if key == nil {
+                       // Someone deleted an entry from the map since we
+                       // called maplen above. It's a data race, but nothing
+                       // we can do about it.
+                       break
+               }
+               a[i] = copyVal(keyType, fl, key)
+               mapiternext(&it)
+       }
+       return a[:i]
+}
+
+// hiter's structure matches runtime.hiter's structure.
+// Having a clone here allows us to embed a map iterator
+// inside type MapIter so that MapIters can be re-used
+// without doing any allocations.
+type hiter struct {
+       key         unsafe.Pointer
+       elem        unsafe.Pointer
+       t           unsafe.Pointer
+       h           unsafe.Pointer
+       buckets     unsafe.Pointer
+       bptr        unsafe.Pointer
+       overflow    *[]unsafe.Pointer
+       oldoverflow *[]unsafe.Pointer
+       startBucket uintptr
+       offset      uint8
+       wrapped     bool
+       B           uint8
+       i           uint8
+       bucket      uintptr
+       checkBucket uintptr
+}
+
+func (h *hiter) initialized() bool {
+       return h.t != nil
+}
+
+// A MapIter is an iterator for ranging over a map.
+// See [Value.MapRange].
+type MapIter struct {
+       m     Value
+       hiter hiter
+}
+
+// Key returns the key of iter's current map entry.
+func (iter *MapIter) Key() Value {
+       if !iter.hiter.initialized() {
+               panic("MapIter.Key called before Next")
+       }
+       iterkey := mapiterkey(&iter.hiter)
+       if iterkey == nil {
+               panic("MapIter.Key called on exhausted iterator")
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       ktype := t.Key
+       return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
+}
+
+// SetIterKey assigns to v the key of iter's current map entry.
+// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
+// As in Go, the key must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) SetIterKey(iter *MapIter) {
+       if !iter.hiter.initialized() {
+               panic("reflect: Value.SetIterKey called before Next")
+       }
+       iterkey := mapiterkey(&iter.hiter)
+       if iterkey == nil {
+               panic("reflect: Value.SetIterKey called on exhausted iterator")
+       }
+
+       v.mustBeAssignable()
+       var target unsafe.Pointer
+       if v.kind() == Interface {
+               target = v.ptr
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       ktype := t.Key
+
+       iter.m.mustBeExported() // do not let unexported m leak
+       key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
+       key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
+       typedmemmove(v.typ(), v.ptr, key.ptr)
+}
+
+// Value returns the value of iter's current map entry.
+func (iter *MapIter) Value() Value {
+       if !iter.hiter.initialized() {
+               panic("MapIter.Value called before Next")
+       }
+       iterelem := mapiterelem(&iter.hiter)
+       if iterelem == nil {
+               panic("MapIter.Value called on exhausted iterator")
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       vtype := t.Elem
+       return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
+}
+
+// SetIterValue assigns to v the value of iter's current map entry.
+// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
+// As in Go, the value must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) SetIterValue(iter *MapIter) {
+       if !iter.hiter.initialized() {
+               panic("reflect: Value.SetIterValue called before Next")
+       }
+       iterelem := mapiterelem(&iter.hiter)
+       if iterelem == nil {
+               panic("reflect: Value.SetIterValue called on exhausted iterator")
+       }
+
+       v.mustBeAssignable()
+       var target unsafe.Pointer
+       if v.kind() == Interface {
+               target = v.ptr
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       vtype := t.Elem
+
+       iter.m.mustBeExported() // do not let unexported m leak
+       elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
+       elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
+       typedmemmove(v.typ(), v.ptr, elem.ptr)
+}
+
+// Next advances the map iterator and reports whether there is another
+// entry. It returns false when iter is exhausted; subsequent
+// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
+func (iter *MapIter) Next() bool {
+       if !iter.m.IsValid() {
+               panic("MapIter.Next called on an iterator that does not have an associated map Value")
+       }
+       if !iter.hiter.initialized() {
+               mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
+       } else {
+               if mapiterkey(&iter.hiter) == nil {
+                       panic("MapIter.Next called on exhausted iterator")
+               }
+               mapiternext(&iter.hiter)
+       }
+       return mapiterkey(&iter.hiter) != nil
+}
+
+// Reset modifies iter to iterate over v.
+// It panics if v's Kind is not [Map] and v is not the zero Value.
+// Reset(Value{}) causes iter to not to refer to any map,
+// which may allow the previously iterated-over map to be garbage collected.
+func (iter *MapIter) Reset(v Value) {
+       if v.IsValid() {
+               v.mustBe(Map)
+       }
+       iter.m = v
+       iter.hiter = hiter{}
+}
+
+// MapRange returns a range iterator for a map.
+// It panics if v's Kind is not [Map].
+//
+// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
+// [MapIter.Next] returns false when the iterator is exhausted.
+// MapRange follows the same iteration semantics as a range statement.
+//
+// Example:
+//
+//     iter := reflect.ValueOf(m).MapRange()
+//     for iter.Next() {
+//             k := iter.Key()
+//             v := iter.Value()
+//             ...
+//     }
+func (v Value) MapRange() *MapIter {
+       // This is inlinable to take advantage of "function outlining".
+       // The allocation of MapIter can be stack allocated if the caller
+       // does not allow it to escape.
+       // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
+       if v.kind() != Map {
+               v.panicNotMap()
+       }
+       return &MapIter{m: v}
+}
+
+// SetMapIndex sets the element associated with key in the map v to elem.
+// It panics if v's Kind is not [Map].
+// If elem is the zero Value, SetMapIndex deletes the key from the map.
+// Otherwise if v holds a nil map, SetMapIndex will panic.
+// As in Go, key's elem must be assignable to the map's key type,
+// and elem's value must be assignable to the map's elem type.
+func (v Value) SetMapIndex(key, elem Value) {
+       v.mustBe(Map)
+       v.mustBeExported()
+       key.mustBeExported()
+       tt := (*mapType)(unsafe.Pointer(v.typ()))
+
+       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+               k := *(*string)(key.ptr)
+               if elem.typ() == nil {
+                       mapdelete_faststr(v.typ(), v.pointer(), k)
+                       return
+               }
+               elem.mustBeExported()
+               elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
+               var e unsafe.Pointer
+               if elem.flag&flagIndir != 0 {
+                       e = elem.ptr
+               } else {
+                       e = unsafe.Pointer(&elem.ptr)
+               }
+               mapassign_faststr(v.typ(), v.pointer(), k, e)
+               return
+       }
+
+       key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
+       var k unsafe.Pointer
+       if key.flag&flagIndir != 0 {
+               k = key.ptr
+       } else {
+               k = unsafe.Pointer(&key.ptr)
+       }
+       if elem.typ() == nil {
+               mapdelete(v.typ(), v.pointer(), k)
+               return
+       }
+       elem.mustBeExported()
+       elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
+       var e unsafe.Pointer
+       if elem.flag&flagIndir != 0 {
+               e = elem.ptr
+       } else {
+               e = unsafe.Pointer(&elem.ptr)
+       }
+       mapassign(v.typ(), v.pointer(), k, e)
+}
+
+// Force slow panicking path not inlined, so it won't add to the
+// inlining budget of the caller.
+// TODO: undo when the inliner is no longer bottom-up only.
+//
+//go:noinline
+func (f flag) panicNotMap() {
+       f.mustBe(Map)
+}
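Both forks keep the exported reflect API unchanged. As a reminder of ordinary usage (not part of this patch), MapOf builds a map type at runtime, and MakeMap/SetMapIndex operate on it the same way under either map implementation:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	k := reflect.TypeOf(0)  // int
	e := reflect.TypeOf("") // string
	mt := reflect.MapOf(k, e)
	fmt.Println(mt) // map[int]string

	// Construct and populate a value of that type dynamically.
	m := reflect.MakeMap(mt)
	m.SetMapIndex(reflect.ValueOf(7), reflect.ValueOf("seven"))
	fmt.Println(m.Interface()) // map[7:seven]
}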
diff --git a/src/reflect/map_swiss.go b/src/reflect/map_swiss.go
new file mode 100644 (file)
index 0000000..a42a9d6
--- /dev/null
@@ -0,0 +1,473 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package reflect
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       "unsafe"
+)
+
+// mapType represents a map type.
+type mapType struct {
+       abi.MapType
+}
+
+func (t *rtype) Key() Type {
+       if t.Kind() != Map {
+               panic("reflect: Key of non-map type " + t.String())
+       }
+       tt := (*mapType)(unsafe.Pointer(t))
+       return toType(tt.Key)
+}
+
+// MapOf returns the map type with the given key and element types.
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics.
+func MapOf(key, elem Type) Type {
+       ktyp := key.common()
+       etyp := elem.common()
+
+       if ktyp.Equal == nil {
+               panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
+       }
+
+       // Look in cache.
+       ckey := cacheKey{Map, ktyp, etyp, 0}
+       if mt, ok := lookupCache.Load(ckey); ok {
+               return mt.(Type)
+       }
+
+       // Look in known types.
+       s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
+       for _, tt := range typesByString(s) {
+               mt := (*mapType)(unsafe.Pointer(tt))
+               if mt.Key == ktyp && mt.Elem == etyp {
+                       ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
+                       return ti.(Type)
+               }
+       }
+
+       // Make a map type.
+       // Note: flag values must match those used in the TMAP case
+       // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+       var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+       mt := **(**mapType)(unsafe.Pointer(&imap))
+       mt.Str = resolveReflectName(newName(s, "", false, false))
+       mt.TFlag = 0
+       mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
+       mt.Key = ktyp
+       mt.Elem = etyp
+       mt.Bucket = bucketOf(ktyp, etyp)
+       mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+               return typehash(ktyp, p, seed)
+       }
+       mt.Flags = 0
+       if ktyp.Size_ > abi.MapMaxKeyBytes {
+               mt.KeySize = uint8(goarch.PtrSize)
+               mt.Flags |= 1 // indirect key
+       } else {
+               mt.KeySize = uint8(ktyp.Size_)
+       }
+       if etyp.Size_ > abi.MapMaxElemBytes {
+               mt.ValueSize = uint8(goarch.PtrSize)
+               mt.Flags |= 2 // indirect value
+       } else {
+               mt.ValueSize = uint8(etyp.Size_)
+       }
+       mt.BucketSize = uint16(mt.Bucket.Size_)
+       if isReflexive(ktyp) {
+               mt.Flags |= 4
+       }
+       if needKeyUpdate(ktyp) {
+               mt.Flags |= 8
+       }
+       if hashMightPanic(ktyp) {
+               mt.Flags |= 16
+       }
+       mt.PtrToThis = 0
+
+       ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
+       return ti.(Type)
+}
+
+func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
+       if ktyp.Size_ > abi.MapMaxKeyBytes {
+               ktyp = ptrTo(ktyp)
+       }
+       if etyp.Size_ > abi.MapMaxElemBytes {
+               etyp = ptrTo(etyp)
+       }
+
+       // Prepare GC data if any.
+       // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
+       // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
+       // Note that since the key and value are known to be <= 128 bytes,
+       // they're guaranteed to have bitmaps instead of GC programs.
+       var gcdata *byte
+       var ptrdata uintptr
+
+       size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
+       if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
+               panic("reflect: bad size computation in MapOf")
+       }
+
+       if ktyp.Pointers() || etyp.Pointers() {
+               nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
+               n := (nptr + 7) / 8
+
+               // Runtime needs pointer masks to be a multiple of uintptr in size.
+               n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
+               mask := make([]byte, n)
+               base := uintptr(abi.MapBucketCount / goarch.PtrSize)
+
+               if ktyp.Pointers() {
+                       emitGCMask(mask, base, ktyp, abi.MapBucketCount)
+               }
+               base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
+
+               if etyp.Pointers() {
+                       emitGCMask(mask, base, etyp, abi.MapBucketCount)
+               }
+               base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
+
+               word := base
+               mask[word/8] |= 1 << (word % 8)
+               gcdata = &mask[0]
+               ptrdata = (word + 1) * goarch.PtrSize
+
+               // overflow word must be last
+               if ptrdata != size {
+                       panic("reflect: bad layout computation in MapOf")
+               }
+       }
+
+       b := &abi.Type{
+               Align_:   goarch.PtrSize,
+               Size_:    size,
+               Kind_:    abi.Struct,
+               PtrBytes: ptrdata,
+               GCData:   gcdata,
+       }
+       s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
+       b.Str = resolveReflectName(newName(s, "", false, false))
+       return b
+}
+
+var stringType = rtypeOf("")
+
+// MapIndex returns the value associated with key in the map v.
+// It panics if v's Kind is not [Map].
+// It returns the zero Value if key is not found in the map or if v represents a nil map.
+// As in Go, the key's value must be assignable to the map's key type.
+func (v Value) MapIndex(key Value) Value {
+       v.mustBe(Map)
+       tt := (*mapType)(unsafe.Pointer(v.typ()))
+
+       // Do not require key to be exported, so that DeepEqual
+       // and other programs can use all the keys returned by
+       // MapKeys as arguments to MapIndex. If either the map
+       // or the key is unexported, though, the result will be
+       // considered unexported. This is consistent with the
+       // behavior for structs, which allow read but not write
+       // of unexported fields.
+
+       var e unsafe.Pointer
+       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+               k := *(*string)(key.ptr)
+               e = mapaccess_faststr(v.typ(), v.pointer(), k)
+       } else {
+               key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
+               var k unsafe.Pointer
+               if key.flag&flagIndir != 0 {
+                       k = key.ptr
+               } else {
+                       k = unsafe.Pointer(&key.ptr)
+               }
+               e = mapaccess(v.typ(), v.pointer(), k)
+       }
+       if e == nil {
+               return Value{}
+       }
+       typ := tt.Elem
+       fl := (v.flag | key.flag).ro()
+       fl |= flag(typ.Kind())
+       return copyVal(typ, fl, e)
+}
+
+// MapKeys returns a slice containing all the keys present in the map,
+// in unspecified order.
+// It panics if v's Kind is not [Map].
+// It returns an empty slice if v represents a nil map.
+func (v Value) MapKeys() []Value {
+       v.mustBe(Map)
+       tt := (*mapType)(unsafe.Pointer(v.typ()))
+       keyType := tt.Key
+
+       fl := v.flag.ro() | flag(keyType.Kind())
+
+       m := v.pointer()
+       mlen := int(0)
+       if m != nil {
+               mlen = maplen(m)
+       }
+       var it hiter
+       mapiterinit(v.typ(), m, &it)
+       a := make([]Value, mlen)
+       var i int
+       for i = 0; i < len(a); i++ {
+               key := mapiterkey(&it)
+               if key == nil {
+                       // Someone deleted an entry from the map since we
+                       // called maplen above. It's a data race, but nothing
+                       // we can do about it.
+                       break
+               }
+               a[i] = copyVal(keyType, fl, key)
+               mapiternext(&it)
+       }
+       return a[:i]
+}
+
+// hiter's structure matches runtime.hiter's structure.
+// Having a clone here allows us to embed a map iterator
+// inside type MapIter so that MapIters can be re-used
+// without doing any allocations.
+type hiter struct {
+       key         unsafe.Pointer
+       elem        unsafe.Pointer
+       t           unsafe.Pointer
+       h           unsafe.Pointer
+       buckets     unsafe.Pointer
+       bptr        unsafe.Pointer
+       overflow    *[]unsafe.Pointer
+       oldoverflow *[]unsafe.Pointer
+       startBucket uintptr
+       offset      uint8
+       wrapped     bool
+       B           uint8
+       i           uint8
+       bucket      uintptr
+       checkBucket uintptr
+}
+
+func (h *hiter) initialized() bool {
+       return h.t != nil
+}
+
+// A MapIter is an iterator for ranging over a map.
+// See [Value.MapRange].
+type MapIter struct {
+       m     Value
+       hiter hiter
+}
+
+// Key returns the key of iter's current map entry.
+func (iter *MapIter) Key() Value {
+       if !iter.hiter.initialized() {
+               panic("MapIter.Key called before Next")
+       }
+       iterkey := mapiterkey(&iter.hiter)
+       if iterkey == nil {
+               panic("MapIter.Key called on exhausted iterator")
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       ktype := t.Key
+       return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
+}
+
+// SetIterKey assigns to v the key of iter's current map entry.
+// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
+// As in Go, the key must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) SetIterKey(iter *MapIter) {
+       if !iter.hiter.initialized() {
+               panic("reflect: Value.SetIterKey called before Next")
+       }
+       iterkey := mapiterkey(&iter.hiter)
+       if iterkey == nil {
+               panic("reflect: Value.SetIterKey called on exhausted iterator")
+       }
+
+       v.mustBeAssignable()
+       var target unsafe.Pointer
+       if v.kind() == Interface {
+               target = v.ptr
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       ktype := t.Key
+
+       iter.m.mustBeExported() // do not let unexported m leak
+       key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
+       key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
+       typedmemmove(v.typ(), v.ptr, key.ptr)
+}
+
+// Value returns the value of iter's current map entry.
+func (iter *MapIter) Value() Value {
+       if !iter.hiter.initialized() {
+               panic("MapIter.Value called before Next")
+       }
+       iterelem := mapiterelem(&iter.hiter)
+       if iterelem == nil {
+               panic("MapIter.Value called on exhausted iterator")
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       vtype := t.Elem
+       return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
+}
+
+// SetIterValue assigns to v the value of iter's current map entry.
+// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
+// As in Go, the value must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) SetIterValue(iter *MapIter) {
+       if !iter.hiter.initialized() {
+               panic("reflect: Value.SetIterValue called before Next")
+       }
+       iterelem := mapiterelem(&iter.hiter)
+       if iterelem == nil {
+               panic("reflect: Value.SetIterValue called on exhausted iterator")
+       }
+
+       v.mustBeAssignable()
+       var target unsafe.Pointer
+       if v.kind() == Interface {
+               target = v.ptr
+       }
+
+       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       vtype := t.Elem
+
+       iter.m.mustBeExported() // do not let unexported m leak
+       elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
+       elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
+       typedmemmove(v.typ(), v.ptr, elem.ptr)
+}
+
+// Next advances the map iterator and reports whether there is another
+// entry. It returns false when iter is exhausted; subsequent
+// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
+func (iter *MapIter) Next() bool {
+       if !iter.m.IsValid() {
+               panic("MapIter.Next called on an iterator that does not have an associated map Value")
+       }
+       if !iter.hiter.initialized() {
+               mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
+       } else {
+               if mapiterkey(&iter.hiter) == nil {
+                       panic("MapIter.Next called on exhausted iterator")
+               }
+               mapiternext(&iter.hiter)
+       }
+       return mapiterkey(&iter.hiter) != nil
+}
+
+// Reset modifies iter to iterate over v.
+// It panics if v's Kind is not [Map] and v is not the zero Value.
+// Reset(Value{}) causes iter to not to refer to any map,
+// which may allow the previously iterated-over map to be garbage collected.
+func (iter *MapIter) Reset(v Value) {
+       if v.IsValid() {
+               v.mustBe(Map)
+       }
+       iter.m = v
+       iter.hiter = hiter{}
+}
+
+// MapRange returns a range iterator for a map.
+// It panics if v's Kind is not [Map].
+//
+// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
+// [MapIter.Next] returns false when the iterator is exhausted.
+// MapRange follows the same iteration semantics as a range statement.
+//
+// Example:
+//
+//     iter := reflect.ValueOf(m).MapRange()
+//     for iter.Next() {
+//             k := iter.Key()
+//             v := iter.Value()
+//             ...
+//     }
+func (v Value) MapRange() *MapIter {
+       // This is inlinable to take advantage of "function outlining".
+       // The allocation of MapIter can be stack allocated if the caller
+       // does not allow it to escape.
+       // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
+       if v.kind() != Map {
+               v.panicNotMap()
+       }
+       return &MapIter{m: v}
+}
+
+// SetMapIndex sets the element associated with key in the map v to elem.
+// It panics if v's Kind is not [Map].
+// If elem is the zero Value, SetMapIndex deletes the key from the map.
+// Otherwise if v holds a nil map, SetMapIndex will panic.
+// As in Go, key's elem must be assignable to the map's key type,
+// and elem's value must be assignable to the map's elem type.
+func (v Value) SetMapIndex(key, elem Value) {
+       v.mustBe(Map)
+       v.mustBeExported()
+       key.mustBeExported()
+       tt := (*mapType)(unsafe.Pointer(v.typ()))
+
+       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+               k := *(*string)(key.ptr)
+               if elem.typ() == nil {
+                       mapdelete_faststr(v.typ(), v.pointer(), k)
+                       return
+               }
+               elem.mustBeExported()
+               elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
+               var e unsafe.Pointer
+               if elem.flag&flagIndir != 0 {
+                       e = elem.ptr
+               } else {
+                       e = unsafe.Pointer(&elem.ptr)
+               }
+               mapassign_faststr(v.typ(), v.pointer(), k, e)
+               return
+       }
+
+       key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
+       var k unsafe.Pointer
+       if key.flag&flagIndir != 0 {
+               k = key.ptr
+       } else {
+               k = unsafe.Pointer(&key.ptr)
+       }
+       if elem.typ() == nil {
+               mapdelete(v.typ(), v.pointer(), k)
+               return
+       }
+       elem.mustBeExported()
+       elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
+       var e unsafe.Pointer
+       if elem.flag&flagIndir != 0 {
+               e = elem.ptr
+       } else {
+               e = unsafe.Pointer(&elem.ptr)
+       }
+       mapassign(v.typ(), v.pointer(), k, e)
+}
+
+// Force slow panicking path not inlined, so it won't add to the
+// inlining budget of the caller.
+// TODO: undo when the inliner is no longer bottom-up only.
+//
+//go:noinline
+func (f flag) panicNotMap() {
+       f.mustBe(Map)
+}
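The MapIter plumbing in these files exists mainly so that iteration can avoid per-entry allocations. A small usage sketch with the standard reflect API, independent of which map implementation is compiled in:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	v := reflect.ValueOf(m)

	// Reusable key/elem Values; SetIterKey/SetIterValue copy the current
	// entry into them instead of allocating a new Value per entry.
	k := reflect.New(v.Type().Key()).Elem()
	e := reflect.New(v.Type().Elem()).Elem()

	iter := v.MapRange()
	for iter.Next() {
		k.SetIterKey(iter)
		e.SetIterValue(iter)
		fmt.Println(k.Interface(), e.Interface())
	}
}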
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 07e2bf16447bbb94238f5f07dc098ae29f949a00..f790182547d34ea294adda0b5a058f145d05dea2 100644 (file)
@@ -262,7 +262,7 @@ type Type interface {
 /*
  * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
  * A few are known to ../runtime/type.go to convey to debuggers.
- * They are also known to ../runtime/type.go.
+ * They are also known to ../internal/abi/type.go.
  */
 
 // A Kind represents the specific kind of type that a [Type] represents.
@@ -388,11 +388,6 @@ func (t *interfaceType) uncommon() *abi.UncommonType {
        return t.Uncommon()
 }
 
-// mapType represents a map type.
-type mapType struct {
-       abi.MapType
-}
-
 // ptrType represents a pointer type.
 type ptrType struct {
        abi.PtrType
@@ -773,14 +768,6 @@ func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
        return tt.FieldByNameFunc(match)
 }
 
-func (t *rtype) Key() Type {
-       if t.Kind() != Map {
-               panic("reflect: Key of non-map type " + t.String())
-       }
-       tt := (*mapType)(unsafe.Pointer(t))
-       return toType(tt.Key)
-}
-
 func (t *rtype) Len() int {
        if t.Kind() != Array {
                panic("reflect: Len of non-array type " + t.String())
@@ -1801,79 +1788,6 @@ func ChanOf(dir ChanDir, t Type) Type {
        return ti.(Type)
 }
 
-// MapOf returns the map type with the given key and element types.
-// For example, if k represents int and e represents string,
-// MapOf(k, e) represents map[int]string.
-//
-// If the key type is not a valid map key type (that is, if it does
-// not implement Go's == operator), MapOf panics.
-func MapOf(key, elem Type) Type {
-       ktyp := key.common()
-       etyp := elem.common()
-
-       if ktyp.Equal == nil {
-               panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
-       }
-
-       // Look in cache.
-       ckey := cacheKey{Map, ktyp, etyp, 0}
-       if mt, ok := lookupCache.Load(ckey); ok {
-               return mt.(Type)
-       }
-
-       // Look in known types.
-       s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
-       for _, tt := range typesByString(s) {
-               mt := (*mapType)(unsafe.Pointer(tt))
-               if mt.Key == ktyp && mt.Elem == etyp {
-                       ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
-                       return ti.(Type)
-               }
-       }
-
-       // Make a map type.
-       // Note: flag values must match those used in the TMAP case
-       // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-       var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
-       mt := **(**mapType)(unsafe.Pointer(&imap))
-       mt.Str = resolveReflectName(newName(s, "", false, false))
-       mt.TFlag = 0
-       mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
-       mt.Key = ktyp
-       mt.Elem = etyp
-       mt.Bucket = bucketOf(ktyp, etyp)
-       mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
-               return typehash(ktyp, p, seed)
-       }
-       mt.Flags = 0
-       if ktyp.Size_ > abi.MapMaxKeyBytes {
-               mt.KeySize = uint8(goarch.PtrSize)
-               mt.Flags |= 1 // indirect key
-       } else {
-               mt.KeySize = uint8(ktyp.Size_)
-       }
-       if etyp.Size_ > abi.MapMaxElemBytes {
-               mt.ValueSize = uint8(goarch.PtrSize)
-               mt.Flags |= 2 // indirect value
-       } else {
-               mt.MapType.ValueSize = uint8(etyp.Size_)
-       }
-       mt.MapType.BucketSize = uint16(mt.Bucket.Size_)
-       if isReflexive(ktyp) {
-               mt.Flags |= 4
-       }
-       if needKeyUpdate(ktyp) {
-               mt.Flags |= 8
-       }
-       if hashMightPanic(ktyp) {
-               mt.Flags |= 16
-       }
-       mt.PtrToThis = 0
-
-       ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
-       return ti.(Type)
-}
-
 var funcTypes []Type
 var funcTypesMutex sync.Mutex
 
@@ -2106,69 +2020,6 @@ func hashMightPanic(t *abi.Type) bool {
        }
 }
 
-func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
-       if ktyp.Size_ > abi.MapMaxKeyBytes {
-               ktyp = ptrTo(ktyp)
-       }
-       if etyp.Size_ > abi.MapMaxElemBytes {
-               etyp = ptrTo(etyp)
-       }
-
-       // Prepare GC data if any.
-       // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
-       // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
-       // Note that since the key and value are known to be <= 128 bytes,
-       // they're guaranteed to have bitmaps instead of GC programs.
-       var gcdata *byte
-       var ptrdata uintptr
-
-       size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
-       if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
-               panic("reflect: bad size computation in MapOf")
-       }
-
-       if ktyp.Pointers() || etyp.Pointers() {
-               nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
-               n := (nptr + 7) / 8
-
-               // Runtime needs pointer masks to be a multiple of uintptr in size.
-               n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
-               mask := make([]byte, n)
-               base := uintptr(abi.MapBucketCount / goarch.PtrSize)
-
-               if ktyp.Pointers() {
-                       emitGCMask(mask, base, ktyp, abi.MapBucketCount)
-               }
-               base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
-
-               if etyp.Pointers() {
-                       emitGCMask(mask, base, etyp, abi.MapBucketCount)
-               }
-               base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
-
-               word := base
-               mask[word/8] |= 1 << (word % 8)
-               gcdata = &mask[0]
-               ptrdata = (word + 1) * goarch.PtrSize
-
-               // overflow word must be last
-               if ptrdata != size {
-                       panic("reflect: bad layout computation in MapOf")
-               }
-       }
-
-       b := &abi.Type{
-               Align_:   goarch.PtrSize,
-               Size_:    size,
-               Kind_:    abi.Struct,
-               PtrBytes: ptrdata,
-               GCData:   gcdata,
-       }
-       s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
-       b.Str = resolveReflectName(newName(s, "", false, false))
-       return b
-}
-
 func (t *rtype) gcSlice(begin, end uintptr) []byte {
        return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end]
 }
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 8df7d13114d3de91faaba15dd6c71f90c908211a..7bac530b5bc9324354b83242703726cf70f93b92 100644 (file)
@@ -1779,265 +1779,6 @@ func (v Value) lenNonSlice() int {
        panic(&ValueError{"reflect.Value.Len", v.kind()})
 }
 
-var stringType = rtypeOf("")
-
-// MapIndex returns the value associated with key in the map v.
-// It panics if v's Kind is not [Map].
-// It returns the zero Value if key is not found in the map or if v represents a nil map.
-// As in Go, the key's value must be assignable to the map's key type.
-func (v Value) MapIndex(key Value) Value {
-       v.mustBe(Map)
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
-
-       // Do not require key to be exported, so that DeepEqual
-       // and other programs can use all the keys returned by
-       // MapKeys as arguments to MapIndex. If either the map
-       // or the key is unexported, though, the result will be
-       // considered unexported. This is consistent with the
-       // behavior for structs, which allow read but not write
-       // of unexported fields.
-
-       var e unsafe.Pointer
-       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
-               k := *(*string)(key.ptr)
-               e = mapaccess_faststr(v.typ(), v.pointer(), k)
-       } else {
-               key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
-               var k unsafe.Pointer
-               if key.flag&flagIndir != 0 {
-                       k = key.ptr
-               } else {
-                       k = unsafe.Pointer(&key.ptr)
-               }
-               e = mapaccess(v.typ(), v.pointer(), k)
-       }
-       if e == nil {
-               return Value{}
-       }
-       typ := tt.Elem
-       fl := (v.flag | key.flag).ro()
-       fl |= flag(typ.Kind())
-       return copyVal(typ, fl, e)
-}
-
-// MapKeys returns a slice containing all the keys present in the map,
-// in unspecified order.
-// It panics if v's Kind is not [Map].
-// It returns an empty slice if v represents a nil map.
-func (v Value) MapKeys() []Value {
-       v.mustBe(Map)
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
-       keyType := tt.Key
-
-       fl := v.flag.ro() | flag(keyType.Kind())
-
-       m := v.pointer()
-       mlen := int(0)
-       if m != nil {
-               mlen = maplen(m)
-       }
-       var it hiter
-       mapiterinit(v.typ(), m, &it)
-       a := make([]Value, mlen)
-       var i int
-       for i = 0; i < len(a); i++ {
-               key := mapiterkey(&it)
-               if key == nil {
-                       // Someone deleted an entry from the map since we
-                       // called maplen above. It's a data race, but nothing
-                       // we can do about it.
-                       break
-               }
-               a[i] = copyVal(keyType, fl, key)
-               mapiternext(&it)
-       }
-       return a[:i]
-}
-
-// hiter's structure matches runtime.hiter's structure.
-// Having a clone here allows us to embed a map iterator
-// inside type MapIter so that MapIters can be re-used
-// without doing any allocations.
-type hiter struct {
-       key         unsafe.Pointer
-       elem        unsafe.Pointer
-       t           unsafe.Pointer
-       h           unsafe.Pointer
-       buckets     unsafe.Pointer
-       bptr        unsafe.Pointer
-       overflow    *[]unsafe.Pointer
-       oldoverflow *[]unsafe.Pointer
-       startBucket uintptr
-       offset      uint8
-       wrapped     bool
-       B           uint8
-       i           uint8
-       bucket      uintptr
-       checkBucket uintptr
-}
-
-func (h *hiter) initialized() bool {
-       return h.t != nil
-}
-
-// A MapIter is an iterator for ranging over a map.
-// See [Value.MapRange].
-type MapIter struct {
-       m     Value
-       hiter hiter
-}
-
-// Key returns the key of iter's current map entry.
-func (iter *MapIter) Key() Value {
-       if !iter.hiter.initialized() {
-               panic("MapIter.Key called before Next")
-       }
-       iterkey := mapiterkey(&iter.hiter)
-       if iterkey == nil {
-               panic("MapIter.Key called on exhausted iterator")
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       ktype := t.Key
-       return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
-}
-
-// SetIterKey assigns to v the key of iter's current map entry.
-// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
-// As in Go, the key must be assignable to v's type and
-// must not be derived from an unexported field.
-func (v Value) SetIterKey(iter *MapIter) {
-       if !iter.hiter.initialized() {
-               panic("reflect: Value.SetIterKey called before Next")
-       }
-       iterkey := mapiterkey(&iter.hiter)
-       if iterkey == nil {
-               panic("reflect: Value.SetIterKey called on exhausted iterator")
-       }
-
-       v.mustBeAssignable()
-       var target unsafe.Pointer
-       if v.kind() == Interface {
-               target = v.ptr
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       ktype := t.Key
-
-       iter.m.mustBeExported() // do not let unexported m leak
-       key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
-       key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
-       typedmemmove(v.typ(), v.ptr, key.ptr)
-}
-
-// Value returns the value of iter's current map entry.
-func (iter *MapIter) Value() Value {
-       if !iter.hiter.initialized() {
-               panic("MapIter.Value called before Next")
-       }
-       iterelem := mapiterelem(&iter.hiter)
-       if iterelem == nil {
-               panic("MapIter.Value called on exhausted iterator")
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       vtype := t.Elem
-       return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
-}
-
-// SetIterValue assigns to v the value of iter's current map entry.
-// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
-// As in Go, the value must be assignable to v's type and
-// must not be derived from an unexported field.
-func (v Value) SetIterValue(iter *MapIter) {
-       if !iter.hiter.initialized() {
-               panic("reflect: Value.SetIterValue called before Next")
-       }
-       iterelem := mapiterelem(&iter.hiter)
-       if iterelem == nil {
-               panic("reflect: Value.SetIterValue called on exhausted iterator")
-       }
-
-       v.mustBeAssignable()
-       var target unsafe.Pointer
-       if v.kind() == Interface {
-               target = v.ptr
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       vtype := t.Elem
-
-       iter.m.mustBeExported() // do not let unexported m leak
-       elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
-       elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
-       typedmemmove(v.typ(), v.ptr, elem.ptr)
-}
-
-// Next advances the map iterator and reports whether there is another
-// entry. It returns false when iter is exhausted; subsequent
-// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
-func (iter *MapIter) Next() bool {
-       if !iter.m.IsValid() {
-               panic("MapIter.Next called on an iterator that does not have an associated map Value")
-       }
-       if !iter.hiter.initialized() {
-               mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
-       } else {
-               if mapiterkey(&iter.hiter) == nil {
-                       panic("MapIter.Next called on exhausted iterator")
-               }
-               mapiternext(&iter.hiter)
-       }
-       return mapiterkey(&iter.hiter) != nil
-}
-
-// Reset modifies iter to iterate over v.
-// It panics if v's Kind is not [Map] and v is not the zero Value.
-// Reset(Value{}) causes iter to not to refer to any map,
-// which may allow the previously iterated-over map to be garbage collected.
-func (iter *MapIter) Reset(v Value) {
-       if v.IsValid() {
-               v.mustBe(Map)
-       }
-       iter.m = v
-       iter.hiter = hiter{}
-}
-
-// MapRange returns a range iterator for a map.
-// It panics if v's Kind is not [Map].
-//
-// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
-// [MapIter.Next] returns false when the iterator is exhausted.
-// MapRange follows the same iteration semantics as a range statement.
-//
-// Example:
-//
-//     iter := reflect.ValueOf(m).MapRange()
-//     for iter.Next() {
-//             k := iter.Key()
-//             v := iter.Value()
-//             ...
-//     }
-func (v Value) MapRange() *MapIter {
-       // This is inlinable to take advantage of "function outlining".
-       // The allocation of MapIter can be stack allocated if the caller
-       // does not allow it to escape.
-       // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
-       if v.kind() != Map {
-               v.panicNotMap()
-       }
-       return &MapIter{m: v}
-}
-
-// Force slow panicking path not inlined, so it won't add to the
-// inlining budget of the caller.
-// TODO: undo when the inliner is no longer bottom-up only.
-//
-//go:noinline
-func (f flag) panicNotMap() {
-       f.mustBe(Map)
-}
-
 // copyVal returns a Value containing the map key or value at ptr,
 // allocating a new variable as needed.
 func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value {
@@ -2426,58 +2167,6 @@ func (v Value) SetCap(n int) {
        s.Cap = n
 }
 
-// SetMapIndex sets the element associated with key in the map v to elem.
-// It panics if v's Kind is not [Map].
-// If elem is the zero Value, SetMapIndex deletes the key from the map.
-// Otherwise if v holds a nil map, SetMapIndex will panic.
-// As in Go, key's elem must be assignable to the map's key type,
-// and elem's value must be assignable to the map's elem type.
-func (v Value) SetMapIndex(key, elem Value) {
-       v.mustBe(Map)
-       v.mustBeExported()
-       key.mustBeExported()
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
-
-       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
-               k := *(*string)(key.ptr)
-               if elem.typ() == nil {
-                       mapdelete_faststr(v.typ(), v.pointer(), k)
-                       return
-               }
-               elem.mustBeExported()
-               elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
-               var e unsafe.Pointer
-               if elem.flag&flagIndir != 0 {
-                       e = elem.ptr
-               } else {
-                       e = unsafe.Pointer(&elem.ptr)
-               }
-               mapassign_faststr(v.typ(), v.pointer(), k, e)
-               return
-       }
-
-       key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
-       var k unsafe.Pointer
-       if key.flag&flagIndir != 0 {
-               k = key.ptr
-       } else {
-               k = unsafe.Pointer(&key.ptr)
-       }
-       if elem.typ() == nil {
-               mapdelete(v.typ(), v.pointer(), k)
-               return
-       }
-       elem.mustBeExported()
-       elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
-       var e unsafe.Pointer
-       if elem.flag&flagIndir != 0 {
-               e = elem.ptr
-       } else {
-               e = unsafe.Pointer(&elem.ptr)
-       }
-       mapassign(v.typ(), v.pointer(), k, e)
-}
-
 // SetUint sets v's underlying value to x.
 // It panics if v's Kind is not [Uint], [Uintptr], [Uint8], [Uint16], [Uint32], or [Uint64], or if [Value.CanSet] returns false.
 func (v Value) SetUint(x uint64) {
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32_noswiss.go
similarity index 99%
rename from src/runtime/map_fast32.go
rename to src/runtime/map_fast32_noswiss.go
index 0eb8562f5102dcd773bd6e61b904bd75863684a6..58d5a050f50b4e30938257a876cb3517ac5e22d9 100644 (file)
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !goexperiment.swissmap
+
 package runtime
 
 import (
diff --git a/src/runtime/map_fast32_swiss.go b/src/runtime/map_fast32_swiss.go
new file mode 100644 (file)
index 0000000..a6fb267
--- /dev/null
@@ -0,0 +1,464 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package runtime
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       "unsafe"
+)
+
+func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+               m := bucketMask(h.B)
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+               if c := h.oldbuckets; c != nil {
+                       if !h.sameSizeGrow() {
+                               // There used to be half as many buckets; mask down one more power of two.
+                               m >>= 1
+                       }
+                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for ; b != nil; b = b.overflow(t) {
+               for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
+                       if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0])
+}
+
+func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+               m := bucketMask(h.B)
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+               if c := h.oldbuckets; c != nil {
+                       if !h.sameSizeGrow() {
+                               // There used to be half as many buckets; mask down one more power of two.
+                               m >>= 1
+                       }
+                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for ; b != nil; b = b.overflow(t) {
+               for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
+                       if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapassign.
+       h.flags ^= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast32(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+bucketloop:
+       for {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if isEmpty(b.tophash[i]) {
+                               if insertb == nil {
+                                       inserti = i
+                                       insertb = b
+                               }
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+                       if k != key {
+                               continue
+                       }
+                       inserti = i
+                       insertb = b
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if insertb == nil {
+               // The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+       // store new key at insert position
+       *(*uint32)(insertk) = key
+
+       h.count++
+
+done:
+       elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return elem
+}
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapassign.
+       h.flags ^= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast32(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+bucketloop:
+       for {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if isEmpty(b.tophash[i]) {
+                               if insertb == nil {
+                                       inserti = i
+                                       insertb = b
+                               }
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+                       if k != key {
+                               continue
+                       }
+                       inserti = i
+                       insertb = b
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if insertb == nil {
+               // The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+       // store new key at insert position
+       *(*unsafe.Pointer)(insertk) = key
+
+       h.count++
+
+done:
+       elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return elem
+}
+
+func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
+       }
+       if h == nil || h.count == 0 {
+               return
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+
+       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapdelete
+       h.flags ^= hashWriting
+
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast32(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+       bOrig := b
+search:
+       for ; b != nil; b = b.overflow(t) {
+               for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
+                       if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
+                               continue
+                       }
+                       // Only clear key if there are pointers in it.
+                       // This can only happen if pointers are 32 bits
+                       // wide, as 64 bit pointers do not fit into a 32 bit key.
+                       if goarch.PtrSize == 4 && t.Key.Pointers() {
+                               // The key must be a pointer as we checked pointers are
+                               // 32 bits wide and the key is 32 bits wide also.
+                               *(*unsafe.Pointer)(k) = nil
+                       }
+                       e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
+                       if t.Elem.Pointers() {
+                               memclrHasPointers(e, t.Elem.Size_)
+                       } else {
+                               memclrNoHeapPointers(e, t.Elem.Size_)
+                       }
+                       b.tophash[i] = emptyOne
+                       // If the bucket now ends in a bunch of emptyOne states,
+                       // change those to emptyRest states.
+                       if i == abi.MapBucketCount-1 {
+                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+                                       goto notLast
+                               }
+                       } else {
+                               if b.tophash[i+1] != emptyRest {
+                                       goto notLast
+                               }
+                       }
+                       for {
+                               b.tophash[i] = emptyRest
+                               if i == 0 {
+                                       if b == bOrig {
+                                               break // beginning of initial bucket, we're done.
+                                       }
+                                       // Find previous bucket, continue at its last entry.
+                                       c := b
+                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+                                       }
+                                       i = abi.MapBucketCount - 1
+                               } else {
+                                       i--
+                               }
+                               if b.tophash[i] != emptyOne {
+                                       break
+                               }
+                       }
+               notLast:
+                       h.count--
+                       // Reset the hash seed to make it more difficult for attackers to
+                       // repeatedly trigger hash collisions. See issue 25237.
+                       if h.count == 0 {
+                               h.hash0 = uint32(rand())
+                       }
+                       break search
+               }
+       }
+
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
+
+func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
+       // make sure we evacuate the oldbucket corresponding
+       // to the bucket we're about to use
+       evacuate_fast32(t, h, bucket&h.oldbucketmask())
+
+       // evacuate one more oldbucket to make progress on growing
+       if h.growing() {
+               evacuate_fast32(t, h, h.nevacuate)
+       }
+}
+
+func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
+       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
+       newbit := h.noldbuckets()
+       if !evacuated(b) {
+               // TODO: reuse overflow buckets instead of using new ones, if there
+               // is no iterator using the old buckets.  (If !oldIterator.)
+
+               // xy contains the x and y (low and high) evacuation destinations.
+               var xy [2]evacDst
+               x := &xy[0]
+               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
+               x.k = add(unsafe.Pointer(x.b), dataOffset)
+               x.e = add(x.k, abi.MapBucketCount*4)
+
+               if !h.sameSizeGrow() {
+                       // Only calculate y pointers if we're growing bigger.
+                       // Otherwise GC can see bad pointers.
+                       y := &xy[1]
+                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
+                       y.k = add(unsafe.Pointer(y.b), dataOffset)
+                       y.e = add(y.k, abi.MapBucketCount*4)
+               }
+
+               for ; b != nil; b = b.overflow(t) {
+                       k := add(unsafe.Pointer(b), dataOffset)
+                       e := add(k, abi.MapBucketCount*4)
+                       for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
+                               top := b.tophash[i]
+                               if isEmpty(top) {
+                                       b.tophash[i] = evacuatedEmpty
+                                       continue
+                               }
+                               if top < minTopHash {
+                                       throw("bad map state")
+                               }
+                               var useY uint8
+                               if !h.sameSizeGrow() {
+                                       // Compute hash to make our evacuation decision (whether we need
+                                       // to send this key/elem to bucket x or bucket y).
+                                       hash := t.Hasher(k, uintptr(h.hash0))
+                                       if hash&newbit != 0 {
+                                               useY = 1
+                                       }
+                               }
+
+                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+                               dst := &xy[useY]                 // evacuation destination
+
+                               if dst.i == abi.MapBucketCount {
+                                       dst.b = h.newoverflow(t, dst.b)
+                                       dst.i = 0
+                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+                                       dst.e = add(dst.k, abi.MapBucketCount*4)
+                               }
+                               dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+                               // Copy key.
+                               if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
+                                       // Write with a write barrier.
+                                       *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+                               } else {
+                                       *(*uint32)(dst.k) = *(*uint32)(k)
+                               }
+
+                               typedmemmove(t.Elem, dst.e, e)
+                               dst.i++
+                               // These updates might push these pointers past the end of the
+                               // key or elem arrays.  That's ok, as we have the overflow pointer
+                               // at the end of the bucket to protect against pointing past the
+                               // end of the bucket.
+                               dst.k = add(dst.k, 4)
+                               dst.e = add(dst.e, uintptr(t.ValueSize))
+                       }
+               }
+               // Unlink the overflow buckets & clear key/elem to help GC.
+               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
+                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
+                       // Preserve b.tophash because the evacuation
+                       // state is maintained there.
+                       ptr := add(b, dataOffset)
+                       n := uintptr(t.BucketSize) - dataOffset
+                       memclrHasPointers(ptr, n)
+               }
+       }
+
+       if oldbucket == h.nevacuate {
+               advanceEvacuationMark(h, t, newbit)
+       }
+}
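The map_fast32_swiss.go copy above is compiled only when the goexperiment.swissmap tag is set (for example by building or testing with GOEXPERIMENT=swissmap), while map_fast32_noswiss.go carries the negated tag, so exactly one of the two files is linked into any given binary. A rough sketch of the same file-pairing pattern using a hypothetical package and file names; it assumes the goexperiment tag behaves like an ordinary build tag, which is how the files in this CL are selected:

// mapimpl_swiss.go (hypothetical; a sibling mapimpl_noswiss.go would carry
// the negated constraint //go:build !goexperiment.swissmap and declare the
// same identifiers, mirroring the _swiss/_noswiss pairing in this CL).

//go:build goexperiment.swissmap

package mapimpl

// Implementation reports which map implementation the binary was built with.
func Implementation() string { return "swisstable" }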
similarity index 99%
rename from src/runtime/map_fast64.go
rename to src/runtime/map_fast64_noswiss.go
index aca60eb2a80b02dad6b51d040edcf5185327b4f6..bd3c1c34da0338c3045ce8de485bf79362e39ae9 100644 (file)
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !goexperiment.swissmap
+
 package runtime
 
 import (
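Besides the plain uint64 helpers, the fast64 family includes pointer-key variants (mapassign_fast64ptr in the copy below and in the renamed file above) for maps whose key is itself a single pointer: stores go through a write barrier, and mapdelete_fast64 clears the key slot so the GC can reclaim what it referenced. A small sketch of a map that would typically take this path on a 64-bit platform; the helper selection is again a compiler detail:

package main

import "fmt"

type node struct{ id int }

func main() {
        // A pointer key is pointer-sized, so on 64-bit platforms assignment is
        // normally routed through the pointer-aware fast path
        // (mapassign_fast64ptr); deleting the entry also nils the stored key,
        // letting the node be collected once nothing else refers to it.
        seen := make(map[*node]bool)

        n := &node{id: 1}
        seen[n] = true
        fmt.Println(len(seen)) // 1

        delete(seen, n)
        fmt.Println(len(seen)) // 0
}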
diff --git a/src/runtime/map_fast64_swiss.go b/src/runtime/map_fast64_swiss.go
new file mode 100644 (file)
index 0000000..105b2ea
--- /dev/null
@@ -0,0 +1,472 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package runtime
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       "unsafe"
+)
+
+func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+               m := bucketMask(h.B)
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+               if c := h.oldbuckets; c != nil {
+                       if !h.sameSizeGrow() {
+                               // There used to be half as many buckets; mask down one more power of two.
+                               m >>= 1
+                       }
+                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for ; b != nil; b = b.overflow(t) {
+               for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
+                       if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0])
+}
+
+func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+               m := bucketMask(h.B)
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+               if c := h.oldbuckets; c != nil {
+                       if !h.sameSizeGrow() {
+                               // There used to be half as many buckets; mask down one more power of two.
+                               m >>= 1
+                       }
+                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for ; b != nil; b = b.overflow(t) {
+               for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
+                       if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapassign.
+       h.flags ^= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast64(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+bucketloop:
+       for {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if isEmpty(b.tophash[i]) {
+                               if insertb == nil {
+                                       insertb = b
+                                       inserti = i
+                               }
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+                       if k != key {
+                               continue
+                       }
+                       insertb = b
+                       inserti = i
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if insertb == nil {
+               // The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+       // store new key at insert position
+       *(*uint64)(insertk) = key
+
+       h.count++
+
+done:
+       elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return elem
+}
+
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapassign.
+       h.flags ^= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast64(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+bucketloop:
+       for {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if isEmpty(b.tophash[i]) {
+                               if insertb == nil {
+                                       insertb = b
+                                       inserti = i
+                               }
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+                       if k != key {
+                               continue
+                       }
+                       insertb = b
+                       inserti = i
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if insertb == nil {
+               // The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+       // store new key at insert position
+       *(*unsafe.Pointer)(insertk) = key
+
+       h.count++
+
+done:
+       elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return elem
+}
+
+func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
+       }
+       if h == nil || h.count == 0 {
+               return
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+
+       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapdelete
+       h.flags ^= hashWriting
+
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast64(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+       bOrig := b
+search:
+       for ; b != nil; b = b.overflow(t) {
+               for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
+                       if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
+                               continue
+                       }
+                       // Only clear key if there are pointers in it.
+                       if t.Key.Pointers() {
+                               if goarch.PtrSize == 8 {
+                                       *(*unsafe.Pointer)(k) = nil
+                               } else {
+                                       // There are three ways to squeeze one or more 32 bit pointers into 64 bits.
+                                       // Just call memclrHasPointers instead of trying to handle all cases here.
+                                       memclrHasPointers(k, 8)
+                               }
+                       }
+                       e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
+                       if t.Elem.Pointers() {
+                               memclrHasPointers(e, t.Elem.Size_)
+                       } else {
+                               memclrNoHeapPointers(e, t.Elem.Size_)
+                       }
+                       b.tophash[i] = emptyOne
+                       // If the bucket now ends in a bunch of emptyOne states,
+                       // change those to emptyRest states.
+                       if i == abi.MapBucketCount-1 {
+                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+                                       goto notLast
+                               }
+                       } else {
+                               if b.tophash[i+1] != emptyRest {
+                                       goto notLast
+                               }
+                       }
+                       for {
+                               b.tophash[i] = emptyRest
+                               if i == 0 {
+                                       if b == bOrig {
+                                               break // beginning of initial bucket, we're done.
+                                       }
+                                       // Find previous bucket, continue at its last entry.
+                                       c := b
+                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+                                       }
+                                       i = abi.MapBucketCount - 1
+                               } else {
+                                       i--
+                               }
+                               if b.tophash[i] != emptyOne {
+                                       break
+                               }
+                       }
+               notLast:
+                       h.count--
+                       // Reset the hash seed to make it more difficult for attackers to
+                       // repeatedly trigger hash collisions. See issue 25237.
+                       if h.count == 0 {
+                               h.hash0 = uint32(rand())
+                       }
+                       break search
+               }
+       }
+
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
+
+func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
+       // make sure we evacuate the oldbucket corresponding
+       // to the bucket we're about to use
+       evacuate_fast64(t, h, bucket&h.oldbucketmask())
+
+       // evacuate one more oldbucket to make progress on growing
+       if h.growing() {
+               evacuate_fast64(t, h, h.nevacuate)
+       }
+}
+
+func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
+       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
+       newbit := h.noldbuckets()
+       if !evacuated(b) {
+               // TODO: reuse overflow buckets instead of using new ones, if there
+               // is no iterator using the old buckets.  (If !oldIterator.)
+
+               // xy contains the x and y (low and high) evacuation destinations.
+               var xy [2]evacDst
+               x := &xy[0]
+               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
+               x.k = add(unsafe.Pointer(x.b), dataOffset)
+               x.e = add(x.k, abi.MapBucketCount*8)
+
+               if !h.sameSizeGrow() {
+                       // Only calculate y pointers if we're growing bigger.
+                       // Otherwise GC can see bad pointers.
+                       y := &xy[1]
+                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
+                       y.k = add(unsafe.Pointer(y.b), dataOffset)
+                       y.e = add(y.k, abi.MapBucketCount*8)
+               }
+
+               for ; b != nil; b = b.overflow(t) {
+                       k := add(unsafe.Pointer(b), dataOffset)
+                       e := add(k, abi.MapBucketCount*8)
+                       for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
+                               top := b.tophash[i]
+                               if isEmpty(top) {
+                                       b.tophash[i] = evacuatedEmpty
+                                       continue
+                               }
+                               if top < minTopHash {
+                                       throw("bad map state")
+                               }
+                               var useY uint8
+                               if !h.sameSizeGrow() {
+                                       // Compute hash to make our evacuation decision (whether we need
+                                       // to send this key/elem to bucket x or bucket y).
+                                       hash := t.Hasher(k, uintptr(h.hash0))
+                                       if hash&newbit != 0 {
+                                               useY = 1
+                                       }
+                               }
+
+                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+                               dst := &xy[useY]                 // evacuation destination
+
+                               if dst.i == abi.MapBucketCount {
+                                       dst.b = h.newoverflow(t, dst.b)
+                                       dst.i = 0
+                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+                                       dst.e = add(dst.k, abi.MapBucketCount*8)
+                               }
+                               dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+                               // Copy key.
+                               if t.Key.Pointers() && writeBarrier.enabled {
+                                       if goarch.PtrSize == 8 {
+                                               // Write with a write barrier.
+                                               *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+                                       } else {
+                                               // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
+                                               // Give up and call typedmemmove.
+                                               typedmemmove(t.Key, dst.k, k)
+                                       }
+                               } else {
+                                       *(*uint64)(dst.k) = *(*uint64)(k)
+                               }
+
+                               typedmemmove(t.Elem, dst.e, e)
+                               dst.i++
+                               // These updates might push these pointers past the end of the
+                               // key or elem arrays.  That's ok, as we have the overflow pointer
+                               // at the end of the bucket to protect against pointing past the
+                               // end of the bucket.
+                               dst.k = add(dst.k, 8)
+                               dst.e = add(dst.e, uintptr(t.ValueSize))
+                       }
+               }
+               // Unlink the overflow buckets & clear key/elem to help GC.
+               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
+                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
+                       // Preserve b.tophash because the evacuation
+                       // state is maintained there.
+                       ptr := add(b, dataOffset)
+                       n := uintptr(t.BucketSize) - dataOffset
+                       memclrHasPointers(ptr, n)
+               }
+       }
+
+       if oldbucket == h.nevacuate {
+               advanceEvacuationMark(h, t, newbit)
+       }
+}
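Every mutating helper in these files toggles the hashWriting flag around its critical section and re-checks it before clearing it; if another writer overlaps, the runtime aborts via fatal("concurrent map writes") rather than continuing with a possibly corrupted table. A deliberately incorrect sketch of the situation the check is meant to catch; running it is expected to crash (typically with "fatal error: concurrent map writes"), though the detection is best-effort rather than guaranteed:

package main

import "sync"

func writer(m map[uint64]uint64, base uint64, wg *sync.WaitGroup) {
        defer wg.Done()
        // Unsynchronized writes from two goroutines: the hashWriting checks
        // in mapassign_fast64 usually notice the overlap and the runtime
        // terminates the program. Do not write real code like this.
        for i := uint64(0); i < 1_000_000; i++ {
                m[base+i] = i
        }
}

func main() {
        m := make(map[uint64]uint64)

        var wg sync.WaitGroup
        wg.Add(2)
        go writer(m, 0, &wg)
        go writer(m, 1_000_000, &wg)
        wg.Wait()
}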
similarity index 99%
rename from src/runtime/map_faststr.go
rename to src/runtime/map_faststr_noswiss.go
index 5461a9f81e8143c3a1819676e8abe3b1da437841..6dfe61ec465da069a6d48895b595e920c957c1cd 100644 (file)
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !goexperiment.swissmap
+
 package runtime
 
 import (
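mapaccess1_faststr and friends (unchanged in the rename above, duplicated in the swiss copy below) special-case single-bucket string maps: keys shorter than 32 bytes are compared directly, while longer keys are first filtered by length and by their first and last four bytes before falling back to hashing. None of this is visible from Go source; a string-keyed map simply routes through the _faststr helpers, as in this sketch:

package main

import (
        "fmt"
        "strings"
)

func main() {
        m := map[string]int{}

        short := "id"                         // < 32 bytes: candidate keys are compared with memequal directly
        long := strings.Repeat("x", 40) + "!" // >= 32 bytes: length and first/last 4 bytes are checked first

        m[short] = 1 // mapassign_faststr
        m[long] = 2

        fmt.Println(m[short], m[long]) // mapaccess1_faststr
        delete(m, long)                // mapdelete_faststr
        fmt.Println(len(m))            // 1
}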
diff --git a/src/runtime/map_faststr_swiss.go b/src/runtime/map_faststr_swiss.go
new file mode 100644 (file)
index 0000000..9f3e14d
--- /dev/null
@@ -0,0 +1,487 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package runtime
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       "unsafe"
+)
+
+func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       key := stringStructOf(&ky)
+       if h.B == 0 {
+               // One-bucket table.
+               b := (*bmap)(h.buckets)
+               if key.len < 32 {
+                       // short key, doing lots of comparisons is ok
+                       for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                               k := (*stringStruct)(kptr)
+                               if k.len != key.len || isEmpty(b.tophash[i]) {
+                                       if b.tophash[i] == emptyRest {
+                                               break
+                                       }
+                                       continue
+                               }
+                               if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                                       return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+                               }
+                       }
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+               // long key, try not to do more comparisons than necessary
+               keymaybe := uintptr(abi.MapBucketCount)
+               for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                       k := (*stringStruct)(kptr)
+                       if k.len != key.len || isEmpty(b.tophash[i]) {
+                               if b.tophash[i] == emptyRest {
+                                       break
+                               }
+                               continue
+                       }
+                       if k.str == key.str {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+                       }
+                       // check first 4 bytes
+                       if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+                               continue
+                       }
+                       // check last 4 bytes
+                       if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+                               continue
+                       }
+                       if keymaybe != abi.MapBucketCount {
+                               // Two keys are potential matches. Use hash to distinguish them.
+                               goto dohash
+                       }
+                       keymaybe = i
+               }
+               if keymaybe != abi.MapBucketCount {
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
+                       if memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
+                       }
+               }
+               return unsafe.Pointer(&zeroVal[0])
+       }
+dohash:
+       hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+       m := bucketMask(h.B)
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+       if c := h.oldbuckets; c != nil {
+               if !h.sameSizeGrow() {
+                       // There used to be half as many buckets; mask down one more power of two.
+                       m >>= 1
+               }
+               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := tophash(hash)
+       for ; b != nil; b = b.overflow(t) {
+               for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                       k := (*stringStruct)(kptr)
+                       if k.len != key.len || b.tophash[i] != top {
+                               continue
+                       }
+                       if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0])
+}
+
+func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       key := stringStructOf(&ky)
+       if h.B == 0 {
+               // One-bucket table.
+               b := (*bmap)(h.buckets)
+               if key.len < 32 {
+                       // short key, doing lots of comparisons is ok
+                       for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                               k := (*stringStruct)(kptr)
+                               if k.len != key.len || isEmpty(b.tophash[i]) {
+                                       if b.tophash[i] == emptyRest {
+                                               break
+                                       }
+                                       continue
+                               }
+                               if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                                       return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
+                               }
+                       }
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+               // long key, try not to do more comparisons than necessary
+               keymaybe := uintptr(abi.MapBucketCount)
+               for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                       k := (*stringStruct)(kptr)
+                       if k.len != key.len || isEmpty(b.tophash[i]) {
+                               if b.tophash[i] == emptyRest {
+                                       break
+                               }
+                               continue
+                       }
+                       if k.str == key.str {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
+                       }
+                       // check first 4 bytes
+                       if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+                               continue
+                       }
+                       // check last 4 bytes
+                       if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+                               continue
+                       }
+                       if keymaybe != abi.MapBucketCount {
+                               // Two keys are potential matches. Use hash to distinguish them.
+                               goto dohash
+                       }
+                       keymaybe = i
+               }
+               if keymaybe != abi.MapBucketCount {
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
+                       if memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
+                       }
+               }
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+dohash:
+       hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+       m := bucketMask(h.B)
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+       if c := h.oldbuckets; c != nil {
+               if !h.sameSizeGrow() {
+                       // There used to be half as many buckets; mask down one more power of two.
+                       m >>= 1
+               }
+               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := tophash(hash)
+       for ; b != nil; b = b.overflow(t) {
+               for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                       k := (*stringStruct)(kptr)
+                       if k.len != key.len || b.tophash[i] != top {
+                               continue
+                       }
+                       if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+       key := stringStructOf(&s)
+       hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapassign.
+       h.flags ^= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_faststr(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+       top := tophash(hash)
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+bucketloop:
+       for {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if b.tophash[i] != top {
+                               if isEmpty(b.tophash[i]) && insertb == nil {
+                                       insertb = b
+                                       inserti = i
+                               }
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
+                       if k.len != key.len {
+                               continue
+                       }
+                       if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+                               continue
+                       }
+                       // already have a mapping for key. Update it.
+                       inserti = i
+                       insertb = b
+                       // Overwrite existing key, so it can be garbage collected.
+                       // The size is already guaranteed to be set correctly.
+                       k.str = key.str
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if insertb == nil {
+               // The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
+       // store new key at insert position
+       *((*stringStruct)(insertk)) = *key
+       h.count++
+
+done:
+       elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return elem
+}
+
+func mapdelete_faststr(t *maptype, h *hmap, ky string) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
+       }
+       if h == nil || h.count == 0 {
+               return
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+
+       key := stringStructOf(&ky)
+       hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher for consistency with mapdelete
+       h.flags ^= hashWriting
+
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_faststr(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+       bOrig := b
+       top := tophash(hash)
+search:
+       for ; b != nil; b = b.overflow(t) {
+               for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
+                       k := (*stringStruct)(kptr)
+                       if k.len != key.len || b.tophash[i] != top {
+                               continue
+                       }
+                       if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+                               continue
+                       }
+                       // Clear key's pointer.
+                       k.str = nil
+                       e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+                       if t.Elem.Pointers() {
+                               memclrHasPointers(e, t.Elem.Size_)
+                       } else {
+                               memclrNoHeapPointers(e, t.Elem.Size_)
+                       }
+                       b.tophash[i] = emptyOne
+                       // If the bucket now ends in a bunch of emptyOne states,
+                       // change those to emptyRest states.
+                       if i == abi.MapBucketCount-1 {
+                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+                                       goto notLast
+                               }
+                       } else {
+                               if b.tophash[i+1] != emptyRest {
+                                       goto notLast
+                               }
+                       }
+                       for {
+                               b.tophash[i] = emptyRest
+                               if i == 0 {
+                                       if b == bOrig {
+                                               break // beginning of initial bucket, we're done.
+                                       }
+                                       // Find previous bucket, continue at its last entry.
+                                       c := b
+                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+                                       }
+                                       i = abi.MapBucketCount - 1
+                               } else {
+                                       i--
+                               }
+                               if b.tophash[i] != emptyOne {
+                                       break
+                               }
+                       }
+               notLast:
+                       h.count--
+                       // Reset the hash seed to make it more difficult for attackers to
+                       // repeatedly trigger hash collisions. See issue 25237.
+                       if h.count == 0 {
+                               h.hash0 = uint32(rand())
+                       }
+                       break search
+               }
+       }
+
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
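
The back-scan above is what keeps the emptyRest optimization accurate: after a delete, any run of trailing emptyOne cells is upgraded to emptyRest so later lookups can stop at the first emptyRest they see. A minimal single-bucket sketch of the same idea (hypothetical markEmptyRest helper, local copies of the constants, no overflow chain):

    package main

    import "fmt"

    const (
        emptyRest = 0 // no more entries at higher indexes
        emptyOne  = 1 // this cell is empty
    )

    // markEmptyRest upgrades the just-deleted cell i, plus any run of emptyOne
    // cells before it, to emptyRest. Simplified: one bucket, no overflow buckets.
    func markEmptyRest(tophash []uint8, i int) {
        // Only valid when everything after i is already emptyRest.
        if i+1 < len(tophash) && tophash[i+1] != emptyRest {
            return
        }
        for i >= 0 && tophash[i] == emptyOne {
            tophash[i] = emptyRest
            i--
        }
    }

    func main() {
        th := []uint8{5, emptyOne, emptyOne, emptyOne, emptyRest, emptyRest, emptyRest, emptyRest}
        markEmptyRest(th, 3) // cell 3 was just deleted
        fmt.Println(th)      // [5 0 0 0 0 0 0 0]
    }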
+
+func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
+       // make sure we evacuate the oldbucket corresponding
+       // to the bucket we're about to use
+       evacuate_faststr(t, h, bucket&h.oldbucketmask())
+
+       // evacuate one more oldbucket to make progress on growing
+       if h.growing() {
+               evacuate_faststr(t, h, h.nevacuate)
+       }
+}
+
+func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
+       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
+       newbit := h.noldbuckets()
+       if !evacuated(b) {
+               // TODO: reuse overflow buckets instead of using new ones, if there
+               // is no iterator using the old buckets.  (If !oldIterator.)
+
+               // xy contains the x and y (low and high) evacuation destinations.
+               var xy [2]evacDst
+               x := &xy[0]
+               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
+               x.k = add(unsafe.Pointer(x.b), dataOffset)
+               x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)
+
+               if !h.sameSizeGrow() {
+                       // Only calculate y pointers if we're growing bigger.
+                       // Otherwise GC can see bad pointers.
+                       y := &xy[1]
+                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
+                       y.k = add(unsafe.Pointer(y.b), dataOffset)
+                       y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
+               }
+
+               for ; b != nil; b = b.overflow(t) {
+                       k := add(unsafe.Pointer(b), dataOffset)
+                       e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
+                       for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
+                               top := b.tophash[i]
+                               if isEmpty(top) {
+                                       b.tophash[i] = evacuatedEmpty
+                                       continue
+                               }
+                               if top < minTopHash {
+                                       throw("bad map state")
+                               }
+                               var useY uint8
+                               if !h.sameSizeGrow() {
+                                       // Compute hash to make our evacuation decision (whether we need
+                                       // to send this key/elem to bucket x or bucket y).
+                                       hash := t.Hasher(k, uintptr(h.hash0))
+                                       if hash&newbit != 0 {
+                                               useY = 1
+                                       }
+                               }
+
+                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+                               dst := &xy[useY]                 // evacuation destination
+
+                               if dst.i == abi.MapBucketCount {
+                                       dst.b = h.newoverflow(t, dst.b)
+                                       dst.i = 0
+                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+                                       dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
+                               }
+                               dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+                               // Copy key.
+                               *(*string)(dst.k) = *(*string)(k)
+
+                               typedmemmove(t.Elem, dst.e, e)
+                               dst.i++
+                               // These updates might push these pointers past the end of the
+                               // key or elem arrays.  That's ok, as we have the overflow pointer
+                               // at the end of the bucket to protect against pointing past the
+                               // end of the bucket.
+                               dst.k = add(dst.k, 2*goarch.PtrSize)
+                               dst.e = add(dst.e, uintptr(t.ValueSize))
+                       }
+               }
+               // Unlink the overflow buckets & clear key/elem to help GC.
+               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
+                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
+                       // Preserve b.tophash because the evacuation
+                       // state is maintained there.
+                       ptr := add(b, dataOffset)
+                       n := uintptr(t.BucketSize) - dataOffset
+                       memclrHasPointers(ptr, n)
+               }
+       }
+
+       if oldbucket == h.nevacuate {
+               advanceEvacuationMark(h, t, newbit)
+       }
+}
similarity index 99%
rename from src/runtime/map.go
rename to src/runtime/map_noswiss.go
index 9169d5733eef4f60915c3c6d436d744d4b5f5ac2..aff124691f81d0f61abf387bd158f95ade87b69f 100644 (file)
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !goexperiment.swissmap
+
 package runtime
 
 // This file contains the implementation of Go's map type.
diff --git a/src/runtime/map_swiss.go b/src/runtime/map_swiss.go
new file mode 100644 (file)
index 0000000..d6c97e4
--- /dev/null
@@ -0,0 +1,1724 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package runtime
+
+// This file contains the implementation of Go's map type.
+//
+// A map is just a hash table. The data is arranged
+// into an array of buckets. Each bucket contains up to
+// 8 key/elem pairs. The low-order bits of the hash are
+// used to select a bucket. Each bucket contains a few
+// high-order bits of each hash to distinguish the entries
+// within a single bucket.
+//
+// If more than 8 keys hash to a bucket, we chain on
+// extra buckets.
+//
+// When the hashtable grows, we allocate a new array
+// of buckets twice as big. Buckets are incrementally
+// copied from the old bucket array to the new bucket array.
+//
+// Map iterators walk through the array of buckets and
+// return the keys in walk order (bucket #, then overflow
+// chain order, then bucket index).  To maintain iteration
+// semantics, we never move keys within their bucket (if
+// we did, keys might be returned 0 or 2 times).  When
+// growing the table, iterators remain iterating through the
+// old table and must check the new table if the bucket
+// they are iterating through has been moved ("evacuated")
+// to the new table.
+
+// Picking loadFactor: too large and we have lots of overflow
+// buckets, too small and we waste a lot of space. I wrote
+// a simple program to check some stats for different loads:
+// (64-bit, 8 byte keys and elems)
+//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
+//        4.00         2.13        20.77         3.00         4.00
+//        4.50         4.05        17.30         3.25         4.50
+//        5.00         6.85        14.77         3.50         5.00
+//        5.50        10.55        12.94         3.75         5.50
+//        6.00        15.27        11.67         4.00         6.00
+//        6.50        20.90        10.79         4.25         6.50
+//        7.00        27.14        10.15         4.50         7.00
+//        7.50        34.03         9.73         4.75         7.50
+//        8.00        41.10         9.40         5.00         8.00
+//
+// %overflow   = percentage of buckets which have an overflow bucket
+// bytes/entry = overhead bytes used per key/elem pair
+// hitprobe    = # of entries to check when looking up a present key
+// missprobe   = # of entries to check when looking up an absent key
+//
+// Keep in mind this data is for maximally loaded tables, i.e. just
+// before the table grows. Typical tables will be somewhat less loaded.
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       "internal/runtime/atomic"
+       "internal/runtime/math"
+       "unsafe"
+)
+
+const (
+       // Maximum number of key/elem pairs a bucket can hold.
+       bucketCntBits = abi.MapBucketCountBits
+
+       // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
+       // Because of minimum alignment rules, bucketCnt is known to be at least 8.
+       // Represent as loadFactorNum/loadFactorDen, to allow integer math.
+       loadFactorDen = 2
+       loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16
+
+       // data offset should be the size of the bmap struct, but needs to be
+       // aligned correctly. For amd64p32 this means 64-bit alignment
+       // even though pointers are 32 bit.
+       dataOffset = unsafe.Offsetof(struct {
+               b bmap
+               v int64
+       }{}.v)
+
+       // Possible tophash values. We reserve a few possibilities for special marks.
+       // Each bucket (including its overflow buckets, if any) will have either all or none of its
+       // entries in the evacuated* states (except during the evacuate() method, which only happens
+       // during map writes and thus no one else can observe the map during that time).
+       emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
+       emptyOne       = 1 // this cell is empty
+       evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
+       evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
+       evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
+       minTopHash     = 5 // minimum tophash for a normal filled cell.
+
+       // flags
+       iterator     = 1 // there may be an iterator using buckets
+       oldIterator  = 2 // there may be an iterator using oldbuckets
+       hashWriting  = 4 // a goroutine is writing to the map
+       sameSizeGrow = 8 // the current map growth is to a new map of the same size
+
+       // sentinel bucket ID for iterator checks
+       noCheck = 1<<(8*goarch.PtrSize) - 1
+)
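
The dataOffset constant above uses a small trick: placing an int64 immediately after a bmap in an anonymous struct and asking unsafe.Offsetof where that int64 landed yields the header size rounded up to 64-bit alignment. A standalone sketch of the same pattern with a stand-in header type (toyBmap is hypothetical, for illustration only):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // toyBmap stands in for the runtime's bmap header (tophash array only).
    type toyBmap struct {
        tophash [8]uint8
    }

    // Offset of v == size of toyBmap rounded up to int64 alignment:
    // this is where the packed keys would start.
    const dataOffset = unsafe.Offsetof(struct {
        b toyBmap
        v int64
    }{}.v)

    func main() {
        fmt.Println(dataOffset) // 8: the [8]uint8 header already ends on a 64-bit boundary
    }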
+
+// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
+func isEmpty(x uint8) bool {
+       return x <= emptyOne
+}
+
+// A header for a Go map.
+type hmap struct {
+       // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
+       // Make sure this stays in sync with the compiler's definition.
+       count     int // # live cells == size of map.  Must be first (used by len() builtin)
+       flags     uint8
+       B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+       noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
+       hash0     uint32 // hash seed
+
+       buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
+       oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
+       nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
+
+       extra *mapextra // optional fields
+}
+
+// mapextra holds fields that are not present on all maps.
+type mapextra struct {
+       // If both key and elem do not contain pointers and are inline, then we mark bucket
+       // type as containing no pointers. This avoids scanning such maps.
+       // However, bmap.overflow is a pointer. In order to keep overflow buckets
+       // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
+       // overflow and oldoverflow are only used if key and elem do not contain pointers.
+       // overflow contains overflow buckets for hmap.buckets.
+       // oldoverflow contains overflow buckets for hmap.oldbuckets.
+       // The indirection allows us to store a pointer to the slice in hiter.
+       overflow    *[]*bmap
+       oldoverflow *[]*bmap
+
+       // nextOverflow holds a pointer to a free overflow bucket.
+       nextOverflow *bmap
+}
+
+// A bucket for a Go map.
+type bmap struct {
+       // tophash generally contains the top byte of the hash value
+       // for each key in this bucket. If tophash[0] < minTopHash,
+       // tophash[0] is a bucket evacuation state instead.
+       tophash [abi.MapBucketCount]uint8
+       // Followed by bucketCnt keys and then bucketCnt elems.
+       // NOTE: packing all the keys together and then all the elems together makes the
+       // code a bit more complicated than alternating key/elem/key/elem/... but it allows
+       // us to eliminate padding which would be needed for, e.g., map[int64]int8.
+       // Followed by an overflow pointer.
+}
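
For a concrete map type the compiler generates a bucket struct with this shape. A hypothetical, hand-written equivalent for map[int64]int8 (not compiler output) shows why packing all keys and then all elems beats alternating key/elem pairs:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // packedBucket mimics the generated layout: keys together, then elems,
    // so the int8 elems need no per-pair padding up to int64 alignment.
    type packedBucket struct {
        tophash  [8]uint8
        keys     [8]int64
        elems    [8]int8
        overflow unsafe.Pointer
    }

    // An alternating layout would pad every int8 elem out to 8 bytes.
    type interleavedPair struct {
        key  int64
        elem int8
    }

    func main() {
        fmt.Println(unsafe.Sizeof(packedBucket{}))       // 88 on a typical 64-bit platform
        fmt.Println(unsafe.Sizeof([8]interleavedPair{})) // 128 (7 padding bytes per pair)
    }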
+
+// A hash iteration structure.
+// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
+// and reflect/value.go to match the layout of this structure.
+type hiter struct {
+       key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
+       elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
+       t           *maptype
+       h           *hmap
+       buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
+       bptr        *bmap          // current bucket
+       overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
+       oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
+       startBucket uintptr        // bucket iteration started at
+       offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
+       wrapped     bool           // already wrapped around from end of bucket array to beginning
+       B           uint8
+       i           uint8
+       bucket      uintptr
+       checkBucket uintptr
+}
+
+// bucketShift returns 1<<b, optimized for code generation.
+func bucketShift(b uint8) uintptr {
+       // Masking the shift amount allows overflow checks to be elided.
+       return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
+}
+
+// bucketMask returns 1<<b - 1, optimized for code generation.
+func bucketMask(b uint8) uintptr {
+       return bucketShift(b) - 1
+}
+
+// tophash calculates the tophash value for hash.
+func tophash(hash uintptr) uint8 {
+       top := uint8(hash >> (goarch.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       return top
+}
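
Putting bucketShift, bucketMask and tophash together: the low B bits of the hash pick a bucket, and the top byte becomes the per-entry tophash, bumped past the reserved marker values. A standalone sketch with the constant copied for illustration (the runtime functions above are the authoritative versions):

    package main

    import "fmt"

    const minTopHash = 5 // values below this are reserved state markers

    func main() {
        const B = 4                        // 16 buckets
        hash := uint64(0xDEADBEEFCAFEF00D) // pretend hasher output

        mask := uint64(1)<<B - 1
        bucket := hash & mask // low-order bits select the bucket

        top := uint8(hash >> (64 - 8)) // high byte distinguishes entries within a bucket
        if top < minTopHash {
            top += minTopHash
        }
        fmt.Printf("bucket=%d tophash=%#x\n", bucket, top) // bucket=13 tophash=0xde
    }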
+
+func evacuated(b *bmap) bool {
+       h := b.tophash[0]
+       return h > emptyOne && h < minTopHash
+}
+
+func (b *bmap) overflow(t *maptype) *bmap {
+       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
+}
+
+func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
+       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
+}
+
+func (b *bmap) keys() unsafe.Pointer {
+       return add(unsafe.Pointer(b), dataOffset)
+}
+
+// incrnoverflow increments h.noverflow.
+// noverflow counts the number of overflow buckets.
+// This is used to trigger same-size map growth.
+// See also tooManyOverflowBuckets.
+// To keep hmap small, noverflow is a uint16.
+// When there are few buckets, noverflow is an exact count.
+// When there are many buckets, noverflow is an approximate count.
+func (h *hmap) incrnoverflow() {
+       // We trigger same-size map growth if there are
+       // as many overflow buckets as buckets.
+       // We need to be able to count to 1<<h.B.
+       if h.B < 16 {
+               h.noverflow++
+               return
+       }
+       // Increment with probability 1/(1<<(h.B-15)).
+       // When we reach 1<<15 - 1, we will have approximately
+       // as many overflow buckets as buckets.
+       mask := uint32(1)<<(h.B-15) - 1
+       // Example: if h.B == 18, then mask == 7,
+       // and rand() & 7 == 0 with probability 1/8.
+       if uint32(rand())&mask == 0 {
+               h.noverflow++
+       }
+}
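
The sampling keeps noverflow meaningful while it stays a uint16: above B = 15 each real overflow bucket increments the counter only with probability 1/2^(B-15), so a counter near 1<<15 corresponds to roughly 2^B real overflow buckets, which is the "as many overflow buckets as buckets" trigger. A sketch of the idea (math/rand/v2 stands in for the runtime's rand; approxIncr is a hypothetical name):

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    // approxIncr mirrors the sampling: exact below B=16, probabilistic above.
    func approxIncr(noverflow *uint16, B uint8) {
        if B < 16 {
            *noverflow++
            return
        }
        mask := uint32(1)<<(B-15) - 1
        if rand.Uint32()&mask == 0 {
            *noverflow++
        }
    }

    func main() {
        const B = 18 // mask = 7: increment with probability 1/8
        var noverflow uint16
        for i := 0; i < 1<<B; i++ { // pretend 2^18 overflow buckets were allocated
            approxIncr(&noverflow, B)
        }
        fmt.Println(noverflow) // ~32768, i.e. about 1<<15
    }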
+
+func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
+       var ovf *bmap
+       if h.extra != nil && h.extra.nextOverflow != nil {
+               // We have preallocated overflow buckets available.
+               // See makeBucketArray for more details.
+               ovf = h.extra.nextOverflow
+               if ovf.overflow(t) == nil {
+                       // We're not at the end of the preallocated overflow buckets. Bump the pointer.
+                       h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
+               } else {
+                       // This is the last preallocated overflow bucket.
+                       // Reset the overflow pointer on this bucket,
+                       // which was set to a non-nil sentinel value.
+                       ovf.setoverflow(t, nil)
+                       h.extra.nextOverflow = nil
+               }
+       } else {
+               ovf = (*bmap)(newobject(t.Bucket))
+       }
+       h.incrnoverflow()
+       if !t.Bucket.Pointers() {
+               h.createOverflow()
+               *h.extra.overflow = append(*h.extra.overflow, ovf)
+       }
+       b.setoverflow(t, ovf)
+       return ovf
+}
+
+func (h *hmap) createOverflow() {
+       if h.extra == nil {
+               h.extra = new(mapextra)
+       }
+       if h.extra.overflow == nil {
+               h.extra.overflow = new([]*bmap)
+       }
+}
+
+func makemap64(t *maptype, hint int64, h *hmap) *hmap {
+       if int64(int(hint)) != hint {
+               hint = 0
+       }
+       return makemap(t, int(hint), h)
+}
+
+// makemap_small implements Go map creation for make(map[k]v) and
+// make(map[k]v, hint) when hint is known to be at most bucketCnt
+// at compile time and the map needs to be allocated on the heap.
+func makemap_small() *hmap {
+       h := new(hmap)
+       h.hash0 = uint32(rand())
+       return h
+}
+
+// makemap implements Go map creation for make(map[k]v, hint).
+// If the compiler has determined that the map or the first bucket
+// can be created on the stack, h and/or bucket may be non-nil.
+// If h != nil, the map can be created directly in h.
+// If h.buckets != nil, bucket pointed to can be used as the first bucket.
+func makemap(t *maptype, hint int, h *hmap) *hmap {
+       mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
+       if overflow || mem > maxAlloc {
+               hint = 0
+       }
+
+       // initialize Hmap
+       if h == nil {
+               h = new(hmap)
+       }
+       h.hash0 = uint32(rand())
+
+       // Find the size parameter B which will hold the requested # of elements.
+       // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
+       B := uint8(0)
+       for overLoadFactor(hint, B) {
+               B++
+       }
+       h.B = B
+
+       // allocate initial hash table
+       // if B == 0, the buckets field is allocated lazily later (in mapassign)
+       // If hint is large zeroing this memory could take a while.
+       if h.B != 0 {
+               var nextOverflow *bmap
+               h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
+               if nextOverflow != nil {
+                       h.extra = new(mapextra)
+                       h.extra.nextOverflow = nextOverflow
+               }
+       }
+
+       return h
+}
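
Worked example of the sizing loop: make(map[K]V, 100) needs the smallest B that keeps 100 entries under the 6.5-per-bucket load factor, which is B = 4 (16 buckets, up to 104 entries before growing). A standalone replica, with the constants and helper copied purely for illustration:

    package main

    import "fmt"

    const (
        bucketCnt     = 8
        loadFactorDen = 2
        loadFactorNum = loadFactorDen * bucketCnt * 13 / 16 // 13, i.e. 6.5 per bucket
    )

    func overLoadFactor(count int, B uint8) bool {
        return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
    }

    func main() {
        hint := 100
        B := uint8(0)
        for overLoadFactor(hint, B) {
            B++
        }
        fmt.Println(B) // 4: 2^4 = 16 buckets hold up to 13*16/2 = 104 entries
    }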
+
+// makeBucketArray initializes a backing array for map buckets.
+// 1<<b is the minimum number of buckets to allocate.
+// dirtyalloc should either be nil or a bucket array previously
+// allocated by makeBucketArray with the same t and b parameters.
+// If dirtyalloc is nil a new backing array will be allocated;
+// otherwise dirtyalloc will be cleared and reused as the backing array.
+func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
+       base := bucketShift(b)
+       nbuckets := base
+       // For small b, overflow buckets are unlikely.
+       // Avoid the overhead of the calculation.
+       if b >= 4 {
+               // Add on the estimated number of overflow buckets
+               // required to insert the median number of elements
+               // used with this value of b.
+               nbuckets += bucketShift(b - 4)
+               sz := t.Bucket.Size_ * nbuckets
+               up := roundupsize(sz, !t.Bucket.Pointers())
+               if up != sz {
+                       nbuckets = up / t.Bucket.Size_
+               }
+       }
+
+       if dirtyalloc == nil {
+               buckets = newarray(t.Bucket, int(nbuckets))
+       } else {
+               // dirtyalloc was previously generated by
+               // the above newarray(t.Bucket, int(nbuckets))
+               // but may not be empty.
+               buckets = dirtyalloc
+               size := t.Bucket.Size_ * nbuckets
+               if t.Bucket.Pointers() {
+                       memclrHasPointers(buckets, size)
+               } else {
+                       memclrNoHeapPointers(buckets, size)
+               }
+       }
+
+       if base != nbuckets {
+               // We preallocated some overflow buckets.
+               // To keep the overhead of tracking these overflow buckets to a minimum,
+               // we use the convention that if a preallocated overflow bucket's overflow
+               // pointer is nil, then there are more available by bumping the pointer.
+               // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
+               nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
+               last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
+               last.setoverflow(t, (*bmap)(buckets))
+       }
+       return buckets, nextOverflow
+}
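
For example, b = 5 gives base = 32 buckets plus an estimated 2^(5-4) = 2 preallocated overflow buckets, before the size-class rounding. A sketch of just the estimate (roundupsize, which may add a few more buckets, is omitted):

    package main

    import "fmt"

    func main() {
        for b := uint8(1); b <= 8; b++ {
            base := uintptr(1) << b
            nbuckets := base
            if b >= 4 {
                nbuckets += uintptr(1) << (b - 4) // median overflow-bucket estimate
            }
            fmt.Printf("b=%d base=%d nbuckets=%d (before size-class rounding)\n", b, base, nbuckets)
        }
    }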
+
+// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
+// it will return a reference to the zero object for the elem type if
+// the key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               pc := abi.FuncPCABIInternal(mapaccess1)
+               racereadpc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.Key, key, callerpc, pc)
+       }
+       if msanenabled && h != nil {
+               msanread(key, t.Key.Size_)
+       }
+       if asanenabled && h != nil {
+               asanread(key, t.Key.Size_)
+       }
+       if h == nil || h.count == 0 {
+               if err := mapKeyError(t, key); err != nil {
+                       panic(err) // see issue 23734
+               }
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       hash := t.Hasher(key, uintptr(h.hash0))
+       m := bucketMask(h.B)
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+       if c := h.oldbuckets; c != nil {
+               if !h.sameSizeGrow() {
+                       // There used to be half as many buckets; mask down one more power of two.
+                       m >>= 1
+               }
+               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := tophash(hash)
+bucketloop:
+       for ; b != nil; b = b.overflow(t) {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if b.tophash[i] != top {
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
+                       if t.IndirectKey() {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if t.Key.Equal(key, k) {
+                               e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                               if t.IndirectElem() {
+                                       e = *((*unsafe.Pointer)(e))
+                               }
+                               return e
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0])
+}
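
At the language level the two access entry points correspond to the two lookup forms: the single-result form compiles to a mapaccess1 variant and yields the zero value on a miss, while the comma-ok form compiles to a mapaccess2 variant and also reports presence. A user-level illustration:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1}

        v := m["missing"]     // mapaccess1-style: zero value, never a nil reference
        w, ok := m["missing"] // mapaccess2-style: zero value plus presence flag

        fmt.Println(v, w, ok) // 0 0 false
    }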
+
+func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               pc := abi.FuncPCABIInternal(mapaccess2)
+               racereadpc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.Key, key, callerpc, pc)
+       }
+       if msanenabled && h != nil {
+               msanread(key, t.Key.Size_)
+       }
+       if asanenabled && h != nil {
+               asanread(key, t.Key.Size_)
+       }
+       if h == nil || h.count == 0 {
+               if err := mapKeyError(t, key); err != nil {
+                       panic(err) // see issue 23734
+               }
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map read and map write")
+       }
+       hash := t.Hasher(key, uintptr(h.hash0))
+       m := bucketMask(h.B)
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+       if c := h.oldbuckets; c != nil {
+               if !h.sameSizeGrow() {
+                       // There used to be half as many buckets; mask down one more power of two.
+                       m >>= 1
+               }
+               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := tophash(hash)
+bucketloop:
+       for ; b != nil; b = b.overflow(t) {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if b.tophash[i] != top {
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
+                       if t.IndirectKey() {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if t.Key.Equal(key, k) {
+                               e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                               if t.IndirectElem() {
+                                       e = *((*unsafe.Pointer)(e))
+                               }
+                               return e, true
+                       }
+               }
+       }
+       return unsafe.Pointer(&zeroVal[0]), false
+}
+
+// returns both key and elem. Used by map iterator.
+func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
+       if h == nil || h.count == 0 {
+               return nil, nil
+       }
+       hash := t.Hasher(key, uintptr(h.hash0))
+       m := bucketMask(h.B)
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
+       if c := h.oldbuckets; c != nil {
+               if !h.sameSizeGrow() {
+                       // There used to be half as many buckets; mask down one more power of two.
+                       m >>= 1
+               }
+               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := tophash(hash)
+bucketloop:
+       for ; b != nil; b = b.overflow(t) {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if b.tophash[i] != top {
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
+                       if t.IndirectKey() {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if t.Key.Equal(key, k) {
+                               e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                               if t.IndirectElem() {
+                                       e = *((*unsafe.Pointer)(e))
+                               }
+                               return k, e
+                       }
+               }
+       }
+       return nil, nil
+}
+
+func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
+       e := mapaccess1(t, h, key)
+       if e == unsafe.Pointer(&zeroVal[0]) {
+               return zero
+       }
+       return e
+}
+
+func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+       e := mapaccess1(t, h, key)
+       if e == unsafe.Pointer(&zeroVal[0]) {
+               return zero, false
+       }
+       return e, true
+}
+
+// Like mapaccess, but allocates a slot for the key if it is not present in the map.
+func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               pc := abi.FuncPCABIInternal(mapassign)
+               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.Key, key, callerpc, pc)
+       }
+       if msanenabled {
+               msanread(key, t.Key.Size_)
+       }
+       if asanenabled {
+               asanread(key, t.Key.Size_)
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+       hash := t.Hasher(key, uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher, since t.hasher may panic,
+       // in which case we have not actually done a write.
+       h.flags ^= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+       top := tophash(hash)
+
+       var inserti *uint8
+       var insertk unsafe.Pointer
+       var elem unsafe.Pointer
+bucketloop:
+       for {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if b.tophash[i] != top {
+                               if isEmpty(b.tophash[i]) && inserti == nil {
+                                       inserti = &b.tophash[i]
+                                       insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
+                                       elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                               }
+                               if b.tophash[i] == emptyRest {
+                                       break bucketloop
+                               }
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
+                       if t.IndirectKey() {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if !t.Key.Equal(key, k) {
+                               continue
+                       }
+                       // already have a mapping for key. Update it.
+                       if t.NeedKeyUpdate() {
+                               typedmemmove(t.Key, k, key)
+                       }
+                       elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if inserti == nil {
+               // The current bucket and all the overflow buckets connected to it are full; allocate a new one.
+               newb := h.newoverflow(t, b)
+               inserti = &newb.tophash[0]
+               insertk = add(unsafe.Pointer(newb), dataOffset)
+               elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
+       }
+
+       // store new key/elem at insert position
+       if t.IndirectKey() {
+               kmem := newobject(t.Key)
+               *(*unsafe.Pointer)(insertk) = kmem
+               insertk = kmem
+       }
+       if t.IndirectElem() {
+               vmem := newobject(t.Elem)
+               *(*unsafe.Pointer)(elem) = vmem
+       }
+       typedmemmove(t.Key, insertk, key)
+       *inserti = top
+       h.count++
+
+done:
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       if t.IndirectElem() {
+               elem = *((*unsafe.Pointer)(elem))
+       }
+       return elem
+}
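
An assignment m[k] = v is compiled into a call to mapassign (or a fast variant): the runtime returns a pointer to the element slot and the compiler emits the store of v. Assigning into a nil map is the one map operation that panics, matching the check at the top of the function. A user-level illustration:

    package main

    import "fmt"

    func main() {
        m := make(map[string]int)
        m["a"] = 1          // conceptually: *mapassign(t, m, "a") = 1
        fmt.Println(m["a"]) // 1

        defer func() {
            fmt.Println("recovered:", recover()) // assignment to entry in nil map
        }()
        var nilMap map[string]int
        nilMap["boom"] = 1 // panics, like the nil check in mapassign above
    }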
+
+func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               pc := abi.FuncPCABIInternal(mapdelete)
+               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.Key, key, callerpc, pc)
+       }
+       if msanenabled && h != nil {
+               msanread(key, t.Key.Size_)
+       }
+       if asanenabled && h != nil {
+               asanread(key, t.Key.Size_)
+       }
+       if h == nil || h.count == 0 {
+               if err := mapKeyError(t, key); err != nil {
+                       panic(err) // see issue 23734
+               }
+               return
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+
+       hash := t.Hasher(key, uintptr(h.hash0))
+
+       // Set hashWriting after calling t.hasher, since t.hasher may panic,
+       // in which case we have not actually done a write (delete).
+       h.flags ^= hashWriting
+
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork(t, h, bucket)
+       }
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
+       bOrig := b
+       top := tophash(hash)
+search:
+       for ; b != nil; b = b.overflow(t) {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       if b.tophash[i] != top {
+                               if b.tophash[i] == emptyRest {
+                                       break search
+                               }
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
+                       k2 := k
+                       if t.IndirectKey() {
+                               k2 = *((*unsafe.Pointer)(k2))
+                       }
+                       if !t.Key.Equal(key, k2) {
+                               continue
+                       }
+                       // Only clear key if there are pointers in it.
+                       if t.IndirectKey() {
+                               *(*unsafe.Pointer)(k) = nil
+                       } else if t.Key.Pointers() {
+                               memclrHasPointers(k, t.Key.Size_)
+                       }
+                       e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                       if t.IndirectElem() {
+                               *(*unsafe.Pointer)(e) = nil
+                       } else if t.Elem.Pointers() {
+                               memclrHasPointers(e, t.Elem.Size_)
+                       } else {
+                               memclrNoHeapPointers(e, t.Elem.Size_)
+                       }
+                       b.tophash[i] = emptyOne
+                       // If the bucket now ends in a bunch of emptyOne states,
+                       // change those to emptyRest states.
+                       // It would be nice to make this a separate function, but
+                       // for loops are not currently inlineable.
+                       if i == abi.MapBucketCount-1 {
+                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+                                       goto notLast
+                               }
+                       } else {
+                               if b.tophash[i+1] != emptyRest {
+                                       goto notLast
+                               }
+                       }
+                       for {
+                               b.tophash[i] = emptyRest
+                               if i == 0 {
+                                       if b == bOrig {
+                                               break // beginning of initial bucket, we're done.
+                                       }
+                                       // Find previous bucket, continue at its last entry.
+                                       c := b
+                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+                                       }
+                                       i = abi.MapBucketCount - 1
+                               } else {
+                                       i--
+                               }
+                               if b.tophash[i] != emptyOne {
+                                       break
+                               }
+                       }
+               notLast:
+                       h.count--
+                       // Reset the hash seed to make it more difficult for attackers to
+                       // repeatedly trigger hash collisions. See issue 25237.
+                       if h.count == 0 {
+                               h.hash0 = uint32(rand())
+                       }
+                       break search
+               }
+       }
+
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
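
The built-in delete(m, k) compiles to mapdelete (or a fast variant). Deleting a missing key, or deleting from a nil map, is a no-op, and once the count reaches zero the hash seed is reset as above. A user-level illustration:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        delete(m, "a")
        delete(m, "missing") // no-op: key not present
        var nilMap map[string]int
        delete(nilMap, "x") // no-op: deleting from a nil map is allowed
        fmt.Println(len(m)) // 1
    }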
+
+// mapiterinit initializes the hiter struct used for ranging over maps.
+// The hiter struct pointed to by 'it' is allocated on the stack
+// by the compiler's order pass or on the heap by reflect_mapiterinit.
+// Both need a zeroed hiter since the struct contains pointers.
+func mapiterinit(t *maptype, h *hmap, it *hiter) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
+       }
+
+       it.t = t
+       if h == nil || h.count == 0 {
+               return
+       }
+
+       if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
+               throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
+       }
+       it.h = h
+
+       // grab snapshot of bucket state
+       it.B = h.B
+       it.buckets = h.buckets
+       if !t.Bucket.Pointers() {
+               // Allocate the current slice and remember pointers to both current and old.
+               // This preserves all relevant overflow buckets alive even if
+               // the table grows and/or overflow buckets are added to the table
+               // while we are iterating.
+               h.createOverflow()
+               it.overflow = h.extra.overflow
+               it.oldoverflow = h.extra.oldoverflow
+       }
+
+       // decide where to start
+       r := uintptr(rand())
+       it.startBucket = r & bucketMask(h.B)
+       it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))
+
+       // iterator state
+       it.bucket = it.startBucket
+
+       // Remember we have an iterator.
+       // Can run concurrently with another mapiterinit().
+       if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
+               atomic.Or8(&h.flags, iterator|oldIterator)
+       }
+
+       mapiternext(it)
+}
+
+func mapiternext(it *hiter) {
+       h := it.h
+       if raceenabled {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
+       }
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map iteration and map write")
+       }
+       t := it.t
+       bucket := it.bucket
+       b := it.bptr
+       i := it.i
+       checkBucket := it.checkBucket
+
+next:
+       if b == nil {
+               if bucket == it.startBucket && it.wrapped {
+                       // end of iteration
+                       it.key = nil
+                       it.elem = nil
+                       return
+               }
+               if h.growing() && it.B == h.B {
+                       // Iterator was started in the middle of a grow, and the grow isn't done yet.
+                       // If the bucket we're looking at hasn't been filled in yet (i.e. the old
+                       // bucket hasn't been evacuated) then we need to iterate through the old
+                       // bucket and only return the ones that will be migrated to this bucket.
+                       oldbucket := bucket & it.h.oldbucketmask()
+                       b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
+                       if !evacuated(b) {
+                               checkBucket = bucket
+                       } else {
+                               b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
+                               checkBucket = noCheck
+                       }
+               } else {
+                       b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
+                       checkBucket = noCheck
+               }
+               bucket++
+               if bucket == bucketShift(it.B) {
+                       bucket = 0
+                       it.wrapped = true
+               }
+               i = 0
+       }
+       for ; i < abi.MapBucketCount; i++ {
+               offi := (i + it.offset) & (abi.MapBucketCount - 1)
+               if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
+                       // TODO: emptyRest is hard to use here, as we start iterating
+                       // in the middle of a bucket. It's feasible, just tricky.
+                       continue
+               }
+               k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
+               if t.IndirectKey() {
+                       k = *((*unsafe.Pointer)(k))
+               }
+               e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
+               if checkBucket != noCheck && !h.sameSizeGrow() {
+                       // Special case: iterator was started during a grow to a larger size
+                       // and the grow is not done yet. We're working on a bucket whose
+                       // oldbucket has not been evacuated yet. Or at least, it wasn't
+                       // evacuated when we started the bucket. So we're iterating
+                       // through the oldbucket, skipping any keys that will go
+                       // to the other new bucket (each oldbucket expands to two
+                       // buckets during a grow).
+                       if t.ReflexiveKey() || t.Key.Equal(k, k) {
+                               // If the item in the oldbucket is not destined for
+                               // the current new bucket in the iteration, skip it.
+                               hash := t.Hasher(k, uintptr(h.hash0))
+                               if hash&bucketMask(it.B) != checkBucket {
+                                       continue
+                               }
+                       } else {
+                               // Hash isn't repeatable if k != k (NaNs).  We need a
+                               // repeatable and randomish choice of which direction
+                               // to send NaNs during evacuation. We'll use the low
+                               // bit of tophash to decide which way NaNs go.
+                               // NOTE: this case is why we need two evacuate tophash
+                               // values, evacuatedX and evacuatedY, that differ in
+                               // their low bit.
+                               if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
+                                       continue
+                               }
+                       }
+               }
+               if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
+                       !(t.ReflexiveKey() || t.Key.Equal(k, k)) {
+                       // This is the golden data, we can return it.
+                       // OR
+                       // key!=key, so the entry can't be deleted or updated, so we can just return it.
+                       // That's lucky for us because when key!=key we can't look it up successfully.
+                       it.key = k
+                       if t.IndirectElem() {
+                               e = *((*unsafe.Pointer)(e))
+                       }
+                       it.elem = e
+               } else {
+                       // The hash table has grown since the iterator was started.
+                       // The golden data for this key is now somewhere else.
+                       // Check the current hash table for the data.
+                       // This code handles the case where the key
+                       // has been deleted, updated, or deleted and reinserted.
+                       // NOTE: we need to regrab the key as it has potentially been
+                       // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
+                       rk, re := mapaccessK(t, h, k)
+                       if rk == nil {
+                               continue // key has been deleted
+                       }
+                       it.key = rk
+                       it.elem = re
+               }
+               it.bucket = bucket
+               if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
+                       it.bptr = b
+               }
+               it.i = i + 1
+               it.checkBucket = checkBucket
+               return
+       }
+       b = b.overflow(t)
+       i = 0
+       goto next
+}
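
A for range over a map is driven by mapiterinit and mapiternext. The random startBucket and offset chosen at init are why iteration order differs between runs, and the evacuation checks are why a range started in the middle of a grow still yields each key at most once. A user-level illustration of the visible effect:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2, "c": 3}
        // The start bucket and in-bucket offset are randomized, so two runs of
        // this loop may print the keys in different orders.
        for k, v := range m {
            fmt.Println(k, v)
        }
    }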
+
+// mapclear deletes all keys from a map.
+func mapclear(t *maptype, h *hmap) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc()
+               pc := abi.FuncPCABIInternal(mapclear)
+               racewritepc(unsafe.Pointer(h), callerpc, pc)
+       }
+
+       if h == nil || h.count == 0 {
+               return
+       }
+
+       if h.flags&hashWriting != 0 {
+               fatal("concurrent map writes")
+       }
+
+       h.flags ^= hashWriting
+
+       // Mark buckets empty, so existing iterators can be terminated, see issue #59411.
+       markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
+               for i := uintptr(0); i <= mask; i++ {
+                       b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
+                       for ; b != nil; b = b.overflow(t) {
+                               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                                       b.tophash[i] = emptyRest
+                               }
+                       }
+               }
+       }
+       markBucketsEmpty(h.buckets, bucketMask(h.B))
+       if oldBuckets := h.oldbuckets; oldBuckets != nil {
+               markBucketsEmpty(oldBuckets, h.oldbucketmask())
+       }
+
+       h.flags &^= sameSizeGrow
+       h.oldbuckets = nil
+       h.nevacuate = 0
+       h.noverflow = 0
+       h.count = 0
+
+       // Reset the hash seed to make it more difficult for attackers to
+       // repeatedly trigger hash collisions. See issue 25237.
+       h.hash0 = uint32(rand())
+
+       // Keep the mapextra allocation but clear any extra information.
+       if h.extra != nil {
+               *h.extra = mapextra{}
+       }
+
+       // makeBucketArray clears the memory pointed to by h.buckets
+       // and recovers any overflow buckets by generating them
+       // as if h.buckets were newly allocated.
+       _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
+       if nextOverflow != nil {
+               // If overflow buckets are created then h.extra
+               // will have been allocated during initial bucket creation.
+               h.extra.nextOverflow = nextOverflow
+       }
+
+       if h.flags&hashWriting == 0 {
+               fatal("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
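
The clear built-in (Go 1.21+) on a map is lowered to mapclear, which empties the table in place: the bucket array is kept and reused, but the hash seed is reset. A user-level illustration:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        clear(m)            // lowered to a mapclear call; keeps the allocated buckets
        fmt.Println(len(m)) // 0
        m["c"] = 3          // reuses the existing, now empty, bucket array
        fmt.Println(len(m)) // 1
    }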
+
+func hashGrow(t *maptype, h *hmap) {
+       // If we've hit the load factor, get bigger.
+       // Otherwise, there are too many overflow buckets,
+       // so keep the same number of buckets and "grow" laterally.
+       bigger := uint8(1)
+       if !overLoadFactor(h.count+1, h.B) {
+               bigger = 0
+               h.flags |= sameSizeGrow
+       }
+       oldbuckets := h.buckets
+       newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
+
+       flags := h.flags &^ (iterator | oldIterator)
+       if h.flags&iterator != 0 {
+               flags |= oldIterator
+       }
+       // commit the grow (atomic wrt gc)
+       h.B += bigger
+       h.flags = flags
+       h.oldbuckets = oldbuckets
+       h.buckets = newbuckets
+       h.nevacuate = 0
+       h.noverflow = 0
+
+       if h.extra != nil && h.extra.overflow != nil {
+               // Promote current overflow buckets to the old generation.
+               if h.extra.oldoverflow != nil {
+                       throw("oldoverflow is not nil")
+               }
+               h.extra.oldoverflow = h.extra.overflow
+               h.extra.overflow = nil
+       }
+       if nextOverflow != nil {
+               if h.extra == nil {
+                       h.extra = new(mapextra)
+               }
+               h.extra.nextOverflow = nextOverflow
+       }
+
+       // the actual copying of the hash table data is done incrementally
+       // by growWork() and evacuate().
+}
+
+// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
+func overLoadFactor(count int, B uint8) bool {
+       return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
+}
+
+// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
+// Note that most of these overflow buckets must be in sparse use;
+// if use was dense, then we'd have already triggered regular map growth.
+func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
+       // If the threshold is too low, we do extraneous work.
+       // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
+       // "too many" means (approximately) as many overflow buckets as regular buckets.
+       // See incrnoverflow for more details.
+       if B > 15 {
+               B = 15
+       }
+       // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
+       return noverflow >= uint16(1)<<(B&15)
+}
+
+// growing reports whether h is growing. The growth may be to the same size or bigger.
+func (h *hmap) growing() bool {
+       return h.oldbuckets != nil
+}
+
+// sameSizeGrow reports whether the current growth is to a map of the same size.
+func (h *hmap) sameSizeGrow() bool {
+       return h.flags&sameSizeGrow != 0
+}
+
+// noldbuckets calculates the number of buckets prior to the current map growth.
+func (h *hmap) noldbuckets() uintptr {
+       oldB := h.B
+       if !h.sameSizeGrow() {
+               oldB--
+       }
+       return bucketShift(oldB)
+}
+
+// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
+func (h *hmap) oldbucketmask() uintptr {
+       return h.noldbuckets() - 1
+}
+
+func growWork(t *maptype, h *hmap, bucket uintptr) {
+       // make sure we evacuate the oldbucket corresponding
+       // to the bucket we're about to use
+       evacuate(t, h, bucket&h.oldbucketmask())
+
+       // evacuate one more oldbucket to make progress on growing
+       if h.growing() {
+               evacuate(t, h, h.nevacuate)
+       }
+}
+
+func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
+       b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
+       return evacuated(b)
+}
+
+// evacDst is an evacuation destination.
+type evacDst struct {
+       b *bmap          // current destination bucket
+       i int            // key/elem index into b
+       k unsafe.Pointer // pointer to current key storage
+       e unsafe.Pointer // pointer to current elem storage
+}
+
+func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
+       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
+       newbit := h.noldbuckets()
+       if !evacuated(b) {
+               // TODO: reuse overflow buckets instead of using new ones, if there
+               // is no iterator using the old buckets.  (If !oldIterator.)
+
+               // xy contains the x and y (low and high) evacuation destinations.
+               var xy [2]evacDst
+               x := &xy[0]
+               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
+               x.k = add(unsafe.Pointer(x.b), dataOffset)
+               x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))
+
+               if !h.sameSizeGrow() {
+                       // Only calculate y pointers if we're growing bigger.
+                       // Otherwise GC can see bad pointers.
+                       y := &xy[1]
+                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
+                       y.k = add(unsafe.Pointer(y.b), dataOffset)
+                       y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
+               }
+
+               for ; b != nil; b = b.overflow(t) {
+                       k := add(unsafe.Pointer(b), dataOffset)
+                       e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
+                       for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
+                               top := b.tophash[i]
+                               if isEmpty(top) {
+                                       b.tophash[i] = evacuatedEmpty
+                                       continue
+                               }
+                               if top < minTopHash {
+                                       throw("bad map state")
+                               }
+                               k2 := k
+                               if t.IndirectKey() {
+                                       k2 = *((*unsafe.Pointer)(k2))
+                               }
+                               var useY uint8
+                               if !h.sameSizeGrow() {
+                                       // Compute hash to make our evacuation decision (whether we need
+                                       // to send this key/elem to bucket x or bucket y).
+                                       hash := t.Hasher(k2, uintptr(h.hash0))
+                                       if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
+                                               // If key != key (NaNs), then the hash could be (and probably
+                                               // will be) entirely different from the old hash. Moreover,
+                                               // it isn't reproducible. Reproducibility is required in the
+                                               // presence of iterators, as our evacuation decision must
+                                               // match whatever decision the iterator made.
+                                               // Fortunately, we have the freedom to send these keys either
+                                               // way. Also, tophash is meaningless for these kinds of keys.
+                                               // We let the low bit of tophash drive the evacuation decision.
+                                               // We recompute a new random tophash for the next level so
+                                               // these keys will get evenly distributed across all buckets
+                                               // after multiple grows.
+                                               useY = top & 1
+                                               top = tophash(hash)
+                                       } else {
+                                               if hash&newbit != 0 {
+                                                       useY = 1
+                                               }
+                                       }
+                               }
+
+                               if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
+                                       throw("bad evacuatedN")
+                               }
+
+                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
+                               dst := &xy[useY]                 // evacuation destination
+
+                               if dst.i == abi.MapBucketCount {
+                                       dst.b = h.newoverflow(t, dst.b)
+                                       dst.i = 0
+                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+                                       dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
+                               }
+                               dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+                               if t.IndirectKey() {
+                                       *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+                               } else {
+                                       typedmemmove(t.Key, dst.k, k) // copy key
+                               }
+                               if t.IndirectElem() {
+                                       *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
+                               } else {
+                                       typedmemmove(t.Elem, dst.e, e)
+                               }
+                               dst.i++
+                               // These updates might push these pointers past the end of the
+                               // key or elem arrays.  That's ok, as we have the overflow pointer
+                               // at the end of the bucket to protect against pointing past the
+                               // end of the bucket.
+                               dst.k = add(dst.k, uintptr(t.KeySize))
+                               dst.e = add(dst.e, uintptr(t.ValueSize))
+                       }
+               }
+               // Unlink the overflow buckets & clear key/elem to help GC.
+               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
+                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
+                       // Preserve b.tophash because the evacuation
+                       // state is maintained there.
+                       ptr := add(b, dataOffset)
+                       n := uintptr(t.BucketSize) - dataOffset
+                       memclrHasPointers(ptr, n)
+               }
+       }
+
+       if oldbucket == h.nevacuate {
+               advanceEvacuationMark(h, t, newbit)
+       }
+}
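+
+// For illustration only, the x/y split above: when growing from B to B+1,
+// newbit == noldbuckets(). An entry in old bucket i stays in new bucket i
+// (destination x) if hash&newbit == 0 and moves to new bucket i+newbit
+// (destination y) otherwise. E.g. growing from 16 to 32 buckets, a key in old
+// bucket 5 whose hash has bit 4 set ends up in new bucket 21. Keys that are
+// not equal to themselves (NaNs) have no reproducible hash, so when an
+// iterator may be running the low bit of their old tophash decides x vs. y
+// instead.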
+
+func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
+       h.nevacuate++
+       // Experiments suggest that 1024 is overkill by at least an order of magnitude.
+       // Put it in there as a safeguard anyway, to ensure O(1) behavior.
+       stop := h.nevacuate + 1024
+       if stop > newbit {
+               stop = newbit
+       }
+       for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
+               h.nevacuate++
+       }
+       if h.nevacuate == newbit { // newbit == # of oldbuckets
+               // Growing is all done. Free old main bucket array.
+               h.oldbuckets = nil
+               // Can discard old overflow buckets as well.
+               // If they are still referenced by an iterator,
+               // then the iterator holds a pointer to the slice.
+               if h.extra != nil {
+                       h.extra.oldoverflow = nil
+               }
+               h.flags &^= sameSizeGrow
+       }
+}
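+
+// For illustration only: even if the next several thousand old buckets happen
+// to be already evacuated, a single call above advances h.nevacuate by at most
+// 1024, keeping the cost added to any one map operation bounded.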
+
+// Reflect stubs. Called from ../reflect/asm_*.s
+
+//go:linkname reflect_makemap reflect.makemap
+func reflect_makemap(t *maptype, cap int) *hmap {
+       // Check invariants and reflect's math.
+       if t.Key.Equal == nil {
+               throw("runtime.reflect_makemap: unsupported map key type")
+       }
+       if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
+               t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
+               throw("key size wrong")
+       }
+       if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
+               t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
+               throw("elem size wrong")
+       }
+       if t.Key.Align_ > abi.MapBucketCount {
+               throw("key align too big")
+       }
+       if t.Elem.Align_ > abi.MapBucketCount {
+               throw("elem align too big")
+       }
+       if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
+               throw("key size not a multiple of key align")
+       }
+       if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
+               throw("elem size not a multiple of elem align")
+       }
+       if abi.MapBucketCount < 8 {
+               throw("bucketsize too small for proper alignment")
+       }
+       if dataOffset%uintptr(t.Key.Align_) != 0 {
+               throw("need padding in bucket (key)")
+       }
+       if dataOffset%uintptr(t.Elem.Align_) != 0 {
+               throw("need padding in bucket (elem)")
+       }
+
+       return makemap(t, cap, nil)
+}
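+
+// For illustration only (hypothetical caller): package reflect reaches
+// reflect_makemap through the reflect.makemap linkname above, e.g.
+//
+//     mt := reflect.TypeOf(map[string]int(nil))
+//     m := reflect.MakeMapWithSize(mt, 10) // lands in reflect_makemap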
+
+//go:linkname reflect_mapaccess reflect.mapaccess
+func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       elem, ok := mapaccess2(t, h, key)
+       if !ok {
+               // reflect wants nil for a missing element
+               elem = nil
+       }
+       return elem
+}
+
+//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
+func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
+       elem, ok := mapaccess2_faststr(t, h, key)
+       if !ok {
+               // reflect wants nil for a missing element
+               elem = nil
+       }
+       return elem
+}
+
+//go:linkname reflect_mapassign reflect.mapassign0
+func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
+       p := mapassign(t, h, key)
+       typedmemmove(t.Elem, p, elem)
+}
+
+//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
+func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
+       p := mapassign_faststr(t, h, key)
+       typedmemmove(t.Elem, p, elem)
+}
+
+//go:linkname reflect_mapdelete reflect.mapdelete
+func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+       mapdelete(t, h, key)
+}
+
+//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
+func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
+       mapdelete_faststr(t, h, key)
+}
+
+//go:linkname reflect_mapiterinit reflect.mapiterinit
+func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
+       mapiterinit(t, h, it)
+}
+
+//go:linkname reflect_mapiternext reflect.mapiternext
+func reflect_mapiternext(it *hiter) {
+       mapiternext(it)
+}
+
+//go:linkname reflect_mapiterkey reflect.mapiterkey
+func reflect_mapiterkey(it *hiter) unsafe.Pointer {
+       return it.key
+}
+
+//go:linkname reflect_mapiterelem reflect.mapiterelem
+func reflect_mapiterelem(it *hiter) unsafe.Pointer {
+       return it.elem
+}
+
+//go:linkname reflect_maplen reflect.maplen
+func reflect_maplen(h *hmap) int {
+       if h == nil {
+               return 0
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+       }
+       return h.count
+}
+
+//go:linkname reflect_mapclear reflect.mapclear
+func reflect_mapclear(t *maptype, h *hmap) {
+       mapclear(t, h)
+}
+
+//go:linkname reflectlite_maplen internal/reflectlite.maplen
+func reflectlite_maplen(h *hmap) int {
+       if h == nil {
+               return 0
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+       }
+       return h.count
+}
+
+// mapinitnoop is a no-op function known to the Go linker; if a given global
+// map (of the right size) is determined to be dead, the linker will
+// rewrite the relocation (from the package init func) from the outlined
+// map init function to this symbol. Defined in assembly so as to avoid
+// complications with instrumentation (coverage, etc).
+func mapinitnoop()
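+
+// For illustration only (hypothetical declaration): given a package-level map
+// such as
+//
+//     var tab = map[string]int{"a": 1, "b": 2}
+//
+// the compiler outlines the construction of tab into its own init function; if
+// the linker then determines tab is dead (and the map is of the right size, as
+// noted above), it redirects that call to mapinitnoop so the map is never
+// built.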
+
+// mapclone for implementing maps.Clone
+//
+//go:linkname mapclone maps.clone
+func mapclone(m any) any {
+       e := efaceOf(&m)
+       e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
+       return m
+}
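+
+// For illustration only: the standard library's maps.Clone is the exported
+// entry point backed by the maps.clone linkname above, e.g.
+//
+//     src := map[string]int{"a": 1, "b": 2}
+//     dst := maps.Clone(src) // independent copy; writes to dst do not affect src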
+
+// moveToBmap moves a bucket from src to dst. It returns the destination bucket
+// (or a new destination bucket if the current one overflows) and the position
+// at which the next key/value will be written; pos == abi.MapBucketCount means
+// the next entry must go into an overflow bucket.
+func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
+       for i := 0; i < abi.MapBucketCount; i++ {
+               if isEmpty(src.tophash[i]) {
+                       continue
+               }
+
+               for ; pos < abi.MapBucketCount; pos++ {
+                       if isEmpty(dst.tophash[pos]) {
+                               break
+                       }
+               }
+
+               if pos == abi.MapBucketCount {
+                       dst = h.newoverflow(t, dst)
+                       pos = 0
+               }
+
+               srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
+               srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
+               dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
+               dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
+
+               dst.tophash[pos] = src.tophash[i]
+               if t.IndirectKey() {
+                       srcK = *(*unsafe.Pointer)(srcK)
+                       if t.NeedKeyUpdate() {
+                               kStore := newobject(t.Key)
+                               typedmemmove(t.Key, kStore, srcK)
+                               srcK = kStore
+                       }
+                       // Note: if NeedKeyUpdate is false, then the memory
+                       // used to store the key is immutable, so we can share
+                       // it between the original map and its clone.
+                       *(*unsafe.Pointer)(dstK) = srcK
+               } else {
+                       typedmemmove(t.Key, dstK, srcK)
+               }
+               if t.IndirectElem() {
+                       srcEle = *(*unsafe.Pointer)(srcEle)
+                       eStore := newobject(t.Elem)
+                       typedmemmove(t.Elem, eStore, srcEle)
+                       *(*unsafe.Pointer)(dstEle) = eStore
+               } else {
+                       typedmemmove(t.Elem, dstEle, srcEle)
+               }
+               pos++
+               h.count++
+       }
+       return dst, pos
+}
+
+func mapclone2(t *maptype, src *hmap) *hmap {
+       dst := makemap(t, src.count, nil)
+       dst.hash0 = src.hash0
+       dst.nevacuate = 0
+       // flags do not need to be copied here, just as a new map starts with no flags set.
+
+       if src.count == 0 {
+               return dst
+       }
+
+       if src.flags&hashWriting != 0 {
+               fatal("concurrent map clone and map write")
+       }
+
+       if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
+               // Quick copy for small maps.
+               dst.buckets = newobject(t.Bucket)
+               dst.count = src.count
+               typedmemmove(t.Bucket, dst.buckets, src.buckets)
+               return dst
+       }
+
+       if dst.B == 0 {
+               dst.buckets = newobject(t.Bucket)
+       }
+       dstArraySize := int(bucketShift(dst.B))
+       srcArraySize := int(bucketShift(src.B))
+       for i := 0; i < dstArraySize; i++ {
+               dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
+               pos := 0
+               for j := 0; j < srcArraySize; j += dstArraySize {
+                       srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
+                       for srcBmap != nil {
+                               dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
+                               srcBmap = srcBmap.overflow(t)
+                       }
+               }
+       }
+
+       if src.oldbuckets == nil {
+               return dst
+       }
+
+       oldB := src.B
+       srcOldbuckets := src.oldbuckets
+       if !src.sameSizeGrow() {
+               oldB--
+       }
+       oldSrcArraySize := int(bucketShift(oldB))
+
+       for i := 0; i < oldSrcArraySize; i++ {
+               srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
+               if evacuated(srcBmap) {
+                       continue
+               }
+
+               if oldB >= dst.B { // dst has no more bucket bits than the old src array: all entries of this old bucket map to one dst bucket
+                       dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
+                       for dstBmap.overflow(t) != nil {
+                               dstBmap = dstBmap.overflow(t)
+                       }
+                       pos := 0
+                       for srcBmap != nil {
+                               dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
+                               srcBmap = srcBmap.overflow(t)
+                       }
+                       continue
+               }
+
+               // oldB < dst.B, so a single source bucket may go to multiple destination buckets.
+               // Process entries one at a time.
+               for srcBmap != nil {
+                       // move from old bucket to new bucket
+                       for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                               if isEmpty(srcBmap.tophash[i]) {
+                                       continue
+                               }
+
+                               if src.flags&hashWriting != 0 {
+                                       fatal("concurrent map clone and map write")
+                               }
+
+                               srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
+                               if t.IndirectKey() {
+                                       srcK = *((*unsafe.Pointer)(srcK))
+                               }
+
+                               srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
+                               if t.IndirectElem() {
+                                       srcEle = *((*unsafe.Pointer)(srcEle))
+                               }
+                               dstEle := mapassign(t, dst, srcK)
+                               typedmemmove(t.Elem, dstEle, srcEle)
+                       }
+                       srcBmap = srcBmap.overflow(t)
+               }
+       }
+       return dst
+}
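+
+// For illustration only: suppose src is mid-grow from 16 to 32 buckets
+// (oldB == 4). If dst was sized with only 8 buckets (dst.B == 3), every entry
+// of an unevacuated old bucket maps to a single dst bucket and is copied with
+// moveToBmap; if dst instead has 64 buckets (dst.B == 6 > oldB), one old
+// bucket's entries can spread across several dst buckets, so they are
+// reinserted one at a time with mapassign.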
+
+// keys for implementing maps.keys
+//
+//go:linkname keys maps.keys
+func keys(m any, p unsafe.Pointer) {
+       e := efaceOf(&m)
+       t := (*maptype)(unsafe.Pointer(e._type))
+       h := (*hmap)(e.data)
+
+       if h == nil || h.count == 0 {
+               return
+       }
+       s := (*slice)(p)
+       r := int(rand())
+       offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
+       if h.B == 0 {
+               copyKeys(t, h, (*bmap)(h.buckets), s, offset)
+               return
+       }
+       arraySize := int(bucketShift(h.B))
+       buckets := h.buckets
+       for i := 0; i < arraySize; i++ {
+               bucket := (i + r) & (arraySize - 1)
+               b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
+               copyKeys(t, h, b, s, offset)
+       }
+
+       if h.growing() {
+               oldArraySize := int(h.noldbuckets())
+               for i := 0; i < oldArraySize; i++ {
+                       bucket := (i + r) & (oldArraySize - 1)
+                       b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
+                       if evacuated(b) {
+                               continue
+                       }
+                       copyKeys(t, h, b, s, offset)
+               }
+       }
+       return
+}
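+
+// For illustration only: like map iteration, keys starts at a random bucket
+// ((i+r)&(arraySize-1)) and at a random slot within each bucket
+// ((i+offset)&(abi.MapBucketCount-1)), so repeated calls append the same keys
+// in different orders.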
+
+func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
+       for b != nil {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
+                       if isEmpty(b.tophash[offi]) {
+                               continue
+                       }
+                       if h.flags&hashWriting != 0 {
+                               fatal("concurrent map read and map write")
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
+                       if t.IndirectKey() {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if s.len >= s.cap {
+                               fatal("concurrent map read and map write")
+                       }
+                       typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
+                       s.len++
+               }
+               b = b.overflow(t)
+       }
+}
+
+// values for implementing maps.values
+//
+//go:linkname values maps.values
+func values(m any, p unsafe.Pointer) {
+       e := efaceOf(&m)
+       t := (*maptype)(unsafe.Pointer(e._type))
+       h := (*hmap)(e.data)
+       if h == nil || h.count == 0 {
+               return
+       }
+       s := (*slice)(p)
+       r := int(rand())
+       offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
+       if h.B == 0 {
+               copyValues(t, h, (*bmap)(h.buckets), s, offset)
+               return
+       }
+       arraySize := int(bucketShift(h.B))
+       buckets := h.buckets
+       for i := 0; i < arraySize; i++ {
+               bucket := (i + r) & (arraySize - 1)
+               b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
+               copyValues(t, h, b, s, offset)
+       }
+
+       if h.growing() {
+               oldArraySize := int(h.noldbuckets())
+               for i := 0; i < oldArraySize; i++ {
+                       bucket := (i + r) & (oldArraySize - 1)
+                       b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
+                       if evacuated(b) {
+                               continue
+                       }
+                       copyValues(t, h, b, s, offset)
+               }
+       }
+       return
+}
+
+func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
+       for b != nil {
+               for i := uintptr(0); i < abi.MapBucketCount; i++ {
+                       offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
+                       if isEmpty(b.tophash[offi]) {
+                               continue
+                       }
+
+                       if h.flags&hashWriting != 0 {
+                               fatal("concurrent map read and map write")
+                       }
+
+                       ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
+                       if t.IndirectElem() {
+                               ele = *((*unsafe.Pointer)(ele))
+                       }
+                       if s.len >= s.cap {
+                               fatal("concurrent map read and map write")
+                       }
+                       typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
+                       s.len++
+               }
+               b = b.overflow(t)
+       }
+}