Cypherpunks repositories - gostls13.git/commitdiff
all: remove GOEXPERIMENT=swissmap
author    Michael Pratt <mpratt@google.com>
          Fri, 25 Jul 2025 19:35:36 +0000 (15:35 -0400)
committer Gopher Robot <gobot@golang.org>
          Wed, 30 Jul 2025 18:47:14 +0000 (11:47 -0700)
For #54766.

Change-Id: I6a6a636c40b5fe2e8b0d4a5e23933492bc8bb76e
Reviewed-on: https://go-review.googlesource.com/c/go/+/691595
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
71 files changed:
src/cmd/compile/internal/gc/main.go
src/cmd/compile/internal/reflectdata/map.go [moved from src/cmd/compile/internal/reflectdata/map_swiss.go with 100% similarity]
src/cmd/compile/internal/reflectdata/map_noswiss.go [deleted file]
src/cmd/compile/internal/reflectdata/reflect.go
src/cmd/compile/internal/rttype/rttype.go
src/cmd/compile/internal/ssagen/ssa.go
src/cmd/compile/internal/test/inl_test.go
src/cmd/compile/internal/typecheck/_builtin/runtime.go
src/cmd/compile/internal/typecheck/builtin.go
src/cmd/compile/internal/types/fmt.go
src/cmd/compile/internal/types/sizeof_test.go
src/cmd/compile/internal/types/type.go
src/cmd/compile/internal/walk/builtin.go
src/cmd/compile/internal/walk/order.go
src/cmd/compile/internal/walk/range.go
src/cmd/compile/internal/walk/walk.go
src/cmd/link/internal/ld/deadcode.go
src/cmd/link/internal/ld/dwarf.go
src/cmd/link/internal/ld/dwarf_test.go
src/hash/maphash/maphash_runtime.go
src/internal/abi/map.go [moved from src/internal/abi/map_swiss.go with 100% similarity]
src/internal/abi/map_noswiss.go [deleted file]
src/internal/abi/map_select_noswiss.go [deleted file]
src/internal/abi/map_select_swiss.go [deleted file]
src/internal/abi/type.go
src/internal/buildcfg/exp.go
src/internal/goexperiment/exp_swissmap_off.go [deleted file]
src/internal/goexperiment/exp_swissmap_on.go [deleted file]
src/internal/goexperiment/flags.go
src/internal/runtime/maps/export_noswiss_test.go [deleted file]
src/internal/runtime/maps/export_swiss_test.go [deleted file]
src/internal/runtime/maps/export_test.go
src/internal/runtime/maps/map.go
src/internal/runtime/maps/map_swiss_test.go [deleted file]
src/internal/runtime/maps/map_test.go
src/internal/runtime/maps/runtime.go
src/internal/runtime/maps/runtime_fast32.go [moved from src/internal/runtime/maps/runtime_fast32_swiss.go with 99% similarity]
src/internal/runtime/maps/runtime_fast64.go [moved from src/internal/runtime/maps/runtime_fast64_swiss.go with 99% similarity]
src/internal/runtime/maps/runtime_faststr.go [moved from src/internal/runtime/maps/runtime_faststr_swiss.go with 99% similarity]
src/internal/runtime/maps/runtime_swiss.go [deleted file]
src/reflect/all_test.go
src/reflect/export_noswiss_test.go [deleted file]
src/reflect/export_swiss_test.go [deleted file]
src/reflect/export_test.go
src/reflect/map.go [moved from src/reflect/map_swiss.go with 93% similarity]
src/reflect/map_noswiss.go [deleted file]
src/reflect/map_noswiss_test.go [deleted file]
src/reflect/map_test.go [moved from src/reflect/map_swiss_test.go with 76% similarity]
src/runtime/export_map_noswiss_test.go [deleted file]
src/runtime/export_map_swiss_test.go [deleted file]
src/runtime/export_test.go
src/runtime/linkname_shim.go [moved from src/runtime/linkname_swiss.go with 98% similarity]
src/runtime/map.go [moved from src/runtime/map_swiss.go with 95% similarity]
src/runtime/map_fast32.go [moved from src/runtime/map_fast32_swiss.go with 98% similarity]
src/runtime/map_fast32_noswiss.go [deleted file]
src/runtime/map_fast64.go [moved from src/runtime/map_fast64_swiss.go with 98% similarity]
src/runtime/map_fast64_noswiss.go [deleted file]
src/runtime/map_faststr.go [moved from src/runtime/map_faststr_swiss.go with 97% similarity]
src/runtime/map_faststr_noswiss.go [deleted file]
src/runtime/map_noswiss.go [deleted file]
src/runtime/map_noswiss_test.go [deleted file]
src/runtime/map_swiss_test.go [deleted file]
src/runtime/map_test.go
src/runtime/runtime-gdb.py
src/runtime/runtime-gdb_test.go
src/runtime/type.go
test/fixedbugs/issue69110.go [deleted file]
test/fixedbugs/issue70189.go
test/live.go
test/live2.go
test/live_regabi.go

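Note: the deleted src/internal/goexperiment/exp_swissmap_on.go and exp_swissmap_off.go files are not reproduced in the diffs below. As a sketch, assuming the standard generated pattern for GOEXPERIMENT constant files, the "on" variant read roughly:

	// Code generated by mkconsts.go. DO NOT EDIT.

	//go:build goexperiment.swissmap

	package goexperiment

	const SwissMap = true
	const SwissMapInt = 1

The "off" variant carries the inverted build tag (!goexperiment.swissmap) with false/0 values. Deleting both removes goexperiment.SwissMap as a toggle, which is why the diffs below also drop the internal/goexperiment and internal/buildcfg imports and checks.
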
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 253ec3257a1a3be432963c2a9a6b191f08de40f5..42e2afaee4fc244183a0c4e5d9f7f9b546bce49c 100644 (file)
@@ -104,12 +104,10 @@ func Main(archInit func(*ssagen.ArchInfo)) {
        ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
        ir.Pkgs.Runtime.Prefix = "runtime"
 
-       if buildcfg.Experiment.SwissMap {
-               // Pseudo-package that contains the compiler's builtin
-               // declarations for maps.
-               ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
-               ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
-       }
+       // Pseudo-package that contains the compiler's builtin
+       // declarations for maps.
+       ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
+       ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
 
        // pseudo-packages used in symbol tables
        ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
diff --git a/src/cmd/compile/internal/reflectdata/map_noswiss.go b/src/cmd/compile/internal/reflectdata/map_noswiss.go
deleted file mode 100644 (file)
index a6fab4c..0000000
--- a/src/cmd/compile/internal/reflectdata/map_noswiss.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package reflectdata
-
-import (
-       "internal/abi"
-
-       "cmd/compile/internal/base"
-       "cmd/compile/internal/ir"
-       "cmd/compile/internal/rttype"
-       "cmd/compile/internal/types"
-       "cmd/internal/obj"
-       "cmd/internal/objabi"
-       "cmd/internal/src"
-)
-
-// OldMapBucketType makes the map bucket type given the type of the map.
-func OldMapBucketType(t *types.Type) *types.Type {
-       // Builds a type representing a Bucket structure for
-       // the given map type. This type is not visible to users -
-       // we include only enough information to generate a correct GC
-       // program for it.
-       // Make sure this stays in sync with runtime/map.go.
-       //
-       //      A "bucket" is a "struct" {
-       //            tophash [abi.OldMapBucketCount]uint8
-       //            keys [abi.OldMapBucketCount]keyType
-       //            elems [abi.OldMapBucketCount]elemType
-       //            overflow *bucket
-       //          }
-       if t.MapType().OldBucket != nil {
-               return t.MapType().OldBucket
-       }
-
-       keytype := t.Key()
-       elemtype := t.Elem()
-       types.CalcSize(keytype)
-       types.CalcSize(elemtype)
-       if keytype.Size() > abi.OldMapMaxKeyBytes {
-               keytype = types.NewPtr(keytype)
-       }
-       if elemtype.Size() > abi.OldMapMaxElemBytes {
-               elemtype = types.NewPtr(elemtype)
-       }
-
-       field := make([]*types.Field, 0, 5)
-
-       // The first field is: uint8 topbits[BUCKETSIZE].
-       arr := types.NewArray(types.Types[types.TUINT8], abi.OldMapBucketCount)
-       field = append(field, makefield("topbits", arr))
-
-       arr = types.NewArray(keytype, abi.OldMapBucketCount)
-       arr.SetNoalg(true)
-       keys := makefield("keys", arr)
-       field = append(field, keys)
-
-       arr = types.NewArray(elemtype, abi.OldMapBucketCount)
-       arr.SetNoalg(true)
-       elems := makefield("elems", arr)
-       field = append(field, elems)
-
-       // If keys and elems have no pointers, the map implementation
-       // can keep a list of overflow pointers on the side so that
-       // buckets can be marked as having no pointers.
-       // Arrange for the bucket to have no pointers by changing
-       // the type of the overflow field to uintptr in this case.
-       // See comment on hmap.overflow in runtime/map.go.
-       otyp := types.Types[types.TUNSAFEPTR]
-       if !elemtype.HasPointers() && !keytype.HasPointers() {
-               otyp = types.Types[types.TUINTPTR]
-       }
-       overflow := makefield("overflow", otyp)
-       field = append(field, overflow)
-
-       // link up fields
-       bucket := types.NewStruct(field[:])
-       bucket.SetNoalg(true)
-       types.CalcSize(bucket)
-
-       // Check invariants that map code depends on.
-       if !types.IsComparable(t.Key()) {
-               base.Fatalf("unsupported map key type for %v", t)
-       }
-       if abi.OldMapBucketCount < 8 {
-               base.Fatalf("bucket size %d too small for proper alignment %d", abi.OldMapBucketCount, 8)
-       }
-       if uint8(keytype.Alignment()) > abi.OldMapBucketCount {
-               base.Fatalf("key align too big for %v", t)
-       }
-       if uint8(elemtype.Alignment()) > abi.OldMapBucketCount {
-               base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.OldMapBucketCount)
-       }
-       if keytype.Size() > abi.OldMapMaxKeyBytes {
-               base.Fatalf("key size too large for %v", t)
-       }
-       if elemtype.Size() > abi.OldMapMaxElemBytes {
-               base.Fatalf("elem size too large for %v", t)
-       }
-       if t.Key().Size() > abi.OldMapMaxKeyBytes && !keytype.IsPtr() {
-               base.Fatalf("key indirect incorrect for %v", t)
-       }
-       if t.Elem().Size() > abi.OldMapMaxElemBytes && !elemtype.IsPtr() {
-               base.Fatalf("elem indirect incorrect for %v", t)
-       }
-       if keytype.Size()%keytype.Alignment() != 0 {
-               base.Fatalf("key size not a multiple of key align for %v", t)
-       }
-       if elemtype.Size()%elemtype.Alignment() != 0 {
-               base.Fatalf("elem size not a multiple of elem align for %v", t)
-       }
-       if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
-               base.Fatalf("bucket align not multiple of key align %v", t)
-       }
-       if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
-               base.Fatalf("bucket align not multiple of elem align %v", t)
-       }
-       if keys.Offset%keytype.Alignment() != 0 {
-               base.Fatalf("bad alignment of keys in bmap for %v", t)
-       }
-       if elems.Offset%elemtype.Alignment() != 0 {
-               base.Fatalf("bad alignment of elems in bmap for %v", t)
-       }
-
-       // Double-check that overflow field is final memory in struct,
-       // with no padding at end.
-       if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
-               base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
-                       t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
-       }
-
-       t.MapType().OldBucket = bucket
-
-       bucket.StructType().Map = t
-       return bucket
-}
-
-var oldHmapType *types.Type
-
-// OldMapType returns a type interchangeable with runtime.hmap.
-// Make sure this stays in sync with runtime/map.go.
-func OldMapType() *types.Type {
-       if oldHmapType != nil {
-               return oldHmapType
-       }
-
-       // build a struct:
-       // type hmap struct {
-       //    count      int
-       //    flags      uint8
-       //    B          uint8
-       //    noverflow  uint16
-       //    hash0      uint32
-       //    buckets    unsafe.Pointer
-       //    oldbuckets unsafe.Pointer
-       //    nevacuate  uintptr
-       //    clearSeq   uint64
-       //    extra      unsafe.Pointer // *mapextra
-       // }
-       // must match runtime/map.go:hmap.
-       fields := []*types.Field{
-               makefield("count", types.Types[types.TINT]),
-               makefield("flags", types.Types[types.TUINT8]),
-               makefield("B", types.Types[types.TUINT8]),
-               makefield("noverflow", types.Types[types.TUINT16]),
-               makefield("hash0", types.Types[types.TUINT32]),      // Used in walk.go for OMAKEMAP.
-               makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
-               makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
-               makefield("nevacuate", types.Types[types.TUINTPTR]),
-               makefield("clearSeq", types.Types[types.TUINT64]),
-               makefield("extra", types.Types[types.TUNSAFEPTR]),
-       }
-
-       n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
-       hmap := types.NewNamed(n)
-       n.SetType(hmap)
-       n.SetTypecheck(1)
-
-       hmap.SetUnderlying(types.NewStruct(fields))
-       types.CalcSize(hmap)
-
-       // The size of hmap should be 56 bytes on 64 bit
-       // and 36 bytes on 32 bit platforms.
-       if size := int64(2*8 + 5*types.PtrSize); hmap.Size() != size {
-               base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
-       }
-
-       oldHmapType = hmap
-       return hmap
-}
-
-var oldHiterType *types.Type
-
-// OldMapIterType returns a type interchangeable with runtime.hiter.
-// Make sure this stays in sync with runtime/map.go.
-func OldMapIterType() *types.Type {
-       if oldHiterType != nil {
-               return oldHiterType
-       }
-
-       hmap := OldMapType()
-
-       // build a struct:
-       // type hiter struct {
-       //    key         unsafe.Pointer // *Key
-       //    elem        unsafe.Pointer // *Elem
-       //    t           unsafe.Pointer // *OldMapType
-       //    h           *hmap
-       //    buckets     unsafe.Pointer
-       //    bptr        unsafe.Pointer // *bmap
-       //    overflow    unsafe.Pointer // *[]*bmap
-       //    oldoverflow unsafe.Pointer // *[]*bmap
-       //    startBucket uintptr
-       //    offset      uint8
-       //    wrapped     bool
-       //    B           uint8
-       //    i           uint8
-       //    bucket      uintptr
-       //    checkBucket uintptr
-       //    clearSeq    uint64
-       // }
-       // must match runtime/map.go:hiter.
-       fields := []*types.Field{
-               makefield("key", types.Types[types.TUNSAFEPTR]),  // Used in range.go for TMAP.
-               makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
-               makefield("t", types.Types[types.TUNSAFEPTR]),
-               makefield("h", types.NewPtr(hmap)),
-               makefield("buckets", types.Types[types.TUNSAFEPTR]),
-               makefield("bptr", types.Types[types.TUNSAFEPTR]),
-               makefield("overflow", types.Types[types.TUNSAFEPTR]),
-               makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
-               makefield("startBucket", types.Types[types.TUINTPTR]),
-               makefield("offset", types.Types[types.TUINT8]),
-               makefield("wrapped", types.Types[types.TBOOL]),
-               makefield("B", types.Types[types.TUINT8]),
-               makefield("i", types.Types[types.TUINT8]),
-               makefield("bucket", types.Types[types.TUINTPTR]),
-               makefield("checkBucket", types.Types[types.TUINTPTR]),
-               makefield("clearSeq", types.Types[types.TUINT64]),
-       }
-
-       // build iterator struct holding the above fields
-       n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
-       hiter := types.NewNamed(n)
-       n.SetType(hiter)
-       n.SetTypecheck(1)
-
-       hiter.SetUnderlying(types.NewStruct(fields))
-       types.CalcSize(hiter)
-       if hiter.Size() != int64(8+12*types.PtrSize) {
-               base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 8+12*types.PtrSize)
-       }
-
-       oldHiterType = hiter
-       return hiter
-}
-
-func writeOldMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
-       // internal/abi.OldMapType
-       s1 := writeType(t.Key())
-       s2 := writeType(t.Elem())
-       s3 := writeType(OldMapBucketType(t))
-       hasher := genhash(t.Key())
-
-       c.Field("Key").WritePtr(s1)
-       c.Field("Elem").WritePtr(s2)
-       c.Field("Bucket").WritePtr(s3)
-       c.Field("Hasher").WritePtr(hasher)
-       var flags uint32
-       // Note: flags must match maptype accessors in ../../../../runtime/type.go
-       // and maptype builder in ../../../../reflect/type.go:MapOf.
-       if t.Key().Size() > abi.OldMapMaxKeyBytes {
-               c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
-               flags |= 1 // indirect key
-       } else {
-               c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
-       }
-
-       if t.Elem().Size() > abi.OldMapMaxElemBytes {
-               c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
-               flags |= 2 // indirect value
-       } else {
-               c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
-       }
-       c.Field("BucketSize").WriteUint16(uint16(OldMapBucketType(t).Size()))
-       if types.IsReflexive(t.Key()) {
-               flags |= 4 // reflexive key
-       }
-       if needkeyupdate(t.Key()) {
-               flags |= 8 // need key update
-       }
-       if hashMightPanic(t.Key()) {
-               flags |= 16 // hash might panic
-       }
-       c.Field("Flags").WriteUint32(flags)
-
-       if u := t.Underlying(); u != t {
-               // If t is a named map type, also keep the underlying map
-               // type live in the binary. This is important to make sure that
-               // a named map and that same map cast to its underlying type via
-               // reflection, use the same hash function. See issue 37716.
-               lsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_KEEP, Sym: writeType(u)})
-       }
-}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 62cef16816c61a851b607c965be8c125f66e5c7c..360536b427d4367ff9d89af2ed585e4a8a37b0a7 100644 (file)
@@ -8,7 +8,6 @@ import (
        "encoding/binary"
        "fmt"
        "internal/abi"
-       "internal/buildcfg"
        "slices"
        "sort"
        "strings"
@@ -773,11 +772,7 @@ func writeType(t *types.Type) *obj.LSym {
                rt = rttype.InterfaceType
                dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
        case types.TMAP:
-               if buildcfg.Experiment.SwissMap {
-                       rt = rttype.SwissMapType
-               } else {
-                       rt = rttype.OldMapType
-               }
+               rt = rttype.MapType
        case types.TPTR:
                rt = rttype.PtrType
                // TODO: use rttype.Type for Elem() is ANY?
@@ -877,11 +872,7 @@ func writeType(t *types.Type) *obj.LSym {
                }
 
        case types.TMAP:
-               if buildcfg.Experiment.SwissMap {
-                       writeSwissMapType(t, lsym, c)
-               } else {
-                       writeOldMapType(t, lsym, c)
-               }
+               writeSwissMapType(t, lsym, c)
 
        case types.TPTR:
                // internal/abi.PtrType
diff --git a/src/cmd/compile/internal/rttype/rttype.go b/src/cmd/compile/internal/rttype/rttype.go
index aaf98dda152f1fa338fcf275ca94c33287e3e80f..670129c7d5b13b0f144cb249c13492bf43ef1d9c 100644 (file)
@@ -27,8 +27,7 @@ var ArrayType *types.Type
 var ChanType *types.Type
 var FuncType *types.Type
 var InterfaceType *types.Type
-var OldMapType *types.Type
-var SwissMapType *types.Type
+var MapType *types.Type
 var PtrType *types.Type
 var SliceType *types.Type
 var StructType *types.Type
@@ -55,8 +54,7 @@ func Init() {
        ChanType = FromReflect(reflect.TypeOf(abi.ChanType{}))
        FuncType = FromReflect(reflect.TypeOf(abi.FuncType{}))
        InterfaceType = FromReflect(reflect.TypeOf(abi.InterfaceType{}))
-       OldMapType = FromReflect(reflect.TypeOf(abi.OldMapType{}))
-       SwissMapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
+       MapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
        PtrType = FromReflect(reflect.TypeOf(abi.PtrType{}))
        SliceType = FromReflect(reflect.TypeOf(abi.SliceType{}))
        StructType = FromReflect(reflect.TypeOf(abi.StructType{}))
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 053bfc3093accb0d9d615e753fc19117804dc611..2a0b7991ae4787ed81eb6b11d8d623bc82ec3799 100644 (file)
@@ -94,12 +94,8 @@ func InitConfig() {
        _ = types.NewPtr(types.Types[types.TINT16])                             // *int16
        _ = types.NewPtr(types.Types[types.TINT64])                             // *int64
        _ = types.NewPtr(types.ErrorType)                                       // *error
-       if buildcfg.Experiment.SwissMap {
-               _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
-       } else {
-               _ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap
-       }
-       _ = types.NewPtr(deferstruct()) // *runtime._defer
+       _ = types.NewPtr(reflectdata.SwissMapType())                            // *internal/runtime/maps.Map
+       _ = types.NewPtr(deferstruct())                                         // *runtime._defer
        types.NewPtrCacheEnabled = false
        ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
        ssaConfig.Race = base.Flag.Race
@@ -3083,13 +3079,8 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
                        return v
                }
 
-               // map <--> *hmap
-               var mt *types.Type
-               if buildcfg.Experiment.SwissMap {
-                       mt = types.NewPtr(reflectdata.SwissMapType())
-               } else {
-                       mt = types.NewPtr(reflectdata.OldMapType())
-               }
+               // map <--> *internal/runtime/maps.Map
+               mt := types.NewPtr(reflectdata.SwissMapType())
                if to.Kind() == types.TMAP && from == mt {
                        return v
                }
@@ -5759,13 +5750,13 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
        s.startBlock(bElse)
        switch n.Op() {
        case ir.OLEN:
-               if buildcfg.Experiment.SwissMap && n.X.Type().IsMap() {
-                       // length is stored in the first word.
+               if n.X.Type().IsMap() {
+                       // length is stored in the first word, but needs conversion to int.
                        loadType := reflectdata.SwissMapType().Field(0).Type // uint64
                        load := s.load(loadType, x)
                        s.vars[n] = s.conv(nil, load, loadType, lenType) // integer conversion doesn't need Node
                } else {
-                       // length is stored in the first word for map/chan
+                       // length is stored in the first word for chan, no conversion needed.
                        s.vars[n] = s.load(lenType, x)
                }
        case ir.OCAP:
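
The OLEN hunk above loads the map length from the first word of the map header. For reference, a sketch of the relevant prefix of internal/runtime/maps.Map; the field order is implied by this hunk's Field(0) access and the Field(1)/Field(2) accesses in the walk/builtin.go hunks further down, while the comments are assumptions:

	package maps

	import "unsafe"

	// Map is the runtime map header; a Go map value is a *Map.
	type Map struct {
		// used is the number of live elements. It must stay the first
		// field: the compiler loads len(m) directly from it, as in the
		// OLEN case above (Field(0), a uint64 converted to int).
		used uint64

		// seed is the per-map hash seed; walkMakeMap writes it via
		// Field(1) for stack-allocated maps.
		seed uintptr

		// dirPtr points to the table directory, or directly to a single
		// slot group for small maps; walkMakeMap writes it via Field(2).
		dirPtr unsafe.Pointer

		// ... remaining fields omitted.
	}
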
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index ace95f78a6c3ded5de86f4ad222d9f5fd221915f..eda6084b48e7cc22bb08652b36e02110959089ad 100644 (file)
@@ -6,7 +6,6 @@ package test
 
 import (
        "bufio"
-       "internal/goexperiment"
        "internal/testenv"
        "io"
        "math/bits"
@@ -234,15 +233,6 @@ func TestIntendedInlining(t *testing.T) {
                },
        }
 
-       if !goexperiment.SwissMap {
-               // Maps
-               want["runtime"] = append(want["runtime"], "bucketMask")
-               want["runtime"] = append(want["runtime"], "bucketShift")
-               want["runtime"] = append(want["runtime"], "evacuated")
-               want["runtime"] = append(want["runtime"], "tophash")
-               want["runtime"] = append(want["runtime"], "(*bmap).keys")
-               want["runtime"] = append(want["runtime"], "(*bmap).overflow")
-       }
        if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
                // nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable.
                // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
index 628c7e2fbc560a3ba9ae65b431b0a9c45623ae86..296bfdc281d4d5d83c9a81bc9e49fd7ee31b717b 100644 (file)
@@ -152,14 +152,12 @@ func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (v
 func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
 func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
 func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
-func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)  // old maps
-func mapIterStart(mapType *byte, hmap map[any]any, hiter *any) // swiss maps
+func mapIterStart(mapType *byte, hmap map[any]any, hiter *any)
 func mapdelete(mapType *byte, hmap map[any]any, key *any)
 func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32)
 func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64)
 func mapdelete_faststr(mapType *byte, hmap map[any]any, key string)
-func mapiternext(hiter *any) // old maps
-func mapIterNext(hiter *any) // swiss maps
+func mapIterNext(hiter *any)
 func mapclear(mapType *byte, hmap map[any]any)
 
 // *byte is really *runtime.Type
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index 730ed4146a75adc878fe628edeb25bf30865925c..535f0fb7e88851e764f53f81c62d7ea52b16d3eb 100644 (file)
@@ -130,13 +130,11 @@ var runtimeDecls = [...]struct {
        {"mapassign_fast64", funcTag, 85},
        {"mapassign_fast64ptr", funcTag, 93},
        {"mapassign_faststr", funcTag, 86},
-       {"mapiterinit", funcTag, 94},
        {"mapIterStart", funcTag, 94},
        {"mapdelete", funcTag, 94},
        {"mapdelete_fast32", funcTag, 95},
        {"mapdelete_fast64", funcTag, 96},
        {"mapdelete_faststr", funcTag, 97},
-       {"mapiternext", funcTag, 98},
        {"mapIterNext", funcTag, 98},
        {"mapclear", funcTag, 99},
        {"makechan64", funcTag, 101},
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index 139defafe20b4c12ef457acfd6a81f1105fddeaa..697c62cbc92d2510ce024d39b20d029ee1a7d2b1 100644 (file)
@@ -471,11 +471,9 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
        case TSTRUCT:
                if m := t.StructType().Map; m != nil {
                        mt := m.MapType()
-                       // Format the bucket struct for map[x]y as map.bucket[x]y.
+                       // Format the bucket struct for map[x]y as map.group[x]y.
                        // This avoids a recursive print that generates very long names.
                        switch t {
-                       case mt.OldBucket:
-                               b.WriteString("map.bucket[")
                        case mt.SwissGroup:
                                b.WriteString("map.group[")
                        default:
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 3b2aeece3ed99e47c9492a0fe1fa75489c9d2061..ba033ec499b086dc02a8dce0dabc0c24700559e8 100644 (file)
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
        }{
                {Sym{}, 32, 64},
                {Type{}, 60, 96},
-               {Map{}, 16, 32},
+               {Map{}, 12, 24},
                {Forward{}, 20, 32},
                {Func{}, 32, 56},
                {Struct{}, 12, 24},
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index c4080ed0b526ae34742d62f950d057840ac2e2e2..47bffb68f465e8e9e56b82450cdc4f8a31a42ab6 100644 (file)
@@ -10,7 +10,6 @@ import (
        "cmd/internal/src"
        "fmt"
        "go/constant"
-       "internal/buildcfg"
        "internal/types/errors"
        "sync"
 )
@@ -281,16 +280,6 @@ type Map struct {
        Key  *Type // Key type
        Elem *Type // Val (elem) type
 
-       // Note: It would be cleaner to completely split Map into OldMap and
-       // SwissMap, but 99% of the types map code doesn't care about the
-       // implementation at all, so it is tons of churn to split the type.
-       // Only code that looks at the bucket field can care about the
-       // implementation.
-
-       // GOEXPERIMENT=noswissmap fields
-       OldBucket *Type // internal struct type representing a hash bucket
-
-       // GOEXPERIMENT=swissmap fields
        SwissGroup *Type // internal struct type representing a slot group
 }
 
@@ -1189,37 +1178,20 @@ func (t *Type) cmp(x *Type) Cmp {
                // by the general code after the switch.
 
        case TSTRUCT:
-               if buildcfg.Experiment.SwissMap {
-                       // Is this a map group type?
-                       if t.StructType().Map == nil {
-                               if x.StructType().Map != nil {
-                                       return CMPlt // nil < non-nil
-                               }
-                               // to the fallthrough
-                       } else if x.StructType().Map == nil {
-                               return CMPgt // nil > non-nil
-                       }
-                       // Both have non-nil Map, fallthrough to the general
-                       // case. Note that the map type does not directly refer
-                       // to the group type (it uses unsafe.Pointer). If it
-                       // did, this would need special handling to avoid
-                       // infinite recursion.
-               } else {
-                       // Is this a map bucket type?
-                       if t.StructType().Map == nil {
-                               if x.StructType().Map != nil {
-                                       return CMPlt // nil < non-nil
-                               }
-                               // to the fallthrough
-                       } else if x.StructType().Map == nil {
-                               return CMPgt // nil > non-nil
+               // Is this a map group type?
+               if t.StructType().Map == nil {
+                       if x.StructType().Map != nil {
+                               return CMPlt // nil < non-nil
                        }
-                       // Both have non-nil Map, fallthrough to the general
-                       // case. Note that the map type does not directly refer
-                       // to the bucket type (it uses unsafe.Pointer). If it
-                       // did, this would need special handling to avoid
-                       // infinite recursion.
+                       // to the general case
+               } else if x.StructType().Map == nil {
+                       return CMPgt // nil > non-nil
                }
+               // Both have non-nil Map, fallthrough to the general
+               // case. Note that the map type does not directly refer
+               // to the group type (it uses unsafe.Pointer). If it
+               // did, this would need special handling to avoid
+               // infinite recursion.
 
                tfs := t.Fields()
                xfs := x.Fields()
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 3b5985d1303664f717bac8fe7bc16002f580314b..1cd3126ce1591b92a902411c0dddb4ab028cf8a5 100644 (file)
@@ -9,7 +9,6 @@ import (
        "go/constant"
        "go/token"
        "internal/abi"
-       "internal/buildcfg"
        "strings"
 
        "cmd/compile/internal/base"
@@ -313,13 +312,6 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
 
 // walkMakeMap walks an OMAKEMAP node.
 func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
-       if buildcfg.Experiment.SwissMap {
-               return walkMakeSwissMap(n, init)
-       }
-       return walkMakeOldMap(n, init)
-}
-
-func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
        t := n.Type()
        mapType := reflectdata.SwissMapType()
        hint := n.Len
@@ -366,12 +358,12 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
                        empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty))
 
                        // g.ctrl = abi.SwissMapCtrlEmpty
-                       csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map_swiss.go
+                       csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map.go
                        ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty)
                        nif.Body.Append(ca)
 
                        // m.dirPtr = g
-                       dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map_swiss.go
+                       dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map.go
                        na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, dsym), typecheck.ConvNop(g, types.Types[types.TUNSAFEPTR]))
                        nif.Body.Append(na)
                        appendWalkStmt(init, nif)
@@ -391,7 +383,7 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
                        // m map has been allocated on the stack already.
                        // m.seed = uintptr(rand())
                        rand := mkcall("rand", types.Types[types.TUINT64], init)
-                       seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map_swiss.go
+                       seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map.go
                        appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, seedSym), typecheck.Conv(rand, types.Types[types.TUINTPTR])))
                        return typecheck.ConvNop(m, t)
                }
@@ -428,101 +420,6 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
        return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), m)
 }
 
-func walkMakeOldMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
-       t := n.Type()
-       hmapType := reflectdata.OldMapType()
-       hint := n.Len
-
-       // var h *hmap
-       var h ir.Node
-       if n.Esc() == ir.EscNone {
-               // Allocate hmap on stack.
-
-               // var hv hmap
-               // h = &hv
-               h = stackTempAddr(init, hmapType)
-
-               // Allocate one bucket pointed to by hmap.buckets on stack if hint
-               // is not larger than BUCKETSIZE. In case hint is larger than
-               // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
-               // Maximum key and elem size is 128 bytes, larger objects
-               // are stored with an indirection. So max bucket size is 2048+eps.
-               if !ir.IsConst(hint, constant.Int) ||
-                       constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
-
-                       // In case hint is larger than BUCKETSIZE runtime.makemap
-                       // will allocate the buckets on the heap, see #20184
-                       //
-                       // if hint <= BUCKETSIZE {
-                       //     var bv bmap
-                       //     b = &bv
-                       //     h.buckets = b
-                       // }
-
-                       nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.OldMapBucketCount)), nil, nil)
-                       nif.Likely = true
-
-                       // var bv bmap
-                       // b = &bv
-                       b := stackTempAddr(&nif.Body, reflectdata.OldMapBucketType(t))
-
-                       // h.buckets = b
-                       bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
-                       na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
-                       nif.Body.Append(na)
-                       appendWalkStmt(init, nif)
-               }
-       }
-
-       if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
-               // Handling make(map[any]any) and
-               // make(map[any]any, hint) where hint <= BUCKETSIZE
-               // special allows for faster map initialization and
-               // improves binary size by using calls with fewer arguments.
-               // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
-               // and no buckets will be allocated by makemap. Therefore,
-               // no buckets need to be allocated in this code path.
-               if n.Esc() == ir.EscNone {
-                       // Only need to initialize h.hash0 since
-                       // hmap h has been allocated on the stack already.
-                       // h.hash0 = rand32()
-                       rand := mkcall("rand32", types.Types[types.TUINT32], init)
-                       hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
-                       appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
-                       return typecheck.ConvNop(h, t)
-               }
-               // Call runtime.makemap_small to allocate an
-               // hmap on the heap and initialize hmap's hash0 field.
-               fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
-               return mkcall1(fn, n.Type(), init)
-       }
-
-       if n.Esc() != ir.EscNone {
-               h = typecheck.NodNil()
-       }
-       // Map initialization with a variable or large hint is
-       // more complicated. We therefore generate a call to
-       // runtime.makemap to initialize hmap and allocate the
-       // map buckets.
-
-       // When hint fits into int, use makemap instead of
-       // makemap64, which is faster and shorter on 32 bit platforms.
-       fnname := "makemap64"
-       argtype := types.Types[types.TINT64]
-
-       // Type checking guarantees that TIDEAL hint is positive and fits in an int.
-       // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
-       // The case of hint overflow when converting TUINT or TUINTPTR to TINT
-       // will be handled by the negative range checks in makemap during runtime.
-       if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
-               fnname = "makemap"
-               argtype = types.Types[types.TINT]
-       }
-
-       fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
-       return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
-}
-
 // walkMakeSlice walks an OMAKESLICE node.
 func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
        len := n.Len
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index daa7f92617df35af04b5d8bb286dd573f0fa83f1..41498fa018f8192e79ad21bf57458a14764df0cb 100644 (file)
@@ -8,7 +8,6 @@ import (
        "fmt"
        "go/constant"
        "internal/abi"
-       "internal/buildcfg"
 
        "cmd/compile/internal/base"
        "cmd/compile/internal/ir"
@@ -967,12 +966,8 @@ func (o *orderState) stmt(n ir.Node) {
                        n.X = o.copyExpr(r)
 
                        // n.Prealloc is the temp for the iterator.
-                       // MapIterType contains pointers and needs to be zeroed.
-                       if buildcfg.Experiment.SwissMap {
-                               n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
-                       } else {
-                               n.Prealloc = o.newTemp(reflectdata.OldMapIterType(), true)
-                       }
+                       // SwissMapIterType contains pointers and needs to be zeroed.
+                       n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
                }
                n.Key = o.exprInPlace(n.Key)
                n.Value = o.exprInPlace(n.Value)
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
index 3d3547b84b500c915ef10291760169162ed0a3ba..de42605bd7bd2806ffe911d0b810b89dd0311673 100644 (file)
@@ -6,7 +6,6 @@ package walk
 
 import (
        "go/constant"
-       "internal/buildcfg"
        "unicode/utf8"
 
        "cmd/compile/internal/base"
@@ -247,20 +246,11 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
                hit := nrange.Prealloc
                th := hit.Type()
                // depends on layout of iterator struct.
-               // See cmd/compile/internal/reflectdata/reflect.go:MapIterType
-               var keysym, elemsym *types.Sym
-               var iterInit, iterNext string
-               if buildcfg.Experiment.SwissMap {
-                       keysym = th.Field(0).Sym
-                       elemsym = th.Field(1).Sym // ditto
-                       iterInit = "mapIterStart"
-                       iterNext = "mapIterNext"
-               } else {
-                       keysym = th.Field(0).Sym
-                       elemsym = th.Field(1).Sym // ditto
-                       iterInit = "mapiterinit"
-                       iterNext = "mapiternext"
-               }
+               // See cmd/compile/internal/reflectdata/map.go:SwissMapIterType
+               keysym := th.Field(0).Sym
+               elemsym := th.Field(1).Sym // ditto
+               iterInit := "mapIterStart"
+               iterNext := "mapIterNext"
 
                fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th)
                init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 2fa51f1280936252afa7427b7a021ed981254c0d..7012f5cd438e0bf0be11f8096acf8e983b6a8336 100644 (file)
@@ -7,7 +7,6 @@ package walk
 import (
        "fmt"
        "internal/abi"
-       "internal/buildcfg"
 
        "cmd/compile/internal/base"
        "cmd/compile/internal/ir"
@@ -192,42 +191,7 @@ var mapassign = mkmapnames("mapassign", "ptr")
 var mapdelete = mkmapnames("mapdelete", "")
 
 func mapfast(t *types.Type) int {
-       if buildcfg.Experiment.SwissMap {
-               return mapfastSwiss(t)
-       }
-       return mapfastOld(t)
-}
-
-func mapfastSwiss(t *types.Type) int {
-       if t.Elem().Size() > abi.OldMapMaxElemBytes {
-               return mapslow
-       }
-       switch reflectdata.AlgType(t.Key()) {
-       case types.AMEM32:
-               if !t.Key().HasPointers() {
-                       return mapfast32
-               }
-               if types.PtrSize == 4 {
-                       return mapfast32ptr
-               }
-               base.Fatalf("small pointer %v", t.Key())
-       case types.AMEM64:
-               if !t.Key().HasPointers() {
-                       return mapfast64
-               }
-               if types.PtrSize == 8 {
-                       return mapfast64ptr
-               }
-               // Two-word object, at least one of which is a pointer.
-               // Use the slow path.
-       case types.ASTRING:
-               return mapfaststr
-       }
-       return mapslow
-}
-
-func mapfastOld(t *types.Type) int {
-       if t.Elem().Size() > abi.OldMapMaxElemBytes {
+       if t.Elem().Size() > abi.SwissMapMaxElemBytes {
                return mapslow
        }
        switch reflectdata.AlgType(t.Key()) {
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index cdf7deb31b0b766177982bec1b4086ec78938567..d042b238270f486f7733c1eab6ea1c00f97b8bcb 100644 (file)
@@ -560,13 +560,9 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
        case abi.Chan: // reflect.chanType
                off += 2 * arch.PtrSize
        case abi.Map:
-               if buildcfg.Experiment.SwissMap {
-                       off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
-                       if arch.PtrSize == 8 {
-                               off += 4 // padding for final uint32 field (Flags).
-                       }
-               } else {
-                       off += 4*arch.PtrSize + 8 // internal/abi.OldMapType
+               off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
+               if arch.PtrSize == 8 {
+                       off += 4 // padding for final uint32 field (Flags).
                }
        case abi.Interface: // reflect.interfaceType
                off += 3 * arch.PtrSize
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 602d70ddb98295bd457b5e9fb7f4d4a9c409f750..73b315182918d1f5a860b8341abdb1ba97dadd31 100644 (file)
@@ -872,14 +872,6 @@ func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valna
 }
 
 func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
-       if buildcfg.Experiment.SwissMap {
-               d.synthesizemaptypesSwiss(ctxt, die)
-       } else {
-               d.synthesizemaptypesOld(ctxt, die)
-       }
-}
-
-func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
        mapType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.Map"))
        tableType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.table"))
        groupsReferenceType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.groupsReference"))
@@ -941,102 +933,6 @@ func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
        }
 }
 
-func (d *dwctxt) synthesizemaptypesOld(ctxt *Link, die *dwarf.DWDie) {
-       hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap"))
-       bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap"))
-
-       if hash == nil {
-               return
-       }
-
-       for ; die != nil; die = die.Link {
-               if die.Abbrev != dwarf.DW_ABRV_MAPTYPE {
-                       continue
-               }
-               gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym))
-               keytype := decodetypeMapKey(d.ldr, d.arch, gotype)
-               valtype := decodetypeMapValue(d.ldr, d.arch, gotype)
-               keydata := d.ldr.Data(keytype)
-               valdata := d.ldr.Data(valtype)
-               keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata)
-               keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype))
-
-               // compute size info like hashmap.c does.
-               indirectKey, indirectVal := false, false
-               if keysize > abi.OldMapMaxKeyBytes {
-                       keysize = int64(d.arch.PtrSize)
-                       indirectKey = true
-               }
-               if valsize > abi.OldMapMaxElemBytes {
-                       valsize = int64(d.arch.PtrSize)
-                       indirectVal = true
-               }
-
-               // Construct type to represent an array of BucketSize keys
-               keyname := d.nameFromDIESym(keytype)
-               dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
-                       newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*keysize, 0)
-                       t := keytype
-                       if indirectKey {
-                               t = d.defptrto(keytype)
-                       }
-                       d.newrefattr(dwhk, dwarf.DW_AT_type, t)
-                       fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
-                       newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
-                       d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
-               })
-
-               // Construct type to represent an array of BucketSize values
-               valname := d.nameFromDIESym(valtype)
-               dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
-                       newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*valsize, 0)
-                       t := valtype
-                       if indirectVal {
-                               t = d.defptrto(valtype)
-                       }
-                       d.newrefattr(dwhv, dwarf.DW_AT_type, t)
-                       fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
-                       newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
-                       d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
-               })
-
-               // Construct bucket<K,V>
-               dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
-                       // Copy over all fields except the field "data" from the generic
-                       // bucket. "data" will be replaced with keys/values below.
-                       d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
-
-                       fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
-                       d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
-                       newmemberoffsetattr(fld, abi.OldMapBucketCount)
-                       fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
-                       d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
-                       newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*int32(keysize))
-                       fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
-                       d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
-                       newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize)))
-                       if d.arch.RegSize > d.arch.PtrSize {
-                               fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
-                               d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
-                               newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
-                       }
-
-                       newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount+abi.OldMapBucketCount*keysize+abi.OldMapBucketCount*valsize+int64(d.arch.RegSize), 0)
-               })
-
-               // Construct hash<K,V>
-               dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
-                       d.copychildren(ctxt, dwh, hash)
-                       d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
-                       d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
-                       newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
-               })
-
-               // make map type a pointer to hash<K,V>
-               d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs))
-       }
-}
-
 func (d *dwctxt) synthesizechantypes(ctxt *Link, die *dwarf.DWDie) {
        sudog := walktypedef(d.findprotodie(ctxt, "type:runtime.sudog"))
        waitq := walktypedef(d.findprotodie(ctxt, "type:runtime.waitq"))
@@ -2010,19 +1906,14 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
 
        // Prototypes needed for type synthesis.
        prototypedies = map[string]*dwarf.DWDie{
-               "type:runtime.stringStructDWARF": nil,
-               "type:runtime.slice":             nil,
-               "type:runtime.sudog":             nil,
-               "type:runtime.waitq":             nil,
-               "type:runtime.hchan":             nil,
-       }
-       if buildcfg.Experiment.SwissMap {
-               prototypedies["type:internal/runtime/maps.Map"] = nil
-               prototypedies["type:internal/runtime/maps.table"] = nil
-               prototypedies["type:internal/runtime/maps.groupsReference"] = nil
-       } else {
-               prototypedies["type:runtime.hmap"] = nil
-               prototypedies["type:runtime.bmap"] = nil
+               "type:runtime.stringStructDWARF":             nil,
+               "type:runtime.slice":                         nil,
+               "type:runtime.sudog":                         nil,
+               "type:runtime.waitq":                         nil,
+               "type:runtime.hchan":                         nil,
+               "type:internal/runtime/maps.Map":             nil,
+               "type:internal/runtime/maps.table":           nil,
+               "type:internal/runtime/maps.groupsReference": nil,
        }
 
        // Needed by the prettyprinter code for interface inspection.
@@ -2034,16 +1925,12 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
                "type:internal/abi.PtrType",
                "type:internal/abi.SliceType",
                "type:internal/abi.StructType",
+               "type:internal/abi.SwissMapType",
                "type:internal/abi.InterfaceType",
                "type:internal/abi.ITab",
                "type:internal/abi.Imethod"} {
                d.defgotype(d.lookupOrDiag(typ))
        }
-       if buildcfg.Experiment.SwissMap {
-               d.defgotype(d.lookupOrDiag("type:internal/abi.SwissMapType"))
-       } else {
-               d.defgotype(d.lookupOrDiag("type:internal/abi.OldMapType"))
-       }
 
        // fake root DIE for compile unit DIEs
        var dwroot dwarf.DWDie
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index ab086c57f4ff0f548531e18cfded80ab30dbc524..ded7794dc33d6dfb5c834813a1d13d33680e3580 100644 (file)
@@ -63,6 +63,7 @@ func TestRuntimeTypesPresent(t *testing.T) {
                "internal/abi.PtrType":       true,
                "internal/abi.SliceType":     true,
                "internal/abi.StructType":    true,
+               "internal/abi.SwissMapType":  true,
                "internal/abi.InterfaceType": true,
                "internal/abi.ITab":          true,
        }
@@ -71,16 +72,6 @@ func TestRuntimeTypesPresent(t *testing.T) {
        if len(found) != len(want) {
                t.Errorf("found %v, want %v", found, want)
        }
-
-       // Must have one of OldMapType or SwissMapType.
-       want = map[string]bool{
-               "internal/abi.OldMapType":   true,
-               "internal/abi.SwissMapType": true,
-       }
-       found = findTypes(t, dwarf, want)
-       if len(found) != 1 {
-               t.Errorf("map type want one of %v found %v", want, found)
-       }
 }
 
 func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[string]bool) {
diff --git a/src/hash/maphash/maphash_runtime.go b/src/hash/maphash/maphash_runtime.go
index 91e7d49e2c3e07718215d9de113ef6a52798f185..14e2a0c75fd0141f812a7121382610a859140760 100644 (file)
@@ -9,7 +9,6 @@ package maphash
 import (
        "internal/abi"
        "internal/goarch"
-       "internal/goexperiment"
        "unsafe"
 )
 
@@ -51,12 +50,7 @@ func comparableHash[T comparable](v T, seed Seed) uint64 {
        s := seed.s
        var m map[T]struct{}
        mTyp := abi.TypeOf(m)
-       var hasher func(unsafe.Pointer, uintptr) uintptr
-       if goexperiment.SwissMap {
-               hasher = (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
-       } else {
-               hasher = (*abi.OldMapType)(unsafe.Pointer(mTyp)).Hasher
-       }
+       hasher := (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
        if goarch.PtrSize == 8 {
                return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s)))
        }
diff --git a/src/internal/abi/map_noswiss.go b/src/internal/abi/map_noswiss.go
deleted file mode 100644 (file)
index ff8609e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package abi
-
-import (
-       "unsafe"
-)
-
-// Map constants common to several packages
-// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
-const (
-       // Maximum number of key/elem pairs a bucket can hold.
-       OldMapBucketCountBits = 3 // log2 of number of elements in a bucket.
-       OldMapBucketCount     = 1 << OldMapBucketCountBits
-
-       // Maximum key or elem size to keep inline (instead of mallocing per element).
-       // Must fit in a uint8.
-       // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
-       OldMapMaxKeyBytes  = 128
-       OldMapMaxElemBytes = 128 // Must fit in a uint8.
-)
-
-type OldMapType struct {
-       Type
-       Key    *Type
-       Elem   *Type
-       Bucket *Type // internal type representing a hash bucket
-       // function for hashing keys (ptr to key, seed) -> hash
-       Hasher     func(unsafe.Pointer, uintptr) uintptr
-       KeySize    uint8  // size of key slot
-       ValueSize  uint8  // size of elem slot
-       BucketSize uint16 // size of bucket
-       Flags      uint32
-}
-
-// Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself
-       return mt.Flags&1 != 0
-}
-func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
-       return mt.Flags&2 != 0
-}
-func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys
-       return mt.Flags&4 != 0
-}
-func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
-       return mt.Flags&8 != 0
-}
-func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic
-       return mt.Flags&16 != 0
-}
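
Every accessor deleted above follows the same bit-flag idiom: one property per bit in Flags, tested with a mask. A self-contained sketch of the pattern (names hypothetical, not the abi package's):

	package main

	import "fmt"

	type mapFlags uint32

	const (
		flagIndirectKey  mapFlags = 1 << 0
		flagIndirectElem mapFlags = 1 << 1
		flagReflexiveKey mapFlags = 1 << 2
	)

	func (f mapFlags) has(bit mapFlags) bool { return f&bit != 0 }

	func main() {
		f := flagIndirectKey | flagReflexiveKey
		fmt.Println(f.has(flagIndirectKey), f.has(flagIndirectElem)) // true false
	}
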
diff --git a/src/internal/abi/map_select_noswiss.go b/src/internal/abi/map_select_noswiss.go
deleted file mode 100644 (file)
index ab2b69d..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package abi
-
-// See comment in map_select_swiss.go.
-type mapType = OldMapType
diff --git a/src/internal/abi/map_select_swiss.go b/src/internal/abi/map_select_swiss.go
deleted file mode 100644 (file)
index 88a0bb2..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package abi
-
-// Select the map type that this binary is built using. This is for common
-// lookup methods like Type.Key to know which type to use.
-//
-// Note that mapType *must not be used by any functions called in the
-// compiler to build a target program* because the compiler must use the map
-// type determined by run-time GOEXPERIMENT, not the build tags used to build
-// the compiler.
-//
-// TODO(prattmic): This package is rather confusing because it has many
-// functions that can't be used by the compiler (e.g., Type.Uncommon depends on
-// the layout of type + uncommon objects in the binary. It would be incorrect
-// for an ad-hoc local Type object). It may be best to move code that isn't
-// usable by the compiler out of the package.
-type mapType = SwissMapType
index 29e17cf6a2d67e89b1c6b858b8a8c3127aaee338..fd6bef4899b6b3421669b4b9f79d3175aa1e0419 100644 (file)
@@ -355,7 +355,7 @@ func (t *Type) Uncommon() *UncommonType {
                return &(*u)(unsafe.Pointer(t)).u
        case Map:
                type u struct {
-                       mapType
+                       SwissMapType
                        u UncommonType
                }
                return &(*u)(unsafe.Pointer(t)).u
@@ -384,7 +384,7 @@ func (t *Type) Elem() *Type {
                tt := (*ChanType)(unsafe.Pointer(t))
                return tt.Elem
        case Map:
-               tt := (*mapType)(unsafe.Pointer(t))
+               tt := (*SwissMapType)(unsafe.Pointer(t))
                return tt.Elem
        case Pointer:
                tt := (*PtrType)(unsafe.Pointer(t))
@@ -404,12 +404,12 @@ func (t *Type) StructType() *StructType {
        return (*StructType)(unsafe.Pointer(t))
 }
 
-// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match.
-func (t *Type) MapType() *mapType {
+// MapType returns t cast to a *SwissMapType, or nil if its tag does not match.
+func (t *Type) MapType() *SwissMapType {
        if t.Kind() != Map {
                return nil
        }
-       return (*mapType)(unsafe.Pointer(t))
+       return (*SwissMapType)(unsafe.Pointer(t))
 }
 
 // ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
@@ -471,7 +471,7 @@ func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
 
 func (t *Type) Key() *Type {
        if t.Kind() == Map {
-               return (*mapType)(unsafe.Pointer(t)).Key
+               return (*SwissMapType)(unsafe.Pointer(t)).Key
        }
        return nil
 }
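
With the build-tag alias gone, every accessor casts directly to SwissMapType, so internal callers recover key and elem descriptors in one step. A sketch in the style of the test helpers elsewhere in this CL (internal/abi is not importable outside the standard library):

	var m map[string]int
	t := abi.TypeOf(m)
	if mt := t.MapType(); mt != nil {
		_ = mt.Key  // *abi.Type describing string
		_ = mt.Elem // *abi.Type describing int
	}
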
index 3f3db8f25f710d1bfbc4ac80e31c7b4a16fe0632..df84e9fdf46feb42e9ad5b44e552bc1a31e0a211 100644 (file)
@@ -82,7 +82,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
                RegabiWrappers:  regabiSupported,
                RegabiArgs:      regabiSupported,
                AliasTypeParams: true,
-               SwissMap:        true,
                Dwarf5:          dwarf5Supported,
        }
 
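
Because the field is also dropped from the Flags struct below, the experiment name itself disappears: ParseGOEXPERIMENT derives the set of valid names from the Flags struct's fields, so a stale GOEXPERIMENT=swissmap (or noswissmap) should now be rejected as unknown rather than accepted as a no-op. A hedged sketch (ParseGOEXPERIMENT is internal to the toolchain):

	if _, err := ParseGOEXPERIMENT("linux", "amd64", "swissmap"); err == nil {
		panic("expected an unknown GOEXPERIMENT error")
	}
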
diff --git a/src/internal/goexperiment/exp_swissmap_off.go b/src/internal/goexperiment/exp_swissmap_off.go
deleted file mode 100644 (file)
index 2af40aa..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.swissmap
-
-package goexperiment
-
-const SwissMap = false
-const SwissMapInt = 0
diff --git a/src/internal/goexperiment/exp_swissmap_on.go b/src/internal/goexperiment/exp_swissmap_on.go
deleted file mode 100644 (file)
index 73be49b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.swissmap
-
-package goexperiment
-
-const SwissMap = true
-const SwissMapInt = 1
index ca99bfbb5cb0343ec4857d42f2e136996dba6a67..dd7a4f446c1a57ec65dfaec8adf7416dabf7d420 100644 (file)
@@ -105,9 +105,6 @@ type Flags struct {
        // This flag will be removed with Go 1.25.
        AliasTypeParams bool
 
-       // SwissMap enables the SwissTable-based map implementation.
-       SwissMap bool
-
        // Synctest enables the testing/synctest package.
        Synctest bool
 
diff --git a/src/internal/runtime/maps/export_noswiss_test.go b/src/internal/runtime/maps/export_noswiss_test.go
deleted file mode 100644 (file)
index 333fc6c..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-// This file allows non-GOEXPERIMENT=swissmap builds (i.e., old map builds) to
-// construct a swissmap table for running the tests in this package.
-
-package maps
-
-import (
-       "internal/abi"
-       "unsafe"
-)
-
-type instantiatedGroup[K comparable, V any] struct {
-       ctrls ctrlGroup
-       slots [abi.SwissMapGroupSlots]instantiatedSlot[K, V]
-}
-
-type instantiatedSlot[K comparable, V any] struct {
-       key  K
-       elem V
-}
-
-func newTestMapType[K comparable, V any]() *abi.SwissMapType {
-       var m map[K]V
-       mTyp := abi.TypeOf(m)
-       omt := (*abi.OldMapType)(unsafe.Pointer(mTyp))
-
-       var grp instantiatedGroup[K, V]
-       var slot instantiatedSlot[K, V]
-
-       mt := &abi.SwissMapType{
-               Key:       omt.Key,
-               Elem:      omt.Elem,
-               Group:     abi.TypeOf(grp),
-               Hasher:    omt.Hasher,
-               SlotSize:  unsafe.Sizeof(slot),
-               GroupSize: unsafe.Sizeof(grp),
-               ElemOff:   unsafe.Offsetof(slot.elem),
-       }
-       if omt.NeedKeyUpdate() {
-               mt.Flags |= abi.SwissMapNeedKeyUpdate
-       }
-       if omt.HashMightPanic() {
-               mt.Flags |= abi.SwissMapHashMightPanic
-       }
-       return mt
-}
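
The deleted shim relied on a trick worth noting: instantiate a generic struct that mirrors the runtime's slot layout, then let unsafe.Sizeof and unsafe.Offsetof compute the sizes and offsets the type descriptor needs. The pattern in isolation (names hypothetical):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type slot[K comparable, V any] struct {
		key  K
		elem V
	}

	func main() {
		var s slot[string, int64]
		// Size of one key/elem slot and the elem's offset within it.
		fmt.Println(unsafe.Sizeof(s), unsafe.Offsetof(s.elem))
	}
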
diff --git a/src/internal/runtime/maps/export_swiss_test.go b/src/internal/runtime/maps/export_swiss_test.go
deleted file mode 100644 (file)
index 3c6faf5..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package maps
-
-import (
-       "internal/abi"
-       "unsafe"
-)
-
-func newTestMapType[K comparable, V any]() *abi.SwissMapType {
-       var m map[K]V
-       mTyp := abi.TypeOf(m)
-       mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
-       return mt
-}
index 2c7b05ea2de783a58ecaabf4d4f16ee50797c729..372ab89627b418791cbb7e3bf282fa9041c5cbad 100644 (file)
@@ -22,6 +22,13 @@ const MaxAvgGroupLoad = maxAvgGroupLoad
 // we can't properly test hint alloc overflows with this.
 const maxAllocTest = 1 << 30
 
+func newTestMapType[K comparable, V any]() *abi.SwissMapType {
+       var m map[K]V
+       mTyp := abi.TypeOf(m)
+       mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+       return mt
+}
+
 func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) {
        mt := newTestMapType[K, V]()
        return NewMap(mt, hint, nil, maxAllocTest), mt
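
Test usage is unchanged; a sketch of how code inside this package's tests obtains a map together with its descriptor (method and field names as they appear elsewhere in this CL):

	m, mt := NewTestMap[int, string](8)
	_ = mt.GroupSize // group layout computed by the compiler
	if m.Used() != 0 {
		panic("new map should be empty")
	}
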
index 017942cfad24fbc9764aa214da34cf005cf8cf41..9781c46463bf146d442b0b5c9a151e7cecaeb314 100644 (file)
@@ -191,7 +191,7 @@ func h2(h uintptr) uintptr {
        return h & 0x7f
 }
 
-// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType.
+// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:SwissMapType.
 type Map struct {
        // The number of filled slots (i.e. the number of elements in all
        // tables). Excludes deleted slots.
@@ -814,13 +814,6 @@ func (m *Map) Clone(typ *abi.SwissMapType) *Map {
        return m
 }
 
-func OldMapKeyError(t *abi.OldMapType, p unsafe.Pointer) error {
-       if !t.HashMightPanic() {
-               return nil
-       }
-       return mapKeyError2(t.Key, p)
-}
-
 func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error {
        if !t.HashMightPanic() {
                return nil
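
mapKeyError only does real work when the key type's hash can panic, which happens when an interface-typed key holds an uncomparable dynamic value. For example:

	m := map[any]int{}
	m["ok"] = 1 // string is comparable: fine
	// m[[]byte("no")] = 2 // would panic: hash of unhashable type []uint8
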
diff --git a/src/internal/runtime/maps/map_swiss_test.go b/src/internal/runtime/maps/map_swiss_test.go
deleted file mode 100644 (file)
index eef1c5b..0000000
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests of map internals that need to use the builtin map type, and thus must
-// be built with GOEXPERIMENT=swissmap.
-
-//go:build goexperiment.swissmap
-
-package maps_test
-
-import (
-       "fmt"
-       "internal/abi"
-       "internal/runtime/maps"
-       "testing"
-       "unsafe"
-)
-
-var alwaysFalse bool
-var escapeSink any
-
-func escape[T any](x T) T {
-       if alwaysFalse {
-               escapeSink = x
-       }
-       return x
-}
-
-const (
-       belowMax = abi.SwissMapGroupSlots * 3 / 2                                               // 1.5 * group max = 2 groups @ 75%
-       atMax    = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
-)
-
-func TestTableGroupCount(t *testing.T) {
-       // Test that maps of different sizes have the right number of
-       // tables/groups.
-
-       type mapCount struct {
-               tables int
-               groups uint64
-       }
-
-       type mapCase struct {
-               initialLit  mapCount
-               initialHint mapCount
-               after       mapCount
-       }
-
-       var testCases = []struct {
-               n      int     // n is the number of map elements
-               escape mapCase // expected values for escaping map
-       }{
-               {
-                       n: -(1 << 30),
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{0, 0},
-                               after:       mapCount{0, 0},
-                       },
-               },
-               {
-                       n: -1,
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{0, 0},
-                               after:       mapCount{0, 0},
-                       },
-               },
-               {
-                       n: 0,
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{0, 0},
-                               after:       mapCount{0, 0},
-                       },
-               },
-               {
-                       n: 1,
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{0, 0},
-                               after:       mapCount{0, 1},
-                       },
-               },
-               {
-                       n: abi.SwissMapGroupSlots,
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{0, 0},
-                               after:       mapCount{0, 1},
-                       },
-               },
-               {
-                       n: abi.SwissMapGroupSlots + 1,
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{1, 2},
-                               after:       mapCount{1, 2},
-                       },
-               },
-               {
-                       n: belowMax, // 1.5 group max = 2 groups @ 75%
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{1, 2},
-                               after:       mapCount{1, 2},
-                       },
-               },
-               {
-                       n: atMax, // 2 groups at max
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{1, 2},
-                               after:       mapCount{1, 2},
-                       },
-               },
-               {
-                       n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{1, 4},
-                               after:       mapCount{1, 4},
-                       },
-               },
-               {
-                       n: 2 * belowMax, // 3 * group max = 4 groups @75%
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{1, 4},
-                               after:       mapCount{1, 4},
-                       },
-               },
-               {
-                       n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
-                       escape: mapCase{
-                               initialLit:  mapCount{0, 0},
-                               initialHint: mapCount{1, 8},
-                               after:       mapCount{1, 8},
-                       },
-               },
-       }
-
-       testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) {
-               mm := *(**maps.Map)(unsafe.Pointer(&m))
-
-               gotTab := mm.TableCount()
-               if gotTab != initial.tables {
-                       t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables)
-               }
-
-               gotGroup := mm.GroupCount()
-               if gotGroup != initial.groups {
-                       t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups)
-               }
-
-               for i := 0; i < n; i++ {
-                       m[i] = i
-               }
-
-               gotTab = mm.TableCount()
-               if gotTab != after.tables {
-                       t.Errorf("after TableCount got %d want %d", gotTab, after.tables)
-               }
-
-               gotGroup = mm.GroupCount()
-               if gotGroup != after.groups {
-                       t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups)
-               }
-       }
-
-       t.Run("mapliteral", func(t *testing.T) {
-               for _, tc := range testCases {
-                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
-                               t.Run("escape", func(t *testing.T) {
-                                       m := escape(map[int]int{})
-                                       testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
-                               })
-                       })
-               }
-       })
-       t.Run("nohint", func(t *testing.T) {
-               for _, tc := range testCases {
-                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
-                               t.Run("escape", func(t *testing.T) {
-                                       m := escape(make(map[int]int))
-                                       testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
-                               })
-                       })
-               }
-       })
-       t.Run("makemap", func(t *testing.T) {
-               for _, tc := range testCases {
-                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
-                               t.Run("escape", func(t *testing.T) {
-                                       m := escape(make(map[int]int, tc.n))
-                                       testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
-                               })
-                       })
-               }
-       })
-       t.Run("makemap64", func(t *testing.T) {
-               for _, tc := range testCases {
-                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
-                               t.Run("escape", func(t *testing.T) {
-                                       m := escape(make(map[int]int, int64(tc.n)))
-                                       testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
-                               })
-                       })
-               }
-       })
-}
-
-func TestTombstoneGrow(t *testing.T) {
-       tableSizes := []int{16, 32, 64, 128, 256}
-       for _, tableSize := range tableSizes {
-               for _, load := range []string{"low", "mid", "high"} {
-                       capacity := tableSize * 7 / 8
-                       var initialElems int
-                       switch load {
-                       case "low":
-                               initialElems = capacity / 8
-                       case "mid":
-                               initialElems = capacity / 2
-                       case "high":
-                               initialElems = capacity
-                       }
-                       t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
-                               allocs := testing.AllocsPerRun(1, func() {
-                                       // Fill the map with elements.
-                                       m := make(map[int]int, capacity)
-                                       for i := range initialElems {
-                                               m[i] = i
-                                       }
-
-                                       // This is the heart of our test.
-                                       // Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
-                                       // while keeping the map at a ~constant number of elements (+/-1).
-                                       nextKey := initialElems
-                                       for range 100000 {
-                                               for k := range m {
-                                                       delete(m, k)
-                                                       break
-                                               }
-                                               m[nextKey] = nextKey
-                                               nextKey++
-                                               if len(m) != initialElems {
-                                                       t.Fatal("len(m) should remain constant")
-                                               }
-                                       }
-                               })
-
-                               // The make has 4 allocs (map, directory, table, groups).
-                               // Each growth has 2 allocs (table, groups).
-                               // We allow two growths if we start full, 1 otherwise.
-                               // Fail (somewhat arbitrarily) if there are more than that.
-                               allowed := float64(4 + 1*2)
-                               if initialElems == capacity {
-                                       allowed += 2
-                               }
-                               if allocs > allowed {
-                                       t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
-                               }
-                       })
-               }
-       }
-}
index 160450ebb2f9820b17e45af56c435063f1b02da4..a5fab548328b7568ab35609e18d7693662d8df7b 100644 (file)
@@ -699,3 +699,252 @@ func TestMapDeleteClear(t *testing.T) {
                t.Errorf("Delete(%d) failed to clear element. got %d want 0", key, gotElem)
        }
 }
+
+var alwaysFalse bool
+var escapeSink any
+
+func escape[T any](x T) T {
+       if alwaysFalse {
+               escapeSink = x
+       }
+       return x
+}
+
+const (
+       belowMax = abi.SwissMapGroupSlots * 3 / 2                                               // 1.5 * group max = 2 groups @ 75%
+       atMax    = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
+)
+
+func TestTableGroupCount(t *testing.T) {
+       // Test that maps of different sizes have the right number of
+       // tables/groups.
+
+       type mapCount struct {
+               tables int
+               groups uint64
+       }
+
+       type mapCase struct {
+               initialLit  mapCount
+               initialHint mapCount
+               after       mapCount
+       }
+
+       var testCases = []struct {
+               n      int     // n is the number of map elements
+               escape mapCase // expected values for escaping map
+       }{
+               {
+                       n: -(1 << 30),
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{0, 0},
+                               after:       mapCount{0, 0},
+                       },
+               },
+               {
+                       n: -1,
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{0, 0},
+                               after:       mapCount{0, 0},
+                       },
+               },
+               {
+                       n: 0,
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{0, 0},
+                               after:       mapCount{0, 0},
+                       },
+               },
+               {
+                       n: 1,
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{0, 0},
+                               after:       mapCount{0, 1},
+                       },
+               },
+               {
+                       n: abi.SwissMapGroupSlots,
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{0, 0},
+                               after:       mapCount{0, 1},
+                       },
+               },
+               {
+                       n: abi.SwissMapGroupSlots + 1,
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{1, 2},
+                               after:       mapCount{1, 2},
+                       },
+               },
+               {
+                       n: belowMax, // 1.5 group max = 2 groups @ 75%
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{1, 2},
+                               after:       mapCount{1, 2},
+                       },
+               },
+               {
+                       n: atMax, // 2 groups at max
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{1, 2},
+                               after:       mapCount{1, 2},
+                       },
+               },
+               {
+                       n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{1, 4},
+                               after:       mapCount{1, 4},
+                       },
+               },
+               {
+                       n: 2 * belowMax, // 3 * group max = 4 groups @75%
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{1, 4},
+                               after:       mapCount{1, 4},
+                       },
+               },
+               {
+                       n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
+                       escape: mapCase{
+                               initialLit:  mapCount{0, 0},
+                               initialHint: mapCount{1, 8},
+                               after:       mapCount{1, 8},
+                       },
+               },
+       }
+
+       testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) {
+               mm := *(**maps.Map)(unsafe.Pointer(&m))
+
+               gotTab := mm.TableCount()
+               if gotTab != initial.tables {
+                       t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables)
+               }
+
+               gotGroup := mm.GroupCount()
+               if gotGroup != initial.groups {
+                       t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups)
+               }
+
+               for i := 0; i < n; i++ {
+                       m[i] = i
+               }
+
+               gotTab = mm.TableCount()
+               if gotTab != after.tables {
+                       t.Errorf("after TableCount got %d want %d", gotTab, after.tables)
+               }
+
+               gotGroup = mm.GroupCount()
+               if gotGroup != after.groups {
+                       t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups)
+               }
+       }
+
+       t.Run("mapliteral", func(t *testing.T) {
+               for _, tc := range testCases {
+                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+                               t.Run("escape", func(t *testing.T) {
+                                       m := escape(map[int]int{})
+                                       testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
+                               })
+                       })
+               }
+       })
+       t.Run("nohint", func(t *testing.T) {
+               for _, tc := range testCases {
+                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+                               t.Run("escape", func(t *testing.T) {
+                                       m := escape(make(map[int]int))
+                                       testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
+                               })
+                       })
+               }
+       })
+       t.Run("makemap", func(t *testing.T) {
+               for _, tc := range testCases {
+                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+                               t.Run("escape", func(t *testing.T) {
+                                       m := escape(make(map[int]int, tc.n))
+                                       testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
+                               })
+                       })
+               }
+       })
+       t.Run("makemap64", func(t *testing.T) {
+               for _, tc := range testCases {
+                       t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+                               t.Run("escape", func(t *testing.T) {
+                                       m := escape(make(map[int]int, int64(tc.n)))
+                                       testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
+                               })
+                       })
+               }
+       })
+}
+
+func TestTombstoneGrow(t *testing.T) {
+       tableSizes := []int{16, 32, 64, 128, 256}
+       for _, tableSize := range tableSizes {
+               for _, load := range []string{"low", "mid", "high"} {
+                       capacity := tableSize * 7 / 8
+                       var initialElems int
+                       switch load {
+                       case "low":
+                               initialElems = capacity / 8
+                       case "mid":
+                               initialElems = capacity / 2
+                       case "high":
+                               initialElems = capacity
+                       }
+                       t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
+                               allocs := testing.AllocsPerRun(1, func() {
+                                       // Fill the map with elements.
+                                       m := make(map[int]int, capacity)
+                                       for i := range initialElems {
+                                               m[i] = i
+                                       }
+
+                                       // This is the heart of our test.
+                                       // Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
+                                       // while keeping the map at a ~constant number of elements (+/-1).
+                                       nextKey := initialElems
+                                       for range 100000 {
+                                               for k := range m {
+                                                       delete(m, k)
+                                                       break
+                                               }
+                                               m[nextKey] = nextKey
+                                               nextKey++
+                                               if len(m) != initialElems {
+                                                       t.Fatal("len(m) should remain constant")
+                                               }
+                                       }
+                               })
+
+                               // The make has 4 allocs (map, directory, table, groups).
+                               // Each growth has 2 allocs (table, groups).
+                               // We allow two growths if we start full, one otherwise.
+                               // Fail (somewhat arbitrarily) if there are more than that.
+                               allowed := float64(4 + 1*2)
+                               if initialElems == capacity {
+                                       allowed += 2
+                               }
+                               if allocs > allowed {
+                                       t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
+                               }
+                       })
+               }
+       }
+}
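
The constants at the top of these tests encode the table's load factor. Assuming abi.SwissMapGroupSlots is 8 and maps.MaxAvgGroupLoad is 7 (that is, groups grow at 7/8 full), the arithmetic works out to:

	const groupSlots, maxLoad = 8, 7 // assumed values
	const belowMax = groupSlots * 3 / 2                   // 12: two groups at 75%
	const atMax = (2 * groupSlots * maxLoad) / groupSlots // 14: two groups at 7/8
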
index 3d06f54f4d37ae71f00760a049ccd6291317880a..c4a3e07041f2d8fb97a9f98a820cbfa7efb10ea4 100644 (file)
@@ -6,6 +6,10 @@ package maps
 
 import (
        "internal/abi"
+       "internal/asan"
+       "internal/msan"
+       "internal/race"
+       "internal/runtime/sys"
        "unsafe"
 )
 
@@ -28,3 +32,337 @@ func newarray(typ *abi.Type, n int) unsafe.Pointer
 
 //go:linkname newobject
 func newobject(typ *abi.Type) unsafe.Pointer
+
+// Pushed from runtime in order to use runtime.plainError
+//
+//go:linkname errNilAssign
+var errNilAssign error
+
+// Pull from runtime. It is important that this is the exact same copy as the
+// runtime because runtime.mapaccess1_fat compares the returned pointer with
+// &runtime.zeroVal[0].
+// TODO: move zeroVal to internal/abi?
+//
+//go:linkname zeroVal runtime.zeroVal
+var zeroVal [abi.ZeroValSize]byte
+
+// mapaccess1 returns a pointer to h[key]. It never returns nil; instead,
+// it returns a reference to the zero object for the elem type if the
+// key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+//
+//go:linkname runtime_mapaccess1 runtime.mapaccess1
+func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+       if race.Enabled && m != nil {
+               callerpc := sys.GetCallerPC()
+               pc := abi.FuncPCABIInternal(runtime_mapaccess1)
+               race.ReadPC(unsafe.Pointer(m), callerpc, pc)
+               race.ReadObjectPC(typ.Key, key, callerpc, pc)
+       }
+       if msan.Enabled && m != nil {
+               msan.Read(key, typ.Key.Size_)
+       }
+       if asan.Enabled && m != nil {
+               asan.Read(key, typ.Key.Size_)
+       }
+
+       if m == nil || m.Used() == 0 {
+               if err := mapKeyError(typ, key); err != nil {
+                       panic(err) // see issue 23734
+               }
+               return unsafe.Pointer(&zeroVal[0])
+       }
+
+       if m.writing != 0 {
+               fatal("concurrent map read and map write")
+       }
+
+       hash := typ.Hasher(key, m.seed)
+
+       if m.dirLen <= 0 {
+               _, elem, ok := m.getWithKeySmall(typ, hash, key)
+               if !ok {
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+               return elem
+       }
+
+       // Select table.
+       idx := m.directoryIndex(hash)
+       t := m.directoryAt(idx)
+
+       // Probe table.
+       seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+       for ; ; seq = seq.next() {
+               g := t.groups.group(typ, seq.offset)
+
+               match := g.ctrls().matchH2(h2(hash))
+
+               for match != 0 {
+                       i := match.first()
+
+                       slotKey := g.key(typ, i)
+                       slotKeyOrig := slotKey
+                       if typ.IndirectKey() {
+                               slotKey = *((*unsafe.Pointer)(slotKey))
+                       }
+                       if typ.Key.Equal(key, slotKey) {
+                               slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+                               if typ.IndirectElem() {
+                                       slotElem = *((*unsafe.Pointer)(slotElem))
+                               }
+                               return slotElem
+                       }
+                       match = match.removeFirst()
+               }
+
+               match = g.ctrls().matchEmpty()
+               if match != 0 {
+                       // Finding an empty slot means we've reached the end of
+                       // the probe sequence.
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+       }
+}
+
+//go:linkname runtime_mapaccess2 runtime.mapaccess2
+func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+       if race.Enabled && m != nil {
+               callerpc := sys.GetCallerPC()
+               pc := abi.FuncPCABIInternal(runtime_mapaccess1)
+               race.ReadPC(unsafe.Pointer(m), callerpc, pc)
+               race.ReadObjectPC(typ.Key, key, callerpc, pc)
+       }
+       if msan.Enabled && m != nil {
+               msan.Read(key, typ.Key.Size_)
+       }
+       if asan.Enabled && m != nil {
+               asan.Read(key, typ.Key.Size_)
+       }
+
+       if m == nil || m.Used() == 0 {
+               if err := mapKeyError(typ, key); err != nil {
+                       panic(err) // see issue 23734
+               }
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+
+       if m.writing != 0 {
+               fatal("concurrent map read and map write")
+       }
+
+       hash := typ.Hasher(key, m.seed)
+
+       if m.dirLen == 0 {
+               _, elem, ok := m.getWithKeySmall(typ, hash, key)
+               if !ok {
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+               return elem, true
+       }
+
+       // Select table.
+       idx := m.directoryIndex(hash)
+       t := m.directoryAt(idx)
+
+       // Probe table.
+       seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+       for ; ; seq = seq.next() {
+               g := t.groups.group(typ, seq.offset)
+
+               match := g.ctrls().matchH2(h2(hash))
+
+               for match != 0 {
+                       i := match.first()
+
+                       slotKey := g.key(typ, i)
+                       slotKeyOrig := slotKey
+                       if typ.IndirectKey() {
+                               slotKey = *((*unsafe.Pointer)(slotKey))
+                       }
+                       if typ.Key.Equal(key, slotKey) {
+                               slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+                               if typ.IndirectElem() {
+                                       slotElem = *((*unsafe.Pointer)(slotElem))
+                               }
+                               return slotElem, true
+                       }
+                       match = match.removeFirst()
+               }
+
+               match = g.ctrls().matchEmpty()
+               if match != 0 {
+                       // Finding an empty slot means we've reached the end of
+                       // the probe sequence.
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+       }
+}
+
+//go:linkname runtime_mapassign runtime.mapassign
+func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+       if m == nil {
+               panic(errNilAssign)
+       }
+       if race.Enabled {
+               callerpc := sys.GetCallerPC()
+               pc := abi.FuncPCABIInternal(runtime_mapassign)
+               race.WritePC(unsafe.Pointer(m), callerpc, pc)
+               race.ReadObjectPC(typ.Key, key, callerpc, pc)
+       }
+       if msan.Enabled {
+               msan.Read(key, typ.Key.Size_)
+       }
+       if asan.Enabled {
+               asan.Read(key, typ.Key.Size_)
+       }
+       if m.writing != 0 {
+               fatal("concurrent map writes")
+       }
+
+       hash := typ.Hasher(key, m.seed)
+
+       // Set writing after calling Hasher, since Hasher may panic, in which
+       // case we have not actually done a write.
+       m.writing ^= 1 // toggle, see comment on writing
+
+       if m.dirPtr == nil {
+               m.growToSmall(typ)
+       }
+
+       if m.dirLen == 0 {
+               if m.used < abi.SwissMapGroupSlots {
+                       elem := m.putSlotSmall(typ, hash, key)
+
+                       if m.writing == 0 {
+                               fatal("concurrent map writes")
+                       }
+                       m.writing ^= 1
+
+                       return elem
+               }
+
+               // Can't fit another entry, grow to full size map.
+               m.growToTable(typ)
+       }
+
+       var slotElem unsafe.Pointer
+outer:
+       for {
+               // Select table.
+               idx := m.directoryIndex(hash)
+               t := m.directoryAt(idx)
+
+               seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+
+               // As we look for a match, keep track of the first deleted slot
+               // we find, which we'll use to insert the new entry if
+               // necessary.
+               var firstDeletedGroup groupReference
+               var firstDeletedSlot uintptr
+
+               for ; ; seq = seq.next() {
+                       g := t.groups.group(typ, seq.offset)
+                       match := g.ctrls().matchH2(h2(hash))
+
+                       // Look for an existing slot containing this key.
+                       for match != 0 {
+                               i := match.first()
+
+                               slotKey := g.key(typ, i)
+                               slotKeyOrig := slotKey
+                               if typ.IndirectKey() {
+                                       slotKey = *((*unsafe.Pointer)(slotKey))
+                               }
+                               if typ.Key.Equal(key, slotKey) {
+                                       if typ.NeedKeyUpdate() {
+                                               typedmemmove(typ.Key, slotKey, key)
+                                       }
+
+                                       slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+                                       if typ.IndirectElem() {
+                                               slotElem = *((*unsafe.Pointer)(slotElem))
+                                       }
+
+                                       t.checkInvariants(typ, m)
+                                       break outer
+                               }
+                               match = match.removeFirst()
+                       }
+
+                       // No existing slot for this key in this group. Is this the end
+                       // of the probe sequence?
+                       match = g.ctrls().matchEmpty()
+                       if match != 0 {
+                               // Finding an empty slot means we've reached the end of
+                               // the probe sequence.
+
+                               var i uintptr
+
+                               // If we found a deleted slot along the way, we
+                               // can replace it without consuming growthLeft.
+                               if firstDeletedGroup.data != nil {
+                                       g = firstDeletedGroup
+                                       i = firstDeletedSlot
+                                       t.growthLeft++ // will be decremented below to become a no-op.
+                               } else {
+                                       // Otherwise, use the empty slot.
+                                       i = match.first()
+                               }
+
+                               // If there is room left to grow, just insert the new entry.
+                               if t.growthLeft > 0 {
+                                       slotKey := g.key(typ, i)
+                                       slotKeyOrig := slotKey
+                                       if typ.IndirectKey() {
+                                               kmem := newobject(typ.Key)
+                                               *(*unsafe.Pointer)(slotKey) = kmem
+                                               slotKey = kmem
+                                       }
+                                       typedmemmove(typ.Key, slotKey, key)
+
+                                       slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+                                       if typ.IndirectElem() {
+                                               emem := newobject(typ.Elem)
+                                               *(*unsafe.Pointer)(slotElem) = emem
+                                               slotElem = emem
+                                       }
+
+                                       g.ctrls().set(i, ctrl(h2(hash)))
+                                       t.growthLeft--
+                                       t.used++
+                                       m.used++
+
+                                       t.checkInvariants(typ, m)
+                                       break outer
+                               }
+
+                               t.rehash(typ, m)
+                               continue outer
+                       }
+
+                       // No empty slots in this group. Check for a deleted
+                       // slot, which we'll use if we don't find a match later
+                       // in the probe sequence.
+                       //
+                       // We only need to remember a single deleted slot.
+                       if firstDeletedGroup.data == nil {
+                               // Since we already checked for empty slots
+                               // above, matches here must be deleted slots.
+                               match = g.ctrls().matchEmptyOrDeleted()
+                               if match != 0 {
+                                       firstDeletedGroup = g
+                                       firstDeletedSlot = match.first()
+                               }
+                       }
+               }
+       }
+
+       if m.writing == 0 {
+               fatal("concurrent map writes")
+       }
+       m.writing ^= 1
+
+       return slotElem
+}
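
The probe loops above split each hash exactly once: h2 (defined earlier in map.go as h & 0x7f) is the low seven bits stored per slot in the group's control word, while h1 selects where probing starts. A sketch of the split, assuming h1 is simply the remaining high bits:

	func h1(h uintptr) uintptr { return h >> 7 }   // probe start position
	func h2(h uintptr) uintptr { return h & 0x7f } // per-slot fingerprint
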
similarity index 99%
rename from src/internal/runtime/maps/runtime_fast32_swiss.go
rename to src/internal/runtime/maps/runtime_fast32.go
index d57d04252702944b62020466dd969fb090b2d3a9..aece31a2a4cbcdfc0b58200492c463d603d551e1 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package maps
 
 import (
similarity index 99%
rename from src/internal/runtime/maps/runtime_fast64_swiss.go
rename to src/internal/runtime/maps/runtime_fast64.go
index 461cb1d318225072b2fc7814cf86ead6017cf657..1e06d17837e0fc3bd25930fbbf18127e50671784 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package maps
 
 import (
similarity index 99%
rename from src/internal/runtime/maps/runtime_faststr_swiss.go
rename to src/internal/runtime/maps/runtime_faststr.go
index 0d7b02e20c28d2ecf06dfd846cba38b0933f9799..8c6944e6b878c10e304df621bfca9329a7de103f 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package maps
 
 import (
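
The file deleted below housed these entry points while they were swissmap-only; runtime.go above now carries them unconditionally. The //go:linkname directives are the glue: a function with a body plus a linkname to a runtime symbol pushes that body in as the symbol's implementation. The shape of the mechanism, with a hypothetical symbol (pushing into runtime only links inside the standard library):

	package maps

	import _ "unsafe" // required for go:linkname

	//go:linkname runtime_doThing runtime.doThing
	func runtime_doThing() int { // body becomes runtime.doThing
		return 42
	}
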
diff --git a/src/internal/runtime/maps/runtime_swiss.go b/src/internal/runtime/maps/runtime_swiss.go
deleted file mode 100644 (file)
index 3ea0181..0000000
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package maps
-
-import (
-       "internal/abi"
-       "internal/asan"
-       "internal/msan"
-       "internal/race"
-       "internal/runtime/sys"
-       "unsafe"
-)
-
-// Functions below pushed from runtime.
-
-// Pushed from runtime in order to use runtime.plainError
-//
-//go:linkname errNilAssign
-var errNilAssign error
-
-// Pull from runtime. It is important that is this the exact same copy as the
-// runtime because runtime.mapaccess1_fat compares the returned pointer with
-// &runtime.zeroVal[0].
-// TODO: move zeroVal to internal/abi?
-//
-//go:linkname zeroVal runtime.zeroVal
-var zeroVal [abi.ZeroValSize]byte
-
-// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
-// it will return a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-//
-//go:linkname runtime_mapaccess1 runtime.mapaccess1
-func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
-       if race.Enabled && m != nil {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(runtime_mapaccess1)
-               race.ReadPC(unsafe.Pointer(m), callerpc, pc)
-               race.ReadObjectPC(typ.Key, key, callerpc, pc)
-       }
-       if msan.Enabled && m != nil {
-               msan.Read(key, typ.Key.Size_)
-       }
-       if asan.Enabled && m != nil {
-               asan.Read(key, typ.Key.Size_)
-       }
-
-       if m == nil || m.Used() == 0 {
-               if err := mapKeyError(typ, key); err != nil {
-                       panic(err) // see issue 23734
-               }
-               return unsafe.Pointer(&zeroVal[0])
-       }
-
-       if m.writing != 0 {
-               fatal("concurrent map read and map write")
-       }
-
-       hash := typ.Hasher(key, m.seed)
-
-       if m.dirLen <= 0 {
-               _, elem, ok := m.getWithKeySmall(typ, hash, key)
-               if !ok {
-                       return unsafe.Pointer(&zeroVal[0])
-               }
-               return elem
-       }
-
-       // Select table.
-       idx := m.directoryIndex(hash)
-       t := m.directoryAt(idx)
-
-       // Probe table.
-       seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-       for ; ; seq = seq.next() {
-               g := t.groups.group(typ, seq.offset)
-
-               match := g.ctrls().matchH2(h2(hash))
-
-               for match != 0 {
-                       i := match.first()
-
-                       slotKey := g.key(typ, i)
-                       slotKeyOrig := slotKey
-                       if typ.IndirectKey() {
-                               slotKey = *((*unsafe.Pointer)(slotKey))
-                       }
-                       if typ.Key.Equal(key, slotKey) {
-                               slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
-                               if typ.IndirectElem() {
-                                       slotElem = *((*unsafe.Pointer)(slotElem))
-                               }
-                               return slotElem
-                       }
-                       match = match.removeFirst()
-               }
-
-               match = g.ctrls().matchEmpty()
-               if match != 0 {
-                       // Finding an empty slot means we've reached the end of
-                       // the probe sequence.
-                       return unsafe.Pointer(&zeroVal[0])
-               }
-       }
-}
-
-//go:linkname runtime_mapaccess2 runtime.mapaccess2
-func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
-       if race.Enabled && m != nil {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(runtime_mapaccess1)
-               race.ReadPC(unsafe.Pointer(m), callerpc, pc)
-               race.ReadObjectPC(typ.Key, key, callerpc, pc)
-       }
-       if msan.Enabled && m != nil {
-               msan.Read(key, typ.Key.Size_)
-       }
-       if asan.Enabled && m != nil {
-               asan.Read(key, typ.Key.Size_)
-       }
-
-       if m == nil || m.Used() == 0 {
-               if err := mapKeyError(typ, key); err != nil {
-                       panic(err) // see issue 23734
-               }
-               return unsafe.Pointer(&zeroVal[0]), false
-       }
-
-       if m.writing != 0 {
-               fatal("concurrent map read and map write")
-       }
-
-       hash := typ.Hasher(key, m.seed)
-
-       if m.dirLen == 0 {
-               _, elem, ok := m.getWithKeySmall(typ, hash, key)
-               if !ok {
-                       return unsafe.Pointer(&zeroVal[0]), false
-               }
-               return elem, true
-       }
-
-       // Select table.
-       idx := m.directoryIndex(hash)
-       t := m.directoryAt(idx)
-
-       // Probe table.
-       seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-       for ; ; seq = seq.next() {
-               g := t.groups.group(typ, seq.offset)
-
-               match := g.ctrls().matchH2(h2(hash))
-
-               for match != 0 {
-                       i := match.first()
-
-                       slotKey := g.key(typ, i)
-                       slotKeyOrig := slotKey
-                       if typ.IndirectKey() {
-                               slotKey = *((*unsafe.Pointer)(slotKey))
-                       }
-                       if typ.Key.Equal(key, slotKey) {
-                               slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
-                               if typ.IndirectElem() {
-                                       slotElem = *((*unsafe.Pointer)(slotElem))
-                               }
-                               return slotElem, true
-                       }
-                       match = match.removeFirst()
-               }
-
-               match = g.ctrls().matchEmpty()
-               if match != 0 {
-                       // Finding an empty slot means we've reached the end of
-                       // the probe sequence.
-                       return unsafe.Pointer(&zeroVal[0]), false
-               }
-       }
-}
-
-//go:linkname runtime_mapassign runtime.mapassign
-func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
-       if m == nil {
-               panic(errNilAssign)
-       }
-       if race.Enabled {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(runtime_mapassign)
-               race.WritePC(unsafe.Pointer(m), callerpc, pc)
-               race.ReadObjectPC(typ.Key, key, callerpc, pc)
-       }
-       if msan.Enabled {
-               msan.Read(key, typ.Key.Size_)
-       }
-       if asan.Enabled {
-               asan.Read(key, typ.Key.Size_)
-       }
-       if m.writing != 0 {
-               fatal("concurrent map writes")
-       }
-
-       hash := typ.Hasher(key, m.seed)
-
-       // Set writing after calling Hasher, since Hasher may panic, in which
-       // case we have not actually done a write.
-       m.writing ^= 1 // toggle, see comment on writing
-
-       if m.dirPtr == nil {
-               m.growToSmall(typ)
-       }
-
-       if m.dirLen == 0 {
-               if m.used < abi.SwissMapGroupSlots {
-                       elem := m.putSlotSmall(typ, hash, key)
-
-                       if m.writing == 0 {
-                               fatal("concurrent map writes")
-                       }
-                       m.writing ^= 1
-
-                       return elem
-               }
-
-               // Can't fit another entry, grow to full size map.
-               m.growToTable(typ)
-       }
-
-       var slotElem unsafe.Pointer
-outer:
-       for {
-               // Select table.
-               idx := m.directoryIndex(hash)
-               t := m.directoryAt(idx)
-
-               seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-
-               // As we look for a match, keep track of the first deleted slot
-               // we find, which we'll use to insert the new entry if
-               // necessary.
-               var firstDeletedGroup groupReference
-               var firstDeletedSlot uintptr
-
-               for ; ; seq = seq.next() {
-                       g := t.groups.group(typ, seq.offset)
-                       match := g.ctrls().matchH2(h2(hash))
-
-                       // Look for an existing slot containing this key.
-                       for match != 0 {
-                               i := match.first()
-
-                               slotKey := g.key(typ, i)
-                               slotKeyOrig := slotKey
-                               if typ.IndirectKey() {
-                                       slotKey = *((*unsafe.Pointer)(slotKey))
-                               }
-                               if typ.Key.Equal(key, slotKey) {
-                                       if typ.NeedKeyUpdate() {
-                                               typedmemmove(typ.Key, slotKey, key)
-                                       }
-
-                                       slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
-                                       if typ.IndirectElem() {
-                                               slotElem = *((*unsafe.Pointer)(slotElem))
-                                       }
-
-                                       t.checkInvariants(typ, m)
-                                       break outer
-                               }
-                               match = match.removeFirst()
-                       }
-
-                       // No existing slot for this key in this group. Is this the end
-                       // of the probe sequence?
-                       match = g.ctrls().matchEmpty()
-                       if match != 0 {
-                               // Finding an empty slot means we've reached the end of
-                               // the probe sequence.
-
-                               var i uintptr
-
-                               // If we found a deleted slot along the way, we
-                               // can replace it without consuming growthLeft.
-                               if firstDeletedGroup.data != nil {
-                                       g = firstDeletedGroup
-                                       i = firstDeletedSlot
-                                       t.growthLeft++ // will be decremented below to become a no-op.
-                               } else {
-                                       // Otherwise, use the empty slot.
-                                       i = match.first()
-                               }
-
-                               // If there is room left to grow, just insert the new entry.
-                               if t.growthLeft > 0 {
-                                       slotKey := g.key(typ, i)
-                                       slotKeyOrig := slotKey
-                                       if typ.IndirectKey() {
-                                               kmem := newobject(typ.Key)
-                                               *(*unsafe.Pointer)(slotKey) = kmem
-                                               slotKey = kmem
-                                       }
-                                       typedmemmove(typ.Key, slotKey, key)
-
-                                       slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
-                                       if typ.IndirectElem() {
-                                               emem := newobject(typ.Elem)
-                                               *(*unsafe.Pointer)(slotElem) = emem
-                                               slotElem = emem
-                                       }
-
-                                       g.ctrls().set(i, ctrl(h2(hash)))
-                                       t.growthLeft--
-                                       t.used++
-                                       m.used++
-
-                                       t.checkInvariants(typ, m)
-                                       break outer
-                               }
-
-                               t.rehash(typ, m)
-                               continue outer
-                       }
-
-                       // No empty slots in this group. Check for a deleted
-                       // slot, which we'll use if we don't find a match later
-                       // in the probe sequence.
-                       //
-                       // We only need to remember a single deleted slot.
-                       if firstDeletedGroup.data == nil {
-                               // Since we already checked for empty slots
-                               // above, matches here must be deleted slots.
-                               match = g.ctrls().matchEmptyOrDeleted()
-                               if match != 0 {
-                                       firstDeletedGroup = g
-                                       firstDeletedSlot = match.first()
-                               }
-                       }
-               }
-       }
-
-       if m.writing == 0 {
-               fatal("concurrent map writes")
-       }
-       m.writing ^= 1
-
-       return slotElem
-}
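
The runtime_mapassign deleted above is the Swiss-map implementation of runtime.mapassign: it splits the hash into h1 (which selects a group) and h2 (a 7-bit tag stored per slot), then walks groups in a triangular probe sequence until it finds the key, a reusable deleted slot, or an empty slot. A minimal, self-contained sketch of that probing scheme follows; the 8-slot group, the 7-bit h2 width, and the tiny table size are simplifications for illustration, not the runtime's real types.

    // Sketch of the h1/h2 split and triangular probe walk used above.
    package main

    import "fmt"

    func h1(h uintptr) uintptr { return h >> 7 }   // high bits select a group
    func h2(h uintptr) uintptr { return h & 0x7f } // low 7 bits stored per slot

    type probeSeq struct {
        mask, offset, index uintptr
    }

    func makeProbeSeq(hash, mask uintptr) probeSeq {
        return probeSeq{mask: mask, offset: hash & mask}
    }

    // next advances by 1, 2, 3, ... (triangular numbers), which visits every
    // group exactly once when the group count is a power of two.
    func (s probeSeq) next() probeSeq {
        s.index++
        s.offset = (s.offset + s.index) & s.mask
        return s
    }

    func main() {
        seq := makeProbeSeq(h1(0x9e3779b9), 7) // assumed 8-group table
        for i := 0; i < 4; i++ {
            fmt.Println("probe group", seq.offset)
            seq = seq.next()
        }
    }
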
index cd3e306a5753e561dc564059b4499823ea0ca306..da7b2d77640f571c05a8bbf29377656349fc221b 100644 (file)
@@ -12,7 +12,6 @@ import (
        "go/token"
        "internal/asan"
        "internal/goarch"
-       "internal/goexperiment"
        "internal/msan"
        "internal/race"
        "internal/testenv"
@@ -1277,10 +1276,6 @@ var deepEqualPerfTests = []struct {
 }
 
 func TestDeepEqualAllocs(t *testing.T) {
-       // TODO(prattmic): maps on stack
-       if goexperiment.SwissMap {
-               t.Skipf("Maps on stack not yet implemented")
-       }
        if asan.Enabled {
                t.Skip("test allocates more with -asan; see #70079")
        }
@@ -7343,7 +7338,8 @@ func TestGCBits(t *testing.T) {
        verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
        verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
 
-       testGCBitsMap(t)
+       // For maps, we don't manually construct GC data, instead using the
+       // public reflect API in groupAndSlotOf.
 }
 
 func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
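
With the experiment gone, TestDeepEqualAllocs above runs unconditionally (except under -asan). Allocation tests of this kind are built on testing.AllocsPerRun; a standalone sketch of the pattern, with arbitrary map values:

    package main

    import (
        "fmt"
        "reflect"
        "testing"
    )

    func main() {
        x := map[int]int{1: 1, 2: 2}
        y := map[int]int{1: 1, 2: 2}
        // AllocsPerRun reports the average number of heap allocations per call.
        n := testing.AllocsPerRun(100, func() {
            if !reflect.DeepEqual(x, y) {
                panic("values should be deeply equal")
            }
        })
        fmt.Println("allocs per run:", n)
    }
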
diff --git a/src/reflect/export_noswiss_test.go b/src/reflect/export_noswiss_test.go
deleted file mode 100644 (file)
index 286ba63..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package reflect
-
-import (
-       "unsafe"
-)
-
-func MapBucketOf(x, y Type) Type {
-       return toType(bucketOf(x.common(), y.common()))
-}
-
-func CachedBucketOf(m Type) Type {
-       t := m.(*rtype)
-       if Kind(t.t.Kind()) != Map {
-               panic("not map")
-       }
-       tt := (*mapType)(unsafe.Pointer(t))
-       return toType(tt.Bucket)
-}
diff --git a/src/reflect/export_swiss_test.go b/src/reflect/export_swiss_test.go
deleted file mode 100644 (file)
index ac3cd0a..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package reflect
-
-func MapGroupOf(x, y Type) Type {
-       grp, _ := groupAndSlotOf(x, y)
-       return grp
-}
index eedd063fcb8e7de1ebeffb57b3fecfdf4689cb4d..fc209fdfbab190b314f989f5314a2e736eb00ae8 100644 (file)
@@ -152,3 +152,8 @@ var MethodValueCallCodePtr = methodValueCallCodePtr
 var InternalIsZero = isZero
 
 var IsRegularMemory = isRegularMemory
+
+func MapGroupOf(x, y Type) Type {
+       grp, _ := groupAndSlotOf(x, y)
+       return grp
+}
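
MapGroupOf is now exported for tests unconditionally. groupAndSlotOf builds the map group type out of the public reflect API, so its GC metadata comes from StructOf/ArrayOf rather than hand-rolled bitmaps. A rough sketch of that style of construction; the one-control-word, 8-slot shape and the field names are assumptions for illustration, not the real layout:

    package main

    import (
        "fmt"
        "reflect"
    )

    func groupOf(k, e reflect.Type) reflect.Type {
        slot := reflect.StructOf([]reflect.StructField{
            {Name: "Key", Type: k},
            {Name: "Elem", Type: e},
        })
        // One control word plus an array of slots; the GC bitmap of the
        // result falls out of StructOf/ArrayOf for free.
        return reflect.StructOf([]reflect.StructField{
            {Name: "Ctrl", Type: reflect.TypeOf(uint64(0))},
            {Name: "Slots", Type: reflect.ArrayOf(8, slot)},
        })
    }

    func main() {
        g := groupOf(reflect.TypeOf(int(0)), reflect.TypeOf(""))
        fmt.Println(g.Size(), g.NumField())
    }
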
similarity index 93%
rename from src/reflect/map_swiss.go
rename to src/reflect/map.go
index 2f31d6689ffde5115bc4f55f4d78ada6c6fad657..a9cc5b1162c23b37b76af96b1887778c89c11565 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package reflect
 
 import (
@@ -14,16 +12,11 @@ import (
        "unsafe"
 )
 
-// mapType represents a map type.
-//
-// TODO(prattmic): Only used within this file, could be cleaned up.
-type mapType = abi.SwissMapType
-
 func (t *rtype) Key() Type {
        if t.Kind() != Map {
                panic("reflect: Key of non-map type " + t.String())
        }
-       tt := (*mapType)(unsafe.Pointer(t))
+       tt := (*abi.SwissMapType)(unsafe.Pointer(t))
        return toType(tt.Key)
 }
 
@@ -50,7 +43,7 @@ func MapOf(key, elem Type) Type {
        // Look in known types.
        s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
        for _, tt := range typesByString(s) {
-               mt := (*mapType)(unsafe.Pointer(tt))
+               mt := (*abi.SwissMapType)(unsafe.Pointer(tt))
                if mt.Key == ktyp && mt.Elem == etyp {
                        ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
                        return ti.(Type)
@@ -63,7 +56,7 @@ func MapOf(key, elem Type) Type {
        // Note: flag values must match those used in the TMAP case
        // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
        var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
-       mt := **(**mapType)(unsafe.Pointer(&imap))
+       mt := **(**abi.SwissMapType)(unsafe.Pointer(&imap))
        mt.Str = resolveReflectName(newName(s, "", false, false))
        mt.TFlag = abi.TFlagDirectIface
        mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
@@ -145,7 +138,7 @@ var stringType = rtypeOf("")
 // As in Go, the key's value must be assignable to the map's key type.
 func (v Value) MapIndex(key Value) Value {
        v.mustBe(Map)
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
+       tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
 
        // Do not require key to be exported, so that DeepEqual
        // and other programs can use all the keys returned by
@@ -209,7 +202,7 @@ func mapIterNext(it *maps.Iter) {
 // It returns an empty slice if v represents a nil map.
 func (v Value) MapKeys() []Value {
        v.mustBe(Map)
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
+       tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
        keyType := tt.Key
 
        fl := v.flag.ro() | flag(keyType.Kind())
@@ -248,10 +241,6 @@ type MapIter struct {
        hiter maps.Iter
 }
 
-// TODO(prattmic): only for sharing the linkname declarations with old maps.
-// Remove with old maps.
-type hiter = maps.Iter
-
 // Key returns the key of iter's current map entry.
 func (iter *MapIter) Key() Value {
        if !iter.hiter.Initialized() {
@@ -262,7 +251,7 @@ func (iter *MapIter) Key() Value {
                panic("MapIter.Key called on exhausted iterator")
        }
 
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
        ktype := t.Key
        return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
 }
@@ -287,7 +276,7 @@ func (v Value) SetIterKey(iter *MapIter) {
                target = v.ptr
        }
 
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
        ktype := t.Key
 
        iter.m.mustBeExported() // do not let unexported m leak
@@ -306,7 +295,7 @@ func (iter *MapIter) Value() Value {
                panic("MapIter.Value called on exhausted iterator")
        }
 
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
        vtype := t.Elem
        return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
 }
@@ -331,7 +320,7 @@ func (v Value) SetIterValue(iter *MapIter) {
                target = v.ptr
        }
 
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+       t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
        vtype := t.Elem
 
        iter.m.mustBeExported() // do not let unexported m leak
@@ -348,7 +337,7 @@ func (iter *MapIter) Next() bool {
                panic("MapIter.Next called on an iterator that does not have an associated map Value")
        }
        if !iter.hiter.Initialized() {
-               t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+               t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
                m := (*maps.Map)(iter.m.pointer())
                mapIterStart(t, m, &iter.hiter)
        } else {
@@ -408,7 +397,7 @@ func (v Value) SetMapIndex(key, elem Value) {
        v.mustBe(Map)
        v.mustBeExported()
        key.mustBeExported()
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
+       tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
 
        if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
                k := *(*string)(key.ptr)
diff --git a/src/reflect/map_noswiss.go b/src/reflect/map_noswiss.go
deleted file mode 100644 (file)
index 36f6fab..0000000
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package reflect
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       "unsafe"
-)
-
-// mapType represents a map type.
-type mapType struct {
-       abi.OldMapType
-}
-
-// Pushed from runtime.
-
-//go:noescape
-func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter)
-
-//go:noescape
-func mapiternext(it *hiter)
-
-func (t *rtype) Key() Type {
-       if t.Kind() != Map {
-               panic("reflect: Key of non-map type " + t.String())
-       }
-       tt := (*mapType)(unsafe.Pointer(t))
-       return toType(tt.Key)
-}
-
-// MapOf returns the map type with the given key and element types.
-// For example, if k represents int and e represents string,
-// MapOf(k, e) represents map[int]string.
-//
-// If the key type is not a valid map key type (that is, if it does
-// not implement Go's == operator), MapOf panics.
-func MapOf(key, elem Type) Type {
-       ktyp := key.common()
-       etyp := elem.common()
-
-       if ktyp.Equal == nil {
-               panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
-       }
-
-       // Look in cache.
-       ckey := cacheKey{Map, ktyp, etyp, 0}
-       if mt, ok := lookupCache.Load(ckey); ok {
-               return mt.(Type)
-       }
-
-       // Look in known types.
-       s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
-       for _, tt := range typesByString(s) {
-               mt := (*mapType)(unsafe.Pointer(tt))
-               if mt.Key == ktyp && mt.Elem == etyp {
-                       ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
-                       return ti.(Type)
-               }
-       }
-
-       // Make a map type.
-       // Note: flag values must match those used in the TMAP case
-       // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-       var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
-       mt := **(**mapType)(unsafe.Pointer(&imap))
-       mt.Str = resolveReflectName(newName(s, "", false, false))
-       mt.TFlag = abi.TFlagDirectIface
-       mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
-       mt.Key = ktyp
-       mt.Elem = etyp
-       mt.Bucket = bucketOf(ktyp, etyp)
-       mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
-               return typehash(ktyp, p, seed)
-       }
-       mt.Flags = 0
-       if ktyp.Size_ > abi.OldMapMaxKeyBytes {
-               mt.KeySize = uint8(goarch.PtrSize)
-               mt.Flags |= 1 // indirect key
-       } else {
-               mt.KeySize = uint8(ktyp.Size_)
-       }
-       if etyp.Size_ > abi.OldMapMaxElemBytes {
-               mt.ValueSize = uint8(goarch.PtrSize)
-               mt.Flags |= 2 // indirect value
-       } else {
-               mt.ValueSize = uint8(etyp.Size_)
-       }
-       mt.BucketSize = uint16(mt.Bucket.Size_)
-       if isReflexive(ktyp) {
-               mt.Flags |= 4
-       }
-       if needKeyUpdate(ktyp) {
-               mt.Flags |= 8
-       }
-       if hashMightPanic(ktyp) {
-               mt.Flags |= 16
-       }
-       mt.PtrToThis = 0
-
-       ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
-       return ti.(Type)
-}
-
-func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
-       if ktyp.Size_ > abi.OldMapMaxKeyBytes {
-               ktyp = ptrTo(ktyp)
-       }
-       if etyp.Size_ > abi.OldMapMaxElemBytes {
-               etyp = ptrTo(etyp)
-       }
-
-       // Prepare GC data if any.
-       // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
-       // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
-       // Note that since the key and value are known to be <= 128 bytes,
-       // they're guaranteed to have bitmaps instead of GC programs.
-       var gcdata *byte
-       var ptrdata uintptr
-
-       size := abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
-       if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
-               panic("reflect: bad size computation in MapOf")
-       }
-
-       if ktyp.Pointers() || etyp.Pointers() {
-               nptr := (abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
-               n := (nptr + 7) / 8
-
-               // Runtime needs pointer masks to be a multiple of uintptr in size.
-               n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
-               mask := make([]byte, n)
-               base := uintptr(abi.OldMapBucketCount / goarch.PtrSize)
-
-               if ktyp.Pointers() {
-                       emitGCMask(mask, base, ktyp, abi.OldMapBucketCount)
-               }
-               base += abi.OldMapBucketCount * ktyp.Size_ / goarch.PtrSize
-
-               if etyp.Pointers() {
-                       emitGCMask(mask, base, etyp, abi.OldMapBucketCount)
-               }
-               base += abi.OldMapBucketCount * etyp.Size_ / goarch.PtrSize
-
-               word := base
-               mask[word/8] |= 1 << (word % 8)
-               gcdata = &mask[0]
-               ptrdata = (word + 1) * goarch.PtrSize
-
-               // overflow word must be last
-               if ptrdata != size {
-                       panic("reflect: bad layout computation in MapOf")
-               }
-       }
-
-       b := &abi.Type{
-               Align_:   goarch.PtrSize,
-               Size_:    size,
-               Kind_:    abi.Struct,
-               PtrBytes: ptrdata,
-               GCData:   gcdata,
-       }
-       s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
-       b.Str = resolveReflectName(newName(s, "", false, false))
-       return b
-}
-
-var stringType = rtypeOf("")
-
-// MapIndex returns the value associated with key in the map v.
-// It panics if v's Kind is not [Map].
-// It returns the zero Value if key is not found in the map or if v represents a nil map.
-// As in Go, the key's value must be assignable to the map's key type.
-func (v Value) MapIndex(key Value) Value {
-       v.mustBe(Map)
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
-
-       // Do not require key to be exported, so that DeepEqual
-       // and other programs can use all the keys returned by
-       // MapKeys as arguments to MapIndex. If either the map
-       // or the key is unexported, though, the result will be
-       // considered unexported. This is consistent with the
-       // behavior for structs, which allow read but not write
-       // of unexported fields.
-
-       var e unsafe.Pointer
-       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
-               k := *(*string)(key.ptr)
-               e = mapaccess_faststr(v.typ(), v.pointer(), k)
-       } else {
-               key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
-               var k unsafe.Pointer
-               if key.flag&flagIndir != 0 {
-                       k = key.ptr
-               } else {
-                       k = unsafe.Pointer(&key.ptr)
-               }
-               e = mapaccess(v.typ(), v.pointer(), k)
-       }
-       if e == nil {
-               return Value{}
-       }
-       typ := tt.Elem
-       fl := (v.flag | key.flag).ro()
-       fl |= flag(typ.Kind())
-       return copyVal(typ, fl, e)
-}
-
-// MapKeys returns a slice containing all the keys present in the map,
-// in unspecified order.
-// It panics if v's Kind is not [Map].
-// It returns an empty slice if v represents a nil map.
-func (v Value) MapKeys() []Value {
-       v.mustBe(Map)
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
-       keyType := tt.Key
-
-       fl := v.flag.ro() | flag(keyType.Kind())
-
-       m := v.pointer()
-       mlen := int(0)
-       if m != nil {
-               mlen = maplen(m)
-       }
-       var it hiter
-       mapiterinit(v.typ(), m, &it)
-       a := make([]Value, mlen)
-       var i int
-       for i = 0; i < len(a); i++ {
-               key := it.key
-               if key == nil {
-                       // Someone deleted an entry from the map since we
-                       // called maplen above. It's a data race, but nothing
-                       // we can do about it.
-                       break
-               }
-               a[i] = copyVal(keyType, fl, key)
-               mapiternext(&it)
-       }
-       return a[:i]
-}
-
-// hiter's structure matches runtime.hiter's structure.
-// Having a clone here allows us to embed a map iterator
-// inside type MapIter so that MapIters can be re-used
-// without doing any allocations.
-type hiter struct {
-       key         unsafe.Pointer
-       elem        unsafe.Pointer
-       t           unsafe.Pointer
-       h           unsafe.Pointer
-       buckets     unsafe.Pointer
-       bptr        unsafe.Pointer
-       overflow    *[]unsafe.Pointer
-       oldoverflow *[]unsafe.Pointer
-       startBucket uintptr
-       offset      uint8
-       wrapped     bool
-       B           uint8
-       i           uint8
-       bucket      uintptr
-       checkBucket uintptr
-       clearSeq    uint64
-}
-
-func (h *hiter) initialized() bool {
-       return h.t != nil
-}
-
-// A MapIter is an iterator for ranging over a map.
-// See [Value.MapRange].
-type MapIter struct {
-       m     Value
-       hiter hiter
-}
-
-// Key returns the key of iter's current map entry.
-func (iter *MapIter) Key() Value {
-       if !iter.hiter.initialized() {
-               panic("MapIter.Key called before Next")
-       }
-       iterkey := iter.hiter.key
-       if iterkey == nil {
-               panic("MapIter.Key called on exhausted iterator")
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       ktype := t.Key
-       return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
-}
-
-// SetIterKey assigns to v the key of iter's current map entry.
-// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
-// As in Go, the key must be assignable to v's type and
-// must not be derived from an unexported field.
-// It panics if [Value.CanSet] returns false.
-func (v Value) SetIterKey(iter *MapIter) {
-       if !iter.hiter.initialized() {
-               panic("reflect: Value.SetIterKey called before Next")
-       }
-       iterkey := iter.hiter.key
-       if iterkey == nil {
-               panic("reflect: Value.SetIterKey called on exhausted iterator")
-       }
-
-       v.mustBeAssignable()
-       var target unsafe.Pointer
-       if v.kind() == Interface {
-               target = v.ptr
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       ktype := t.Key
-
-       iter.m.mustBeExported() // do not let unexported m leak
-       key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
-       key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
-       typedmemmove(v.typ(), v.ptr, key.ptr)
-}
-
-// Value returns the value of iter's current map entry.
-func (iter *MapIter) Value() Value {
-       if !iter.hiter.initialized() {
-               panic("MapIter.Value called before Next")
-       }
-       iterelem := iter.hiter.elem
-       if iterelem == nil {
-               panic("MapIter.Value called on exhausted iterator")
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       vtype := t.Elem
-       return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
-}
-
-// SetIterValue assigns to v the value of iter's current map entry.
-// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
-// As in Go, the value must be assignable to v's type and
-// must not be derived from an unexported field.
-// It panics if [Value.CanSet] returns false.
-func (v Value) SetIterValue(iter *MapIter) {
-       if !iter.hiter.initialized() {
-               panic("reflect: Value.SetIterValue called before Next")
-       }
-       iterelem := iter.hiter.elem
-       if iterelem == nil {
-               panic("reflect: Value.SetIterValue called on exhausted iterator")
-       }
-
-       v.mustBeAssignable()
-       var target unsafe.Pointer
-       if v.kind() == Interface {
-               target = v.ptr
-       }
-
-       t := (*mapType)(unsafe.Pointer(iter.m.typ()))
-       vtype := t.Elem
-
-       iter.m.mustBeExported() // do not let unexported m leak
-       elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
-       elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
-       typedmemmove(v.typ(), v.ptr, elem.ptr)
-}
-
-// Next advances the map iterator and reports whether there is another
-// entry. It returns false when iter is exhausted; subsequent
-// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
-func (iter *MapIter) Next() bool {
-       if !iter.m.IsValid() {
-               panic("MapIter.Next called on an iterator that does not have an associated map Value")
-       }
-       if !iter.hiter.initialized() {
-               mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
-       } else {
-               if iter.hiter.key == nil {
-                       panic("MapIter.Next called on exhausted iterator")
-               }
-               mapiternext(&iter.hiter)
-       }
-       return iter.hiter.key != nil
-}
-
-// Reset modifies iter to iterate over v.
-// It panics if v's Kind is not [Map] and v is not the zero Value.
-// Reset(Value{}) causes iter to not refer to any map,
-// which may allow the previously iterated-over map to be garbage collected.
-func (iter *MapIter) Reset(v Value) {
-       if v.IsValid() {
-               v.mustBe(Map)
-       }
-       iter.m = v
-       iter.hiter = hiter{}
-}
-
-// MapRange returns a range iterator for a map.
-// It panics if v's Kind is not [Map].
-//
-// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
-// [MapIter.Next] returns false when the iterator is exhausted.
-// MapRange follows the same iteration semantics as a range statement.
-//
-// Example:
-//
-//     iter := reflect.ValueOf(m).MapRange()
-//     for iter.Next() {
-//             k := iter.Key()
-//             v := iter.Value()
-//             ...
-//     }
-func (v Value) MapRange() *MapIter {
-       // This is inlinable to take advantage of "function outlining".
-       // The allocation of MapIter can be stack allocated if the caller
-       // does not allow it to escape.
-       // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
-       if v.kind() != Map {
-               v.panicNotMap()
-       }
-       return &MapIter{m: v}
-}
-
-// SetMapIndex sets the element associated with key in the map v to elem.
-// It panics if v's Kind is not [Map].
-// If elem is the zero Value, SetMapIndex deletes the key from the map.
-// Otherwise if v holds a nil map, SetMapIndex will panic.
-// As in Go, key's elem must be assignable to the map's key type,
-// and elem's value must be assignable to the map's elem type.
-func (v Value) SetMapIndex(key, elem Value) {
-       v.mustBe(Map)
-       v.mustBeExported()
-       key.mustBeExported()
-       tt := (*mapType)(unsafe.Pointer(v.typ()))
-
-       if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
-               k := *(*string)(key.ptr)
-               if elem.typ() == nil {
-                       mapdelete_faststr(v.typ(), v.pointer(), k)
-                       return
-               }
-               elem.mustBeExported()
-               elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
-               var e unsafe.Pointer
-               if elem.flag&flagIndir != 0 {
-                       e = elem.ptr
-               } else {
-                       e = unsafe.Pointer(&elem.ptr)
-               }
-               mapassign_faststr(v.typ(), v.pointer(), k, e)
-               return
-       }
-
-       key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
-       var k unsafe.Pointer
-       if key.flag&flagIndir != 0 {
-               k = key.ptr
-       } else {
-               k = unsafe.Pointer(&key.ptr)
-       }
-       if elem.typ() == nil {
-               mapdelete(v.typ(), v.pointer(), k)
-               return
-       }
-       elem.mustBeExported()
-       elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
-       var e unsafe.Pointer
-       if elem.flag&flagIndir != 0 {
-               e = elem.ptr
-       } else {
-               e = unsafe.Pointer(&elem.ptr)
-       }
-       mapassign(v.typ(), v.pointer(), k, e)
-}
-
-// Force slow panicking path not inlined, so it won't add to the
-// inlining budget of the caller.
-// TODO: undo when the inliner is no longer bottom-up only.
-//
-//go:noinline
-func (f flag) panicNotMap() {
-       f.mustBe(Map)
-}
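
The deleted bucketOf hand-assembled a GC pointer bitmap for the old bucket layout: one bit per pointer-sized word, with the bucket's trailing overflow pointer required to be the last marked word. A stripped-down sketch of that bookkeeping, with an illustrative bucket size:

    package main

    import "fmt"

    // setWord marks one pointer-sized word as containing a pointer,
    // mirroring the mask[word/8] |= 1 << (word % 8) step above.
    func setWord(mask []byte, word uintptr) {
        mask[word/8] |= 1 << (word % 8)
    }

    func main() {
        const words = 16 // assumed bucket size in words
        mask := make([]byte, (words+7)/8)
        setWord(mask, words-1) // the overflow pointer word must come last
        fmt.Printf("%08b %08b\n", mask[0], mask[1])
    }
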
diff --git a/src/reflect/map_noswiss_test.go b/src/reflect/map_noswiss_test.go
deleted file mode 100644 (file)
index 52fcf89..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package reflect_test
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       . "reflect"
-       "testing"
-)
-
-func testGCBitsMap(t *testing.T) {
-       const bucketCount = abi.OldMapBucketCount
-
-       hdr := make([]byte, bucketCount/goarch.PtrSize)
-
-       verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
-               verifyGCBits(t, MapBucketOf(k, e), want)
-               verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
-       }
-       verifyMapBucket(t,
-               Tscalar, Tptr,
-               map[Xscalar]Xptr(nil),
-               join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
-       verifyMapBucket(t,
-               Tscalarptr, Tptr,
-               map[Xscalarptr]Xptr(nil),
-               join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
-       verifyMapBucket(t, Tint64, Tptr,
-               map[int64]Xptr(nil),
-               join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
-       verifyMapBucket(t,
-               Tscalar, Tscalar,
-               map[Xscalar]Xscalar(nil),
-               empty)
-       verifyMapBucket(t,
-               ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
-               map[[2]Xscalarptr][3]Xptrscalar(nil),
-               join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
-       verifyMapBucket(t,
-               ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
-               map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
-               join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
-       verifyMapBucket(t,
-               ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
-               map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
-               join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
-       verifyMapBucket(t,
-               ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
-               map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
-               join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
-       verifyMapBucket(t,
-               ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
-               map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
-               join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
-}
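
The deleted test spelled out expected bucket bitmaps using small combinators (rep appears above, built on bytes.Repeat). A self-contained analogue of that style, with illustrative counts:

    package main

    import (
        "bytes"
        "fmt"
    )

    func lit(x ...byte) []byte       { return x }
    func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
    func join(s ...[]byte) []byte    { return bytes.Join(s, nil) }

    func main() {
        // 8 scalar keys (0), 8 pointer elems (1), then the overflow pointer (1).
        want := join(rep(8, lit(0)), rep(8, lit(1)), lit(1))
        fmt.Println(want)
    }
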
similarity index 76%
rename from src/reflect/map_swiss_test.go
rename to src/reflect/map_test.go
index 621140aa60586ec5edb76eeaa7165930b1fc1454..621b5fdd7306bc5a906edead8bcce75c3d34b1f0 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package reflect_test
 
 import (
@@ -11,11 +9,6 @@ import (
        "testing"
 )
 
-func testGCBitsMap(t *testing.T) {
-       // Unlike old maps, we don't manually construct GC data for swiss maps,
-       // instead using the public reflect API in groupAndSlotOf.
-}
-
 // See also runtime_test.TestGroupSizeZero.
 func TestGroupSizeZero(t *testing.T) {
        st := reflect.TypeFor[struct{}]()
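
The surviving TestGroupSizeZero exercises map[struct{}]struct{}, where key and element are both zero-size, so a group's slot array would collapse to nothing; a small demonstration of the degenerate sizes involved:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        st := reflect.TypeFor[struct{}]()
        m := map[struct{}]struct{}{{}: {}}
        fmt.Println(st.Size(), len(m)) // 0 1
    }
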
diff --git a/src/runtime/export_map_noswiss_test.go b/src/runtime/export_map_noswiss_test.go
deleted file mode 100644 (file)
index 4638afa..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-       "internal/abi"
-       "unsafe"
-)
-
-const RuntimeHmapSize = unsafe.Sizeof(hmap{})
-
-func OverLoadFactor(count int, B uint8) bool {
-       return overLoadFactor(count, B)
-}
-
-func MapBucketsCount(m map[int]int) int {
-       h := *(**hmap)(unsafe.Pointer(&m))
-       return 1 << h.B
-}
-
-func MapBucketsPointerIsNil(m map[int]int) bool {
-       h := *(**hmap)(unsafe.Pointer(&m))
-       return h.buckets == nil
-}
-
-func MapTombstoneCheck(m map[int]int) {
-       // Make sure emptyOne and emptyRest are distributed correctly.
-       // We should have a series of filled and emptyOne cells, followed by
-       // a series of emptyRest cells.
-       h := *(**hmap)(unsafe.Pointer(&m))
-       i := any(m)
-       t := *(**maptype)(unsafe.Pointer(&i))
-
-       for x := 0; x < 1<<h.B; x++ {
-               b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
-               n := 0
-               for b := b0; b != nil; b = b.overflow(t) {
-                       for i := 0; i < abi.OldMapBucketCount; i++ {
-                               if b.tophash[i] != emptyRest {
-                                       n++
-                               }
-                       }
-               }
-               k := 0
-               for b := b0; b != nil; b = b.overflow(t) {
-                       for i := 0; i < abi.OldMapBucketCount; i++ {
-                               if k < n && b.tophash[i] == emptyRest {
-                                       panic("early emptyRest")
-                               }
-                               if k >= n && b.tophash[i] != emptyRest {
-                                       panic("late non-emptyRest")
-                               }
-                               if k == n-1 && b.tophash[i] == emptyOne {
-                                       panic("last non-emptyRest entry is emptyOne")
-                               }
-                               k++
-                       }
-               }
-       }
-}
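
The deleted MapTombstoneCheck verified the old layout's tombstone invariant: within a bucket chain, once an emptyRest cell appears, every later cell must also be emptyRest. A hedged, self-contained sketch of that check; the sentinel values here are stand-ins, not the runtime's tophash constants:

    package main

    import "fmt"

    const (
        emptyRest = iota // empty, and nothing after it is in use
        emptyOne         // empty, but later cells may still hold entries
        filled
    )

    func check(tophash []int) error {
        seenRest := false
        for i, th := range tophash {
            switch {
            case th == emptyRest:
                seenRest = true
            case seenRest:
                return fmt.Errorf("cell %d: non-emptyRest after emptyRest", i)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(check([]int{filled, emptyOne, emptyRest, emptyRest})) // <nil>
        fmt.Println(check([]int{filled, emptyRest, filled}))              // error
    }
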
diff --git a/src/runtime/export_map_swiss_test.go b/src/runtime/export_map_swiss_test.go
deleted file mode 100644 (file)
index 55a7d6f..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package runtime
-
-func MapTombstoneCheck(m map[int]int) {
-       // TODO
-}
index 3af1362ee2553704b5d82dda03981b44537e1c96..fa30efccb1efe32b0ce6494ed095554b1b24ed9d 100644 (file)
@@ -59,9 +59,6 @@ const CrashStackImplemented = crashStackImplemented
 const TracebackInnerFrames = tracebackInnerFrames
 const TracebackOuterFrames = tracebackOuterFrames
 
-var MapKeys = keys
-var MapValues = values
-
 var LockPartialOrder = lockPartialOrder
 
 type TimeTimer = timeTimer
similarity index 98%
rename from src/runtime/linkname_swiss.go
rename to src/runtime/linkname_shim.go
index 1be724477e5a503b688c5edd4c1703f3cdca55d3..0ceff2b16c7c0374400d972434d1f62c3665012b 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package runtime
 
 import (
@@ -16,8 +14,7 @@ import (
 // Legacy //go:linkname compatibility shims
 //
 // The functions below are unused by the toolchain, and exist only for
-// compatibility with existing //go:linkname use in the ecosystem (and in
-// map_noswiss.go for normal use via GOEXPERIMENT=noswissmap).
+// compatibility with existing //go:linkname use in the ecosystem.
 
 // linknameIter is the it argument to mapiterinit and mapiternext.
 //
@@ -27,7 +24,7 @@ import (
 //     type hiter struct {
 //             key         unsafe.Pointer
 //             elem        unsafe.Pointer
-//             t           *maptype
+//             t           *maptype // old map abi.Type
 //             h           *hmap
 //             buckets     unsafe.Pointer
 //             bptr        *bmap
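
linkname_shim.go preserves the documented hiter layout for external //go:linkname users. A common way to keep such a mirrored struct honest against its runtime twin is a pair of compile-time size assertions; a hypothetical example (both struct definitions are stand-ins):

    package main

    import "unsafe"

    type runtimeIter struct{ key, elem unsafe.Pointer } // stand-in for the real struct
    type mirrorIter struct{ k, e unsafe.Pointer }       // the ecosystem's copy

    // If either struct outgrows the other, one of these constants becomes
    // negative and fails to compile, since uintptr constants cannot be negative.
    const _ = unsafe.Sizeof(runtimeIter{}) - unsafe.Sizeof(mirrorIter{})
    const _ = unsafe.Sizeof(mirrorIter{}) - unsafe.Sizeof(runtimeIter{})

    func main() {}
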
similarity index 95%
rename from src/runtime/map_swiss.go
rename to src/runtime/map.go
index c2cf08fcaa579854bf2d2bd26213d80dfbd04c58..facf86e494dd35cbc655db551906766dd476d91d 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package runtime
 
 import (
@@ -19,8 +17,6 @@ const (
        loadFactorDen = 8
 )
 
-type maptype = abi.SwissMapType
-
 //go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign
 var maps_errNilAssign error = plainError("assignment to entry in nil map")
 
@@ -331,19 +327,3 @@ func mapclone(m any) any {
        e.data = (unsafe.Pointer)(map_)
        return m
 }
-
-// keys for implementing maps.keys
-//
-//go:linkname keys maps.keys
-func keys(m any, p unsafe.Pointer) {
-       // Currently unused in the maps package.
-       panic("unimplemented")
-}
-
-// values for implementing maps.values
-//
-//go:linkname values maps.values
-func values(m any, p unsafe.Pointer) {
-       // Currently unused in the maps package.
-       panic("unimplemented")
-}
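
The deleted keys/values stubs were linkname targets reserved for the maps package; per their own comments they were never used, and the equivalent functionality ships through the public iterator API instead. For context, the public shape (Go 1.23+):

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        m := map[string]int{"b": 2, "a": 1}
        // maps.Keys yields an iter.Seq[K]; slices.Sorted collects and sorts it.
        fmt.Println(slices.Sorted(maps.Keys(m))) // [a b]
    }
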
similarity index 98%
rename from src/runtime/map_fast32_swiss.go
rename to src/runtime/map_fast32.go
index 0a241d37936ba7c727e868599cf33d29c1ed9c7c..8b460edf77fba4b2f1dd4c2e9836a2107eac4156 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package runtime
 
 import (
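
map_fast32.go keeps the specialized fast path for 4-byte keys, which can compare keys with a single 32-bit load instead of going through the generic key-equality machinery. A toy analogue of the generic-versus-specialized difference (neither function is the runtime's code):

    package main

    import "fmt"

    func lookupGeneric(keys []any, k any) int {
        for i, x := range keys {
            if x == k { // interface comparison: dynamic type check + value compare
                return i
            }
        }
        return -1
    }

    func lookupFast32(keys []uint32, k uint32) int {
        for i, x := range keys {
            if x == k { // one 32-bit compare, no indirection
                return i
            }
        }
        return -1
    }

    func main() {
        fmt.Println(lookupGeneric([]any{uint32(1), uint32(2)}, uint32(2))) // 1
        fmt.Println(lookupFast32([]uint32{1, 2}, 2))                       // 1
    }
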
diff --git a/src/runtime/map_fast32_noswiss.go b/src/runtime/map_fast32_noswiss.go
deleted file mode 100644 (file)
index 751717b..0000000
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       "internal/runtime/sys"
-       "unsafe"
-)
-
-func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
-       }
-       if h == nil || h.count == 0 {
-               return unsafe.Pointer(&zeroVal[0])
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       var b *bmap
-       if h.B == 0 {
-               // One-bucket table. No need to hash.
-               b = (*bmap)(h.buckets)
-       } else {
-               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-               m := bucketMask(h.B)
-               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-               if c := h.oldbuckets; c != nil {
-                       if !h.sameSizeGrow() {
-                               // There used to be half as many buckets; mask down one more power of two.
-                               m >>= 1
-                       }
-                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-                       if !evacuated(oldb) {
-                               b = oldb
-                       }
-               }
-       }
-       for ; b != nil; b = b.overflow(t) {
-               for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
-                       if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2_fast32 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_fast32
-func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
-       }
-       if h == nil || h.count == 0 {
-               return unsafe.Pointer(&zeroVal[0]), false
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       var b *bmap
-       if h.B == 0 {
-               // One-bucket table. No need to hash.
-               b = (*bmap)(h.buckets)
-       } else {
-               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-               m := bucketMask(h.B)
-               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-               if c := h.oldbuckets; c != nil {
-                       if !h.sameSizeGrow() {
-                               // There used to be half as many buckets; mask down one more power of two.
-                               m >>= 1
-                       }
-                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-                       if !evacuated(oldb) {
-                               b = oldb
-                       }
-               }
-       }
-       for ; b != nil; b = b.overflow(t) {
-               for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
-                       if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_fast32 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast32
-func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
-       if h == nil {
-               panic(plainError("assignment to entry in nil map"))
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapassign.
-       h.flags ^= hashWriting
-
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_fast32(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
-       var insertb *bmap
-       var inserti uintptr
-       var insertk unsafe.Pointer
-
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if isEmpty(b.tophash[i]) {
-                               if insertb == nil {
-                                       inserti = i
-                                       insertb = b
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
-                       if k != key {
-                               continue
-                       }
-                       inserti = i
-                       insertb = b
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if insertb == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               insertb = h.newoverflow(t, b)
-               inserti = 0 // not necessary, but avoids needlessly spilling inserti
-       }
-       insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
-       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
-       // store new key at insert position
-       *(*uint32)(insertk) = key
-
-       h.count++
-
-done:
-       elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       return elem
-}
-
-// mapassign_fast32ptr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast32ptr
-func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       if h == nil {
-               panic(plainError("assignment to entry in nil map"))
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapassign.
-       h.flags ^= hashWriting
-
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_fast32(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
-       var insertb *bmap
-       var inserti uintptr
-       var insertk unsafe.Pointer
-
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if isEmpty(b.tophash[i]) {
-                               if insertb == nil {
-                                       inserti = i
-                                       insertb = b
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
-                       if k != key {
-                               continue
-                       }
-                       inserti = i
-                       insertb = b
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if insertb == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               insertb = h.newoverflow(t, b)
-               inserti = 0 // not necessary, but avoids needlessly spilling inserti
-       }
-       insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
-       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
-       // store new key at insert position
-       *(*unsafe.Pointer)(insertk) = key
-
-       h.count++
-
-done:
-       elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       return elem
-}
-
-func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
-       }
-       if h == nil || h.count == 0 {
-               return
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapdelete
-       h.flags ^= hashWriting
-
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_fast32(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       bOrig := b
-search:
-       for ; b != nil; b = b.overflow(t) {
-               for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
-                       if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
-                               continue
-                       }
-                       // Only clear key if there are pointers in it.
-                       // This can only happen if pointers are 32 bit
-                       // wide as 64 bit pointers do not fit into a 32 bit key.
-                       if goarch.PtrSize == 4 && t.Key.Pointers() {
-                               // The key must be a pointer as we checked pointers are
-                               // 32 bits wide and the key is 32 bits wide also.
-                               *(*unsafe.Pointer)(k) = nil
-                       }
-                       e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
-                       if t.Elem.Pointers() {
-                               memclrHasPointers(e, t.Elem.Size_)
-                       } else {
-                               memclrNoHeapPointers(e, t.Elem.Size_)
-                       }
-                       b.tophash[i] = emptyOne
-                       // If the bucket now ends in a bunch of emptyOne states,
-                       // change those to emptyRest states.
-                       if i == abi.OldMapBucketCount-1 {
-                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-                                       goto notLast
-                               }
-                       } else {
-                               if b.tophash[i+1] != emptyRest {
-                                       goto notLast
-                               }
-                       }
-                       for {
-                               b.tophash[i] = emptyRest
-                               if i == 0 {
-                                       if b == bOrig {
-                                               break // beginning of initial bucket, we're done.
-                                       }
-                                       // Find previous bucket, continue at its last entry.
-                                       c := b
-                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-                                       }
-                                       i = abi.OldMapBucketCount - 1
-                               } else {
-                                       i--
-                               }
-                               if b.tophash[i] != emptyOne {
-                                       break
-                               }
-                       }
-               notLast:
-                       h.count--
-                       // Reset the hash seed to make it more difficult for attackers to
-                       // repeatedly trigger hash collisions. See issue 25237.
-                       if h.count == 0 {
-                               h.hash0 = uint32(rand())
-                       }
-                       break search
-               }
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-}
-
-func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
-       // make sure we evacuate the oldbucket corresponding
-       // to the bucket we're about to use
-       evacuate_fast32(t, h, bucket&h.oldbucketmask())
-
-       // evacuate one more oldbucket to make progress on growing
-       if h.growing() {
-               evacuate_fast32(t, h, h.nevacuate)
-       }
-}
-
-func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
-       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-       newbit := h.noldbuckets()
-       if !evacuated(b) {
-               // TODO: reuse overflow buckets instead of using new ones, if there
-               // is no iterator using the old buckets.  (If !oldIterator.)
-
-               // xy contains the x and y (low and high) evacuation destinations.
-               var xy [2]evacDst
-               x := &xy[0]
-               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-               x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.e = add(x.k, abi.OldMapBucketCount*4)
-
-               if !h.sameSizeGrow() {
-                       // Only calculate y pointers if we're growing bigger.
-                       // Otherwise GC can see bad pointers.
-                       y := &xy[1]
-                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-                       y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.e = add(y.k, abi.OldMapBucketCount*4)
-               }
-
-               for ; b != nil; b = b.overflow(t) {
-                       k := add(unsafe.Pointer(b), dataOffset)
-                       e := add(k, abi.OldMapBucketCount*4)
-                       for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
-                               top := b.tophash[i]
-                               if isEmpty(top) {
-                                       b.tophash[i] = evacuatedEmpty
-                                       continue
-                               }
-                               if top < minTopHash {
-                                       throw("bad map state")
-                               }
-                               var useY uint8
-                               if !h.sameSizeGrow() {
-                                       // Compute hash to make our evacuation decision (whether we need
-                                       // to send this key/elem to bucket x or bucket y).
-                                       hash := t.Hasher(k, uintptr(h.hash0))
-                                       if hash&newbit != 0 {
-                                               useY = 1
-                                       }
-                               }
-
-                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
-                               dst := &xy[useY]                 // evacuation destination
-
-                               if dst.i == abi.OldMapBucketCount {
-                                       dst.b = h.newoverflow(t, dst.b)
-                                       dst.i = 0
-                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.e = add(dst.k, abi.OldMapBucketCount*4)
-                               }
-                               dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
-                               // Copy key.
-                               if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
-                                       // Write with a write barrier.
-                                       *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
-                               } else {
-                                       *(*uint32)(dst.k) = *(*uint32)(k)
-                               }
-
-                               typedmemmove(t.Elem, dst.e, e)
-                               dst.i++
-                               // These updates might push these pointers past the end of the
-                               // key or elem arrays.  That's ok, as we have the overflow pointer
-                               // at the end of the bucket to protect against pointing past the
-                               // end of the bucket.
-                               dst.k = add(dst.k, 4)
-                               dst.e = add(dst.e, uintptr(t.ValueSize))
-                       }
-               }
-               // Unlink the overflow buckets & clear key/elem to help GC.
-               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-                       // Preserve b.tophash because the evacuation
-                       // state is maintained there.
-                       ptr := add(b, dataOffset)
-                       n := uintptr(t.BucketSize) - dataOffset
-                       memclrHasPointers(ptr, n)
-               }
-       }
-
-       if oldbucket == h.nevacuate {
-               advanceEvacuationMark(h, t, newbit)
-       }
-}
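
The x/y machinery above is the heart of incremental growing: on a doubling grow, every entry of old bucket i is rehashed into either new bucket i (x) or new bucket i+newbit (y), decided solely by the hash bit that becomes significant after the doubling. A minimal standalone sketch of that decision, with illustrative names (not the runtime's code):

    package main

    import "fmt"

    // evacDest mirrors the useY decision in evacuate_fast32: newbit is the
    // old bucket count, and the newly significant hash bit picks x or y.
    func evacDest(hash, oldbucket, newbit uintptr) uintptr {
            if hash&newbit != 0 {
                    return oldbucket + newbit // destination y (upper half)
            }
            return oldbucket // destination x (same index)
    }

    func main() {
            // Growing from 4 to 8 buckets, so newbit == 4. Both hashes mapped
            // to old bucket 3 (hash&3 == 3); the bit of value 4 splits them.
            fmt.Println(evacDest(0b0111, 3, 4)) // bit set: moves to bucket 7
            fmt.Println(evacDest(0b1011, 3, 4)) // bit clear: stays at bucket 3
    }

Because the destination depends on a single hash bit, a same-size grow can skip the y computation entirely, as the code above notes.
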
diff --git a/src/runtime/map_fast64_swiss.go b/src/runtime/map_fast64.go
similarity index 98%
rename from src/runtime/map_fast64_swiss.go
rename to src/runtime/map_fast64.go
index 8b7fcf88e8402a690a7ffe84f12ab1ffd3ad54b3..5de22a5beae4783c253fcb0da5eb5439c4236344 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package runtime
 
 import (
diff --git a/src/runtime/map_fast64_noswiss.go b/src/runtime/map_fast64_noswiss.go
deleted file mode 100644 (file)
index abb272d..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       "internal/runtime/sys"
-       "unsafe"
-)
-
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
-       }
-       if h == nil || h.count == 0 {
-               return unsafe.Pointer(&zeroVal[0])
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       var b *bmap
-       if h.B == 0 {
-               // One-bucket table. No need to hash.
-               b = (*bmap)(h.buckets)
-       } else {
-               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-               m := bucketMask(h.B)
-               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-               if c := h.oldbuckets; c != nil {
-                       if !h.sameSizeGrow() {
-                               // There used to be half as many buckets; mask down one more power of two.
-                               m >>= 1
-                       }
-                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-                       if !evacuated(oldb) {
-                               b = oldb
-                       }
-               }
-       }
-       for ; b != nil; b = b.overflow(t) {
-               for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
-                       if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0])
-}
-
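
The "mask down one more power of two" trick above is worth seeing in isolation: during a doubling grow the old array has half as many buckets, so the old index is simply the new index with one fewer mask bit. A hedged sketch (helper name is made up):

    package main

    import "fmt"

    // bucketIndices returns where a hash lands in the current and the old
    // bucket arrays for a map with 1<<B current buckets mid-grow.
    func bucketIndices(hash uintptr, B uint8) (cur, old uintptr) {
            m := uintptr(1)<<B - 1 // bucketMask(B)
            return hash & m, hash & (m >> 1)
    }

    func main() {
            cur, old := bucketIndices(0x2D, 4) // 16 buckets now, 8 before
            fmt.Printf("new index %d, old index %d\n", cur, old)
            // 0x2D & 15 == 13, 0x2D & 7 == 5: the lookup falls back to
            // old bucket 5 if that bucket has not been evacuated yet.
    }
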
-// mapaccess2_fast64 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_fast64
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
-       }
-       if h == nil || h.count == 0 {
-               return unsafe.Pointer(&zeroVal[0]), false
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       var b *bmap
-       if h.B == 0 {
-               // One-bucket table. No need to hash.
-               b = (*bmap)(h.buckets)
-       } else {
-               hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-               m := bucketMask(h.B)
-               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-               if c := h.oldbuckets; c != nil {
-                       if !h.sameSizeGrow() {
-                               // There used to be half as many buckets; mask down one more power of two.
-                               m >>= 1
-                       }
-                       oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-                       if !evacuated(oldb) {
-                               b = oldb
-                       }
-               }
-       }
-       for ; b != nil; b = b.overflow(t) {
-               for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
-                       if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_fast64 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast64
-func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
-       if h == nil {
-               panic(plainError("assignment to entry in nil map"))
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapassign.
-       h.flags ^= hashWriting
-
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_fast64(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
-       var insertb *bmap
-       var inserti uintptr
-       var insertk unsafe.Pointer
-
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if isEmpty(b.tophash[i]) {
-                               if insertb == nil {
-                                       insertb = b
-                                       inserti = i
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
-                       if k != key {
-                               continue
-                       }
-                       insertb = b
-                       inserti = i
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if insertb == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               insertb = h.newoverflow(t, b)
-               inserti = 0 // not necessary, but avoids needlessly spilling inserti
-       }
-       insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
-       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
-       // store new key at insert position
-       *(*uint64)(insertk) = key
-
-       h.count++
-
-done:
-       elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       return elem
-}
-
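
The bucketloop above does two jobs in one pass: it remembers the first empty cell as a future insertion slot while still scanning for an existing key, and it stops early at emptyRest because nothing can live beyond that marker. A single-bucket sketch of the same scan (simplified, not the runtime's code):

    package main

    import "fmt"

    const (
            emptyRest = 0 // no more entries at higher indexes or overflows
            emptyOne  = 1 // this cell is empty, later cells may not be
    )

    // findSlot mimics the bucketloop on one 8-cell bucket: it returns the
    // index of key if present, else the first empty cell seen.
    func findSlot(tophash *[8]uint8, keys *[8]uint64, key uint64) int {
            insert := -1
            for i := 0; i < 8; i++ {
                    if tophash[i] <= emptyOne { // isEmpty
                            if insert < 0 {
                                    insert = i
                            }
                            if tophash[i] == emptyRest {
                                    break // nothing beyond this point
                            }
                            continue
                    }
                    if keys[i] == key {
                            return i
                    }
            }
            return insert
    }

    func main() {
            th := [8]uint8{9, emptyOne, 7, emptyRest}
            ks := [8]uint64{10, 0, 30}
            fmt.Println(findSlot(&th, &ks, 30)) // 2: existing key wins
            fmt.Println(findSlot(&th, &ks, 99)) // 1: first empty cell reused
    }
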
-// mapassign_fast64ptr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast64ptr
-func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       if h == nil {
-               panic(plainError("assignment to entry in nil map"))
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapassign.
-       h.flags ^= hashWriting
-
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_fast64(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
-       var insertb *bmap
-       var inserti uintptr
-       var insertk unsafe.Pointer
-
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if isEmpty(b.tophash[i]) {
-                               if insertb == nil {
-                                       insertb = b
-                                       inserti = i
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
-                       if k != key {
-                               continue
-                       }
-                       insertb = b
-                       inserti = i
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if insertb == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               insertb = h.newoverflow(t, b)
-               inserti = 0 // not necessary, but avoids needlessly spilling inserti
-       }
-       insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
-       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
-       // store new key at insert position
-       *(*unsafe.Pointer)(insertk) = key
-
-       h.count++
-
-done:
-       elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       return elem
-}
-
-func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
-       }
-       if h == nil || h.count == 0 {
-               return
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapdelete
-       h.flags ^= hashWriting
-
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_fast64(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       bOrig := b
-search:
-       for ; b != nil; b = b.overflow(t) {
-               for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
-                       if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
-                               continue
-                       }
-                       // Only clear key if there are pointers in it.
-                       if t.Key.Pointers() {
-                               if goarch.PtrSize == 8 {
-                                       *(*unsafe.Pointer)(k) = nil
-                               } else {
-                                       // There are three ways to squeeze one or more 32 bit pointers into 64 bits.
-                                       // Just call memclrHasPointers instead of trying to handle all cases here.
-                                       memclrHasPointers(k, 8)
-                               }
-                       }
-                       e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
-                       if t.Elem.Pointers() {
-                               memclrHasPointers(e, t.Elem.Size_)
-                       } else {
-                               memclrNoHeapPointers(e, t.Elem.Size_)
-                       }
-                       b.tophash[i] = emptyOne
-                       // If the bucket now ends in a bunch of emptyOne states,
-                       // change those to emptyRest states.
-                       if i == abi.OldMapBucketCount-1 {
-                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-                                       goto notLast
-                               }
-                       } else {
-                               if b.tophash[i+1] != emptyRest {
-                                       goto notLast
-                               }
-                       }
-                       for {
-                               b.tophash[i] = emptyRest
-                               if i == 0 {
-                                       if b == bOrig {
-                                               break // beginning of initial bucket, we're done.
-                                       }
-                                       // Find previous bucket, continue at its last entry.
-                                       c := b
-                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-                                       }
-                                       i = abi.OldMapBucketCount - 1
-                               } else {
-                                       i--
-                               }
-                               if b.tophash[i] != emptyOne {
-                                       break
-                               }
-                       }
-               notLast:
-                       h.count--
-                       // Reset the hash seed to make it more difficult for attackers to
-                       // repeatedly trigger hash collisions. See issue 25237.
-                       if h.count == 0 {
-                               h.hash0 = uint32(rand())
-                       }
-                       break search
-               }
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-}
-
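
The backward walk above keeps the emptyRest invariant after a delete: the cell is first marked emptyOne, and if it turns out to end the occupied region, the trailing run of emptyOne cells is rewritten to emptyRest so later lookups can stop early. A single-bucket sketch (no overflow chain, unlike the real code):

    package main

    import "fmt"

    const (
            emptyRest = 0
            emptyOne  = 1
    )

    // coalesce converts a trailing run of emptyOne cells to emptyRest after
    // deleting cell i; the single-bucket version of the loop above.
    func coalesce(tophash *[8]uint8, i int) {
            tophash[i] = emptyOne
            if i != 7 && tophash[i+1] != emptyRest {
                    return // cells after i are still live; nothing to coalesce
            }
            for i >= 0 && tophash[i] == emptyOne {
                    tophash[i] = emptyRest
                    i--
            }
    }

    func main() {
            th := [8]uint8{9, 8, emptyOne, 7, emptyRest}
            coalesce(&th, 3) // delete the entry in cell 3
            fmt.Println(th)  // [9 8 0 0 0 0 0 0]: cells 2 and 3 become emptyRest
    }
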
-func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
-       // make sure we evacuate the oldbucket corresponding
-       // to the bucket we're about to use
-       evacuate_fast64(t, h, bucket&h.oldbucketmask())
-
-       // evacuate one more oldbucket to make progress on growing
-       if h.growing() {
-               evacuate_fast64(t, h, h.nevacuate)
-       }
-}
-
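
A rough amortization sketch of the growWork pattern, under the simplifying assumption that both evacuations always hit a not-yet-evacuated bucket: each write retires at most two old buckets, so a grow over 1<<B old buckets completes within about half that many writes (and within 1<<B writes in the worst case, since the nevacuate step always makes progress).

    package main

    import "fmt"

    func main() {
            const oldBuckets = 1 << 10
            evacuated, writes := 0, 0
            for evacuated < oldBuckets {
                    writes++
                    evacuated += 2 // bucket being written + one at nevacuate
            }
            fmt.Println(writes) // 512
    }
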
-func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
-       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-       newbit := h.noldbuckets()
-       if !evacuated(b) {
-               // TODO: reuse overflow buckets instead of using new ones, if there
-               // is no iterator using the old buckets.  (If !oldIterator.)
-
-               // xy contains the x and y (low and high) evacuation destinations.
-               var xy [2]evacDst
-               x := &xy[0]
-               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-               x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.e = add(x.k, abi.OldMapBucketCount*8)
-
-               if !h.sameSizeGrow() {
-                       // Only calculate y pointers if we're growing bigger.
-                       // Otherwise GC can see bad pointers.
-                       y := &xy[1]
-                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-                       y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.e = add(y.k, abi.OldMapBucketCount*8)
-               }
-
-               for ; b != nil; b = b.overflow(t) {
-                       k := add(unsafe.Pointer(b), dataOffset)
-                       e := add(k, abi.OldMapBucketCount*8)
-                       for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
-                               top := b.tophash[i]
-                               if isEmpty(top) {
-                                       b.tophash[i] = evacuatedEmpty
-                                       continue
-                               }
-                               if top < minTopHash {
-                                       throw("bad map state")
-                               }
-                               var useY uint8
-                               if !h.sameSizeGrow() {
-                                       // Compute hash to make our evacuation decision (whether we need
-                                       // to send this key/elem to bucket x or bucket y).
-                                       hash := t.Hasher(k, uintptr(h.hash0))
-                                       if hash&newbit != 0 {
-                                               useY = 1
-                                       }
-                               }
-
-                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
-                               dst := &xy[useY]                 // evacuation destination
-
-                               if dst.i == abi.OldMapBucketCount {
-                                       dst.b = h.newoverflow(t, dst.b)
-                                       dst.i = 0
-                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.e = add(dst.k, abi.OldMapBucketCount*8)
-                               }
-                               dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
-                               // Copy key.
-                               if t.Key.Pointers() && writeBarrier.enabled {
-                                       if goarch.PtrSize == 8 {
-                                               // Write with a write barrier.
-                                               *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
-                                       } else {
-                                               // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
-                                               // Give up and call typedmemmove.
-                                               typedmemmove(t.Key, dst.k, k)
-                                       }
-                               } else {
-                                       *(*uint64)(dst.k) = *(*uint64)(k)
-                               }
-
-                               typedmemmove(t.Elem, dst.e, e)
-                               dst.i++
-                               // These updates might push these pointers past the end of the
-                               // key or elem arrays.  That's ok, as we have the overflow pointer
-                               // at the end of the bucket to protect against pointing past the
-                               // end of the bucket.
-                               dst.k = add(dst.k, 8)
-                               dst.e = add(dst.e, uintptr(t.ValueSize))
-                       }
-               }
-               // Unlink the overflow buckets & clear key/elem to help GC.
-               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-                       // Preserve b.tophash because the evacuation
-                       // state is maintained there.
-                       ptr := add(b, dataOffset)
-                       n := uintptr(t.BucketSize) - dataOffset
-                       memclrHasPointers(ptr, n)
-               }
-       }
-
-       if oldbucket == h.nevacuate {
-               advanceEvacuationMark(h, t, newbit)
-       }
-}
diff --git a/src/runtime/map_faststr_swiss.go b/src/runtime/map_faststr.go
similarity index 97%
rename from src/runtime/map_faststr_swiss.go
rename to src/runtime/map_faststr.go
index 23f6c1e8104f2d8965249950dbd2f80465253b5b..ca16a8eab6205b4d298e8ad20ff691b31fb27a80 100644 (file)
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package runtime
 
 import (
diff --git a/src/runtime/map_faststr_noswiss.go b/src/runtime/map_faststr_noswiss.go
deleted file mode 100644 (file)
index e8b6a3f..0000000
+++ /dev/null
@@ -1,507 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       "internal/runtime/sys"
-       "unsafe"
-)
-
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
-       }
-       if h == nil || h.count == 0 {
-               return unsafe.Pointer(&zeroVal[0])
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       key := stringStructOf(&ky)
-       if h.B == 0 {
-               // One-bucket table.
-               b := (*bmap)(h.buckets)
-               if key.len < 32 {
-                       // short key, doing lots of comparisons is ok
-                       for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                               k := (*stringStruct)(kptr)
-                               if k.len != key.len || isEmpty(b.tophash[i]) {
-                                       if b.tophash[i] == emptyRest {
-                                               break
-                                       }
-                                       continue
-                               }
-                               if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-                                       return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-                               }
-                       }
-                       return unsafe.Pointer(&zeroVal[0])
-               }
-               // long key, try not to do more comparisons than necessary
-               keymaybe := uintptr(abi.OldMapBucketCount)
-               for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                       k := (*stringStruct)(kptr)
-                       if k.len != key.len || isEmpty(b.tophash[i]) {
-                               if b.tophash[i] == emptyRest {
-                                       break
-                               }
-                               continue
-                       }
-                       if k.str == key.str {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-                       }
-                       // check first 4 bytes
-                       if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
-                               continue
-                       }
-                       // check last 4 bytes
-                       if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
-                               continue
-                       }
-                       if keymaybe != abi.OldMapBucketCount {
-                               // Two keys are potential matches. Use hash to distinguish them.
-                               goto dohash
-                       }
-                       keymaybe = i
-               }
-               if keymaybe != abi.OldMapBucketCount {
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
-                       if memequal(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
-                       }
-               }
-               return unsafe.Pointer(&zeroVal[0])
-       }
-dohash:
-       hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-       for ; b != nil; b = b.overflow(t) {
-               for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                       k := (*stringStruct)(kptr)
-                       if k.len != key.len || b.tophash[i] != top {
-                               continue
-                       }
-                       if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0])
-}
-
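
The long-key path above avoids running a full memequal per cell: after the length and pointer-identity checks, it compares only the first and last 4 bytes, and defers the full byte comparison to at most one surviving candidate (keymaybe). A hedged sketch of that filter using ordinary string slicing instead of the runtime's raw 4-byte loads:

    package main

    import "fmt"

    // candidate reports whether stored key k can still equal query q without
    // a full byte comparison (equal lengths and len >= 4 assumed).
    func candidate(k, q string) bool {
            if len(k) != len(q) {
                    return false
            }
            n := len(q)
            return k[:4] == q[:4] && k[n-4:] == q[n-4:]
    }

    func main() {
            q := "averylongmapkey-0042"
            fmt.Println(candidate("averylongmapkey-0042", q)) // true: full compare needed
            fmt.Println(candidate("averylongmapkey-0043", q)) // false: last 4 bytes differ
            fmt.Println(candidate("bverylongmapkey-0042", q)) // false: first 4 bytes differ
    }
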
-// mapaccess2_faststr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_faststr
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
-       }
-       if h == nil || h.count == 0 {
-               return unsafe.Pointer(&zeroVal[0]), false
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       key := stringStructOf(&ky)
-       if h.B == 0 {
-               // One-bucket table.
-               b := (*bmap)(h.buckets)
-               if key.len < 32 {
-                       // short key, doing lots of comparisons is ok
-                       for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                               k := (*stringStruct)(kptr)
-                               if k.len != key.len || isEmpty(b.tophash[i]) {
-                                       if b.tophash[i] == emptyRest {
-                                               break
-                                       }
-                                       continue
-                               }
-                               if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-                                       return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
-                               }
-                       }
-                       return unsafe.Pointer(&zeroVal[0]), false
-               }
-               // long key, try not to do more comparisons than necessary
-               keymaybe := uintptr(abi.OldMapBucketCount)
-               for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                       k := (*stringStruct)(kptr)
-                       if k.len != key.len || isEmpty(b.tophash[i]) {
-                               if b.tophash[i] == emptyRest {
-                                       break
-                               }
-                               continue
-                       }
-                       if k.str == key.str {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
-                       }
-                       // check first 4 bytes
-                       if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
-                               continue
-                       }
-                       // check last 4 bytes
-                       if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
-                               continue
-                       }
-                       if keymaybe != abi.OldMapBucketCount {
-                               // Two keys are potential matches. Use hash to distinguish them.
-                               goto dohash
-                       }
-                       keymaybe = i
-               }
-               if keymaybe != abi.OldMapBucketCount {
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
-                       if memequal(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
-                       }
-               }
-               return unsafe.Pointer(&zeroVal[0]), false
-       }
-dohash:
-       hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-       for ; b != nil; b = b.overflow(t) {
-               for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                       k := (*stringStruct)(kptr)
-                       if k.len != key.len || b.tophash[i] != top {
-                               continue
-                       }
-                       if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-                               return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_faststr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_faststr
-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
-       if h == nil {
-               panic(plainError("assignment to entry in nil map"))
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       key := stringStructOf(&s)
-       hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapassign.
-       h.flags ^= hashWriting
-
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_faststr(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       top := tophash(hash)
-
-       var insertb *bmap
-       var inserti uintptr
-       var insertk unsafe.Pointer
-
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if isEmpty(b.tophash[i]) && insertb == nil {
-                                       insertb = b
-                                       inserti = i
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
-                       if k.len != key.len {
-                               continue
-                       }
-                       if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
-                               continue
-                       }
-                       // already have a mapping for key. Update it.
-                       inserti = i
-                       insertb = b
-                       // Overwrite existing key, so it can be garbage collected.
-                       // The size is already guaranteed to be set correctly.
-                       k.str = key.str
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if insertb == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               insertb = h.newoverflow(t, b)
-               inserti = 0 // not necessary, but avoids needlessly spilling inserti
-       }
-       insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks
-
-       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
-       // store new key at insert position
-       *((*stringStruct)(insertk)) = *key
-       h.count++
-
-done:
-       elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       return elem
-}
-
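
The `k.str = key.str` overwrite above (on an existing match) is a GC subtlety: the stored key and the new key are byte-equal but may point at different backing arrays, and replacing the stored pointer lets the old array be collected. A small demonstration that equal strings can carry distinct backing arrays (uses unsafe.StringData, available since Go 1.20):

    package main

    import (
            "fmt"
            "strings"
            "unsafe"
    )

    func main() {
            a := strings.Repeat("k", 1<<20) // 1 MiB backing array
            b := string([]byte(a))          // equal bytes, separate 1 MiB array
            fmt.Println(a == b)             // true: the map sees a single key
            fmt.Println(unsafe.StringData(a) == unsafe.StringData(b)) // false: two arrays
            // Re-assigning the map entry with b as the key overwrites the
            // stored pointer, so a's array can be collected once nothing
            // else references it.
    }
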
-func mapdelete_faststr(t *maptype, h *hmap, ky string) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
-       }
-       if h == nil || h.count == 0 {
-               return
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       key := stringStructOf(&ky)
-       hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher for consistency with mapdelete
-       h.flags ^= hashWriting
-
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork_faststr(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       bOrig := b
-       top := tophash(hash)
-search:
-       for ; b != nil; b = b.overflow(t) {
-               for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-                       k := (*stringStruct)(kptr)
-                       if k.len != key.len || b.tophash[i] != top {
-                               continue
-                       }
-                       if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
-                               continue
-                       }
-                       // Clear key's pointer.
-                       k.str = nil
-                       e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-                       if t.Elem.Pointers() {
-                               memclrHasPointers(e, t.Elem.Size_)
-                       } else {
-                               memclrNoHeapPointers(e, t.Elem.Size_)
-                       }
-                       b.tophash[i] = emptyOne
-                       // If the bucket now ends in a bunch of emptyOne states,
-                       // change those to emptyRest states.
-                       if i == abi.OldMapBucketCount-1 {
-                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-                                       goto notLast
-                               }
-                       } else {
-                               if b.tophash[i+1] != emptyRest {
-                                       goto notLast
-                               }
-                       }
-                       for {
-                               b.tophash[i] = emptyRest
-                               if i == 0 {
-                                       if b == bOrig {
-                                               break // beginning of initial bucket, we're done.
-                                       }
-                                       // Find previous bucket, continue at its last entry.
-                                       c := b
-                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-                                       }
-                                       i = abi.OldMapBucketCount - 1
-                               } else {
-                                       i--
-                               }
-                               if b.tophash[i] != emptyOne {
-                                       break
-                               }
-                       }
-               notLast:
-                       h.count--
-                       // Reset the hash seed to make it more difficult for attackers to
-                       // repeatedly trigger hash collisions. See issue 25237.
-                       if h.count == 0 {
-                               h.hash0 = uint32(rand())
-                       }
-                       break search
-               }
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-}
-
-func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
-       // make sure we evacuate the oldbucket corresponding
-       // to the bucket we're about to use
-       evacuate_faststr(t, h, bucket&h.oldbucketmask())
-
-       // evacuate one more oldbucket to make progress on growing
-       if h.growing() {
-               evacuate_faststr(t, h, h.nevacuate)
-       }
-}
-
-func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
-       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-       newbit := h.noldbuckets()
-       if !evacuated(b) {
-               // TODO: reuse overflow buckets instead of using new ones, if there
-               // is no iterator using the old buckets.  (If !oldIterator.)
-
-               // xy contains the x and y (low and high) evacuation destinations.
-               var xy [2]evacDst
-               x := &xy[0]
-               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-               x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-
-               if !h.sameSizeGrow() {
-                       // Only calculate y pointers if we're growing bigger.
-                       // Otherwise GC can see bad pointers.
-                       y := &xy[1]
-                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-                       y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-               }
-
-               for ; b != nil; b = b.overflow(t) {
-                       k := add(unsafe.Pointer(b), dataOffset)
-                       e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
-                       for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
-                               top := b.tophash[i]
-                               if isEmpty(top) {
-                                       b.tophash[i] = evacuatedEmpty
-                                       continue
-                               }
-                               if top < minTopHash {
-                                       throw("bad map state")
-                               }
-                               var useY uint8
-                               if !h.sameSizeGrow() {
-                                       // Compute hash to make our evacuation decision (whether we need
-                                       // to send this key/elem to bucket x or bucket y).
-                                       hash := t.Hasher(k, uintptr(h.hash0))
-                                       if hash&newbit != 0 {
-                                               useY = 1
-                                       }
-                               }
-
-                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
-                               dst := &xy[useY]                 // evacuation destination
-
-                               if dst.i == abi.OldMapBucketCount {
-                                       dst.b = h.newoverflow(t, dst.b)
-                                       dst.i = 0
-                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-                               }
-                               dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
-                               // Copy key.
-                               *(*string)(dst.k) = *(*string)(k)
-
-                               typedmemmove(t.Elem, dst.e, e)
-                               dst.i++
-                               // These updates might push these pointers past the end of the
-                               // key or elem arrays.  That's ok, as we have the overflow pointer
-                               // at the end of the bucket to protect against pointing past the
-                               // end of the bucket.
-                               dst.k = add(dst.k, 2*goarch.PtrSize)
-                               dst.e = add(dst.e, uintptr(t.ValueSize))
-                       }
-               }
-               // Unlink the overflow buckets & clear key/elem to help GC.
-               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-                       // Preserve b.tophash because the evacuation
-                       // state is maintained there.
-                       ptr := add(b, dataOffset)
-                       n := uintptr(t.BucketSize) - dataOffset
-                       memclrHasPointers(ptr, n)
-               }
-       }
-
-       if oldbucket == h.nevacuate {
-               advanceEvacuationMark(h, t, newbit)
-       }
-}
diff --git a/src/runtime/map_noswiss.go b/src/runtime/map_noswiss.go
deleted file mode 100644 (file)
index 7b3c98e..0000000
+++ /dev/null
@@ -1,1891 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index).  To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times).  When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
-//        4.00         2.13        20.77         3.00         4.00
-//        4.50         4.05        17.30         3.25         4.50
-//        5.00         6.85        14.77         3.50         5.00
-//        5.50        10.55        12.94         3.75         5.50
-//        6.00        15.27        11.67         4.00         6.00
-//        6.50        20.90        10.79         4.25         6.50
-//        7.00        27.14        10.15         4.50         7.00
-//        7.50        34.03         9.73         4.75         7.50
-//        8.00        41.10         9.40         5.00         8.00
-//
-// %overflow   = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe    = # of entries to check when looking up a present key
-// missprobe   = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
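
The table above is the empirical basis for the chosen load factor of 6.5: growth triggers once the element count exceeds 13/16 of capacity, i.e. an average of 6.5 entries per 8-slot bucket. The loadFactorNum/loadFactorDen constants below keep that check in integer math; a standalone sketch mirroring the runtime's overLoadFactor:

    package main

    import "fmt"

    const (
            bucketCnt     = 8
            loadFactorDen = 2
            loadFactorNum = loadFactorDen * bucketCnt * 13 / 16 // 13, i.e. 6.5 per bucket
    )

    // overLoad mirrors overLoadFactor: grow when count > 6.5 * 2^B.
    func overLoad(count int, B uint8) bool {
            return count > bucketCnt &&
                    uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
    }

    func main() {
            fmt.Println(overLoad(52, 3)) // false: 52 == 6.5 * 8 exactly
            fmt.Println(overLoad(53, 3)) // true: one past the threshold, grow
    }
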
-import (
-       "internal/abi"
-       "internal/goarch"
-       "internal/runtime/atomic"
-       "internal/runtime/maps"
-       "internal/runtime/math"
-       "internal/runtime/sys"
-       "unsafe"
-)
-
-type maptype = abi.OldMapType
-
-const (
-       // Maximum number of key/elem pairs a bucket can hold.
-       bucketCntBits = abi.OldMapBucketCountBits
-
-       // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
-       // Because of minimum alignment rules, bucketCnt is known to be at least 8.
-       // Represent as loadFactorNum/loadFactorDen, to allow integer math.
-       loadFactorDen = 2
-       loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16
-
-       // data offset should be the size of the bmap struct, but needs to be
-       // aligned correctly. For amd64p32 this means 64-bit alignment
-       // even though pointers are 32 bit.
-       dataOffset = unsafe.Offsetof(struct {
-               b bmap
-               v int64
-       }{}.v)
-
-       // Possible tophash values. We reserve a few possibilities for special marks.
-       // Each bucket (including its overflow buckets, if any) will have either all or none of its
-       // entries in the evacuated* states (except during the evacuate() method, which only happens
-       // during map writes and thus no one else can observe the map during that time).
-       emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
-       emptyOne       = 1 // this cell is empty
-       evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
-       evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
-       evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
-       minTopHash     = 5 // minimum tophash for a normal filled cell.
-
-       // flags
-       iterator     = 1 // there may be an iterator using buckets
-       oldIterator  = 2 // there may be an iterator using oldbuckets
-       hashWriting  = 4 // a goroutine is writing to the map
-       sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
-       // sentinel bucket ID for iterator checks
-       noCheck = 1<<(8*goarch.PtrSize) - 1
-)
-
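
The dataOffset definition above uses unsafe.Offsetof on a synthetic two-field struct so that the key area starts at the bmap size rounded up to 64-bit alignment, whatever the platform. The same trick with a stand-in bucket type (bmap itself is unexported):

    package main

    import (
            "fmt"
            "unsafe"
    )

    // tophashArr stands in for bmap: 8 one-byte cells, alignment 1.
    type tophashArr struct {
            tophash [8]uint8
    }

    func main() {
            // Pairing the struct with an int64 field yields its size rounded
            // up to 8-byte alignment: the offset where keys must begin.
            off := unsafe.Offsetof(struct {
                    b tophashArr
                    v int64
            }{}.v)
            fmt.Println(off) // 8: the keys array starts at byte 8
    }
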
-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
-       return x <= emptyOne
-}
-
-// A header for a Go map.
-type hmap struct {
-       // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
-       // Make sure this stays in sync with the compiler's definition.
-       count     int // # live cells == size of map.  Must be first (used by len() builtin)
-       flags     uint8
-       B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
-       noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
-       hash0     uint32 // hash seed
-
-       buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
-       oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
-       nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
-       clearSeq   uint64
-
-       extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
-       // If both key and elem do not contain pointers and are inline, then we mark bucket
-       // type as containing no pointers. This avoids scanning such maps.
-       // However, bmap.overflow is a pointer. In order to keep overflow buckets
-       // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
-       // overflow and oldoverflow are only used if key and elem do not contain pointers.
-       // overflow contains overflow buckets for hmap.buckets.
-       // oldoverflow contains overflow buckets for hmap.oldbuckets.
-       // The indirection allows storing a pointer to the slice in hiter.
-       overflow    *[]*bmap
-       oldoverflow *[]*bmap
-
-       // nextOverflow holds a pointer to a free overflow bucket.
-       nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
-       // tophash generally contains the top byte of the hash value
-       // for each key in this bucket. If tophash[0] < minTopHash,
-       // tophash[0] is a bucket evacuation state instead.
-       tophash [abi.OldMapBucketCount]uint8
-       // Followed by bucketCnt keys and then bucketCnt elems.
-       // NOTE: packing all the keys together and then all the elems together makes the
-       // code a bit more complicated than alternating key/elem/key/elem/... but it allows
-       // us to eliminate padding which would be needed for, e.g., map[int64]int8.
-       // Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
-       key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
-       elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
-       t           *maptype
-       h           *hmap
-       buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
-       bptr        *bmap          // current bucket
-       overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
-       oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
-       startBucket uintptr        // bucket iteration started at
-       offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
-       wrapped     bool           // already wrapped around from end of bucket array to beginning
-       B           uint8
-       i           uint8
-       bucket      uintptr
-       checkBucket uintptr
-       clearSeq    uint64
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
-       // Masking the shift amount allows overflow checks to be elided.
-       return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
-       return bucketShift(b) - 1
-}
-
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
-       top := uint8(hash >> (goarch.PtrSize*8 - 8))
-       if top < minTopHash {
-               top += minTopHash
-       }
-       return top
-}
-
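Editorial note: minTopHash exists because the top byte of a real hash can collide with the control values. A standalone sketch of the bump (ours, not part of this CL; assumes a 64-bit platform):

    package main

    import "fmt"

    const (
        minTopHash = 5
        ptrBits    = 64 // assumption for this sketch
    )

    func tophash(hash uint64) uint8 {
        top := uint8(hash >> (ptrBits - 8))
        if top < minTopHash {
            top += minTopHash // keep real hashes out of the control range 0..4
        }
        return top
    }

    func main() {
        fmt.Printf("%#x\n", tophash(0x03_00000000000000)) // 0x03 collides, bumped to 0x8
        fmt.Printf("%#x\n", tophash(0xa7_00000000000000)) // 0xa7 is safe, kept as-is
    }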
-func evacuated(b *bmap) bool {
-       h := b.tophash[0]
-       return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
-       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
-       return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
-       // We trigger same-size map growth if there are
-       // as many overflow buckets as buckets.
-       // We need to be able to count to 1<<h.B.
-       if h.B < 16 {
-               h.noverflow++
-               return
-       }
-       // Increment with probability 1/(1<<(h.B-15)).
-       // When we reach 1<<15 - 1, we will have approximately
-       // as many overflow buckets as buckets.
-       mask := uint32(1)<<(h.B-15) - 1
-       // Example: if h.B == 18, then mask == 7,
-       // and rand() & 7 == 0 with probability 1/8.
-       if uint32(rand())&mask == 0 {
-               h.noverflow++
-       }
-}
-
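Editorial note: the probabilistic branch deserves a concrete illustration. Above 2^15 buckets an exact uint16 count could overflow, so each new overflow bucket is counted with probability 1/2^(B-15), keeping the expected value calibrated to "one overflow bucket per regular bucket". A standalone simulation (ours, not part of this CL; maybeIncr is our name):

    package main

    import (
        "fmt"
        "math/rand"
    )

    func maybeIncr(noverflow *uint16, B uint8) {
        if B < 16 {
            *noverflow++ // exact count while it still fits
            return
        }
        mask := uint32(1)<<(B-15) - 1
        if rand.Uint32()&mask == 0 { // hit with probability 1/(mask+1)
            *noverflow++
        }
    }

    func main() {
        var n uint16
        const B = 18 // 2^18 buckets: increment with probability 1/8
        for i := 0; i < 1<<18; i++ {
            maybeIncr(&n, B)
        }
        fmt.Printf("approx %d, expected ~%d\n", n, (1<<18)/8)
    }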
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
-       var ovf *bmap
-       if h.extra != nil && h.extra.nextOverflow != nil {
-               // We have preallocated overflow buckets available.
-               // See makeBucketArray for more details.
-               ovf = h.extra.nextOverflow
-               if ovf.overflow(t) == nil {
-                       // We're not at the end of the preallocated overflow buckets. Bump the pointer.
-                       h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
-               } else {
-                       // This is the last preallocated overflow bucket.
-                       // Reset the overflow pointer on this bucket,
-                       // which was set to a non-nil sentinel value.
-                       ovf.setoverflow(t, nil)
-                       h.extra.nextOverflow = nil
-               }
-       } else {
-               ovf = (*bmap)(newobject(t.Bucket))
-       }
-       h.incrnoverflow()
-       if !t.Bucket.Pointers() {
-               h.createOverflow()
-               *h.extra.overflow = append(*h.extra.overflow, ovf)
-       }
-       b.setoverflow(t, ovf)
-       return ovf
-}
-
-func (h *hmap) createOverflow() {
-       if h.extra == nil {
-               h.extra = new(mapextra)
-       }
-       if h.extra.overflow == nil {
-               h.extra.overflow = new([]*bmap)
-       }
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
-       if int64(int(hint)) != hint {
-               hint = 0
-       }
-       return makemap(t, int(hint), h)
-}
-
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-//
-// makemap_small should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap_small
-func makemap_small() *hmap {
-       h := new(hmap)
-       h.hash0 = uint32(rand())
-       return h
-}
-
-// makemap implements Go map creation for make(map[k]v, hint).
-// If the compiler has determined that the map or the first bucket
-// can be created on the stack, h and/or bucket may be non-nil.
-// If h != nil, the map can be created directly in h.
-// If h.buckets != nil, the bucket pointed to can be used as the first bucket.
-//
-// makemap should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap
-func makemap(t *maptype, hint int, h *hmap) *hmap {
-       mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
-       if overflow || mem > maxAlloc {
-               hint = 0
-       }
-
-       // initialize Hmap
-       if h == nil {
-               h = new(hmap)
-       }
-       h.hash0 = uint32(rand())
-
-       // Find the size parameter B which will hold the requested # of elements.
-       // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
-       B := uint8(0)
-       for overLoadFactor(hint, B) {
-               B++
-       }
-       h.B = B
-
-       // allocate initial hash table
-       // if B == 0, the buckets field is allocated lazily later (in mapassign)
-       // If hint is large zeroing this memory could take a while.
-       if h.B != 0 {
-               var nextOverflow *bmap
-               h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
-               if nextOverflow != nil {
-                       h.extra = new(mapextra)
-                       h.extra.nextOverflow = nextOverflow
-               }
-       }
-
-       return h
-}
-
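Editorial note: for concreteness, this is how the loop above turns a make hint into B under the old layout's 8-slot buckets and 6.5 average load factor (loadFactorNum/loadFactorDen = 13/2). Standalone sketch with the constants inlined (ours, not part of this CL; pickB is our name):

    package main

    import "fmt"

    const (
        bucketCnt     = 8
        loadFactorNum = 13
        loadFactorDen = 2
    )

    func overLoadFactor(count int, B uint8) bool {
        return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
    }

    func pickB(hint int) uint8 {
        B := uint8(0)
        for overLoadFactor(hint, B) {
            B++
        }
        return B
    }

    func main() {
        for _, hint := range []int{0, 8, 9, 104, 105, 1000} {
            fmt.Printf("hint=%4d -> B=%d (%d buckets)\n", hint, pickB(hint), 1<<pickB(hint))
        }
    }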
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil, a new backing array will be allocated;
-// otherwise dirtyalloc will be cleared and reused as the backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
-       base := bucketShift(b)
-       nbuckets := base
-       // For small b, overflow buckets are unlikely.
-       // Avoid the overhead of the calculation.
-       if b >= 4 {
-               // Add on the estimated number of overflow buckets
-               // required to insert the median number of elements
-               // used with this value of b.
-               nbuckets += bucketShift(b - 4)
-               sz := t.Bucket.Size_ * nbuckets
-               up := roundupsize(sz, !t.Bucket.Pointers())
-               if up != sz {
-                       nbuckets = up / t.Bucket.Size_
-               }
-       }
-
-       if dirtyalloc == nil {
-               buckets = newarray(t.Bucket, int(nbuckets))
-       } else {
-               // dirtyalloc was previously generated by
-               // the above newarray(t.Bucket, int(nbuckets))
-               // but may not be empty.
-               buckets = dirtyalloc
-               size := t.Bucket.Size_ * nbuckets
-               if t.Bucket.Pointers() {
-                       memclrHasPointers(buckets, size)
-               } else {
-                       memclrNoHeapPointers(buckets, size)
-               }
-       }
-
-       if base != nbuckets {
-               // We preallocated some overflow buckets.
-               // To keep the overhead of tracking these overflow buckets to a minimum,
-               // we use the convention that if a preallocated overflow bucket's overflow
-               // pointer is nil, then there are more available by bumping the pointer.
-               // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
-               nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
-               last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
-               last.setoverflow(t, (*bmap)(buckets))
-       }
-       return buckets, nextOverflow
-}
-
-// mapaccess1 returns a pointer to h[key]. It never returns nil; instead
-// it returns a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(mapaccess1)
-               racereadpc(unsafe.Pointer(h), callerpc, pc)
-               raceReadObjectPC(t.Key, key, callerpc, pc)
-       }
-       if msanenabled && h != nil {
-               msanread(key, t.Key.Size_)
-       }
-       if asanenabled && h != nil {
-               asanread(key, t.Key.Size_)
-       }
-       if h == nil || h.count == 0 {
-               if err := maps.OldMapKeyError(t, key); err != nil {
-                       panic(err) // see issue 23734
-               }
-               return unsafe.Pointer(&zeroVal[0])
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-bucketloop:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if t.Key.Equal(key, k) {
-                               e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       e = *((*unsafe.Pointer)(e))
-                               }
-                               return e
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0])
-}
-
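Editorial note: the probe structure above (tophash pre-filter, full key compare, emptyRest early exit) is easier to follow without the unsafe arithmetic. A toy re-creation with a concrete bucket type (ours, not part of this CL):

    package main

    import "fmt"

    const (
        bucketCnt  = 8
        emptyRest  = 0
        minTopHash = 5
    )

    type bucket struct {
        tophash  [bucketCnt]uint8
        keys     [bucketCnt]string
        elems    [bucketCnt]int
        overflow *bucket
    }

    func lookup(b *bucket, key string, top uint8) (int, bool) {
    bucketloop:
        for ; b != nil; b = b.overflow {
            for i := 0; i < bucketCnt; i++ {
                if b.tophash[i] != top {
                    if b.tophash[i] == emptyRest {
                        break bucketloop // nothing further in this chain
                    }
                    continue
                }
                if b.keys[i] == key { // cheap byte matched; now compare keys
                    return b.elems[i], true
                }
            }
        }
        return 0, false
    }

    func main() {
        b := &bucket{}
        b.tophash[0], b.keys[0], b.elems[0] = minTopHash+1, "answer", 42
        fmt.Println(lookup(b, "answer", minTopHash+1)) // 42 true
    }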
-// mapaccess2 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(mapaccess2)
-               racereadpc(unsafe.Pointer(h), callerpc, pc)
-               raceReadObjectPC(t.Key, key, callerpc, pc)
-       }
-       if msanenabled && h != nil {
-               msanread(key, t.Key.Size_)
-       }
-       if asanenabled && h != nil {
-               asanread(key, t.Key.Size_)
-       }
-       if h == nil || h.count == 0 {
-               if err := maps.OldMapKeyError(t, key); err != nil {
-                       panic(err) // see issue 23734
-               }
-               return unsafe.Pointer(&zeroVal[0]), false
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-bucketloop:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if t.Key.Equal(key, k) {
-                               e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       e = *((*unsafe.Pointer)(e))
-                               }
-                               return e, true
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapaccessK returns both key and elem. It is used by the map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
-       if h == nil || h.count == 0 {
-               return nil, nil
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-bucketloop:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if t.Key.Equal(key, k) {
-                               e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       e = *((*unsafe.Pointer)(e))
-                               }
-                               return k, e
-                       }
-               }
-       }
-       return nil, nil
-}
-
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
-       e := mapaccess1(t, h, key)
-       if e == unsafe.Pointer(&zeroVal[0]) {
-               return zero
-       }
-       return e
-}
-
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
-       e := mapaccess1(t, h, key)
-       if e == unsafe.Pointer(&zeroVal[0]) {
-               return zero, false
-       }
-       return e, true
-}
-
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-//
-// mapassign should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/RomiChan/protobuf
-//   - github.com/segmentio/encoding
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       if h == nil {
-               panic(plainError("assignment to entry in nil map"))
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(mapassign)
-               racewritepc(unsafe.Pointer(h), callerpc, pc)
-               raceReadObjectPC(t.Key, key, callerpc, pc)
-       }
-       if msanenabled {
-               msanread(key, t.Key.Size_)
-       }
-       if asanenabled {
-               asanread(key, t.Key.Size_)
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher, since t.hasher may panic,
-       // in which case we have not actually done a write.
-       h.flags ^= hashWriting
-
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       top := tophash(hash)
-
-       var inserti *uint8
-       var insertk unsafe.Pointer
-       var elem unsafe.Pointer
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if isEmpty(b.tophash[i]) && inserti == nil {
-                                       inserti = &b.tophash[i]
-                                       insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                                       elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if !t.Key.Equal(key, k) {
-                               continue
-                       }
-                       // already have a mapping for key. Update it.
-                       if t.NeedKeyUpdate() {
-                               typedmemmove(t.Key, k, key)
-                       }
-                       elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if inserti == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               newb := h.newoverflow(t, b)
-               inserti = &newb.tophash[0]
-               insertk = add(unsafe.Pointer(newb), dataOffset)
-               elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
-       }
-
-       // store new key/elem at insert position
-       if t.IndirectKey() {
-               kmem := newobject(t.Key)
-               *(*unsafe.Pointer)(insertk) = kmem
-               insertk = kmem
-       }
-       if t.IndirectElem() {
-               vmem := newobject(t.Elem)
-               *(*unsafe.Pointer)(elem) = vmem
-       }
-       typedmemmove(t.Key, insertk, key)
-       *inserti = top
-       h.count++
-
-done:
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       if t.IndirectElem() {
-               elem = *((*unsafe.Pointer)(elem))
-       }
-       return elem
-}
-
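Editorial note: one detail worth isolating is the inserti bookkeeping: the scan remembers the first empty cell it passes, so a miss reuses a hole left by a deletion instead of extending the chain. A toy sketch of that pattern (ours, not part of this CL; findSlot is our name):

    package main

    import "fmt"

    const (
        emptyRest = 0
        emptyOne  = 1
    )

    func isEmpty(x uint8) bool { return x <= emptyOne }

    // findSlot returns the index of key if present; otherwise the first
    // empty index seen during the scan (-1 if no cell is free).
    func findSlot(tophash []uint8, keys []string, key string, top uint8) (int, bool) {
        insert := -1
        for i := range tophash {
            if tophash[i] != top {
                if isEmpty(tophash[i]) && insert < 0 {
                    insert = i // remember the first hole
                }
                if tophash[i] == emptyRest {
                    break // nothing lives beyond this point
                }
                continue
            }
            if keys[i] == key {
                return i, true
            }
        }
        return insert, false
    }

    func main() {
        th := []uint8{7, emptyOne, 9, emptyRest, 0, 0, 0, 0}
        keys := []string{"a", "", "b", "", "", "", "", ""}
        fmt.Println(findSlot(th, keys, "b", 9)) // 2 true: found
        fmt.Println(findSlot(th, keys, "c", 8)) // 1 false: reuse the deleted cell
    }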
-// mapdelete should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapdelete
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(mapdelete)
-               racewritepc(unsafe.Pointer(h), callerpc, pc)
-               raceReadObjectPC(t.Key, key, callerpc, pc)
-       }
-       if msanenabled && h != nil {
-               msanread(key, t.Key.Size_)
-       }
-       if asanenabled && h != nil {
-               asanread(key, t.Key.Size_)
-       }
-       if h == nil || h.count == 0 {
-               if err := maps.OldMapKeyError(t, key); err != nil {
-                       panic(err) // see issue 23734
-               }
-               return
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       hash := t.Hasher(key, uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher, since t.hasher may panic,
-       // in which case we have not actually done a write (delete).
-       h.flags ^= hashWriting
-
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       bOrig := b
-       top := tophash(hash)
-search:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break search
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       k2 := k
-                       if t.IndirectKey() {
-                               k2 = *((*unsafe.Pointer)(k2))
-                       }
-                       if !t.Key.Equal(key, k2) {
-                               continue
-                       }
-                       // Only clear key if there are pointers in it.
-                       if t.IndirectKey() {
-                               *(*unsafe.Pointer)(k) = nil
-                       } else if t.Key.Pointers() {
-                               memclrHasPointers(k, t.Key.Size_)
-                       }
-                       e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                       if t.IndirectElem() {
-                               *(*unsafe.Pointer)(e) = nil
-                       } else if t.Elem.Pointers() {
-                               memclrHasPointers(e, t.Elem.Size_)
-                       } else {
-                               memclrNoHeapPointers(e, t.Elem.Size_)
-                       }
-                       b.tophash[i] = emptyOne
-                       // If the bucket now ends in a bunch of emptyOne states,
-                       // change those to emptyRest states.
-                       // It would be nice to make this a separate function, but
-                       // for loops are not currently inlineable.
-                       if i == abi.OldMapBucketCount-1 {
-                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-                                       goto notLast
-                               }
-                       } else {
-                               if b.tophash[i+1] != emptyRest {
-                                       goto notLast
-                               }
-                       }
-                       for {
-                               b.tophash[i] = emptyRest
-                               if i == 0 {
-                                       if b == bOrig {
-                                               break // beginning of initial bucket, we're done.
-                                       }
-                                       // Find previous bucket, continue at its last entry.
-                                       c := b
-                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-                                       }
-                                       i = abi.OldMapBucketCount - 1
-                               } else {
-                                       i--
-                               }
-                               if b.tophash[i] != emptyOne {
-                                       break
-                               }
-                       }
-               notLast:
-                       h.count--
-                       // Reset the hash seed to make it more difficult for attackers to
-                       // repeatedly trigger hash collisions. See issue 25237.
-                       if h.count == 0 {
-                               h.hash0 = uint32(rand())
-                       }
-                       break search
-               }
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-}
-
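Editorial note: the backward pass is the subtlest part of mapdelete. Once the deleted cell is followed only by emptyRest, trailing emptyOne cells are upgraded so later lookups can stop early. A single-bucket simplification (ours, not part of this CL; the real code also walks back across overflow buckets):

    package main

    import "fmt"

    const (
        emptyRest = 0
        emptyOne  = 1
    )

    // coalesce upgrades a trailing run of emptyOne cells ending at i to
    // emptyRest, provided everything after i is already emptyRest.
    func coalesce(tophash []uint8, i int) {
        if i != len(tophash)-1 && tophash[i+1] != emptyRest {
            return // a later cell still blocks the early-exit marker
        }
        for i >= 0 && tophash[i] == emptyOne {
            tophash[i] = emptyRest
            i--
        }
    }

    func main() {
        th := []uint8{9, emptyOne, 7, emptyRest, 0, 0, 0, 0}
        th[2] = emptyOne // delete the entry in cell 2
        coalesce(th, 2)
        fmt.Println(th) // [9 0 0 0 0 0 0 0]: cells 1 and 2 both became emptyRest
    }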
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
-// by the compiler's order pass or on the heap by reflect_mapiterinit.
-// Both need a zeroed hiter since the struct contains pointers.
-//
-// mapiterinit should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/goccy/go-json
-//   - github.com/RomiChan/protobuf
-//   - github.com/segmentio/encoding
-//   - github.com/ugorji/go/codec
-//   - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapiterinit
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
-       }
-
-       it.t = t
-       if h == nil || h.count == 0 {
-               return
-       }
-
-       if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize {
-               throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
-       }
-       it.h = h
-       it.clearSeq = h.clearSeq
-
-       // grab snapshot of bucket state
-       it.B = h.B
-       it.buckets = h.buckets
-       if !t.Bucket.Pointers() {
-               // Allocate the current slice and remember pointers to both current and old.
-               // This keeps all relevant overflow buckets alive even if
-               // the table grows and/or overflow buckets are added to the table
-               // while we are iterating.
-               h.createOverflow()
-               it.overflow = h.extra.overflow
-               it.oldoverflow = h.extra.oldoverflow
-       }
-
-       // decide where to start
-       r := uintptr(rand())
-       it.startBucket = r & bucketMask(h.B)
-       it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1))
-
-       // iterator state
-       it.bucket = it.startBucket
-
-       // Remember we have an iterator.
-       // Can run concurrently with another mapiterinit().
-       if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
-               atomic.Or8(&h.flags, iterator|oldIterator)
-       }
-
-       mapiternext(it)
-}
-
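Editorial note: the random start is what makes Go map iteration order vary between runs: a single random word seeds both the starting bucket and the intra-bucket cell offset. A standalone sketch of that split (ours, not part of this CL):

    package main

    import (
        "fmt"
        "math/rand"
    )

    func main() {
        const (
            B         = 4 // 16 buckets
            bucketCnt = 8
        )
        r := uintptr(rand.Uint64())
        startBucket := r & (1<<B - 1)             // which bucket to begin at
        offset := uint8(r >> B & (bucketCnt - 1)) // which cell within each bucket
        fmt.Printf("start at bucket %d, cell offset %d\n", startBucket, offset)
    }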
-// mapiternext should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/RomiChan/protobuf
-//   - github.com/segmentio/encoding
-//   - github.com/ugorji/go/codec
-//   - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapiternext
-func mapiternext(it *hiter) {
-       h := it.h
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
-       }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map iteration and map write")
-       }
-       t := it.t
-       bucket := it.bucket
-       b := it.bptr
-       i := it.i
-       checkBucket := it.checkBucket
-
-next:
-       if b == nil {
-               if bucket == it.startBucket && it.wrapped {
-                       // end of iteration
-                       it.key = nil
-                       it.elem = nil
-                       return
-               }
-               if h.growing() && it.B == h.B {
-                       // Iterator was started in the middle of a grow, and the grow isn't done yet.
-                       // If the bucket we're looking at hasn't been filled in yet (i.e. the old
-                       // bucket hasn't been evacuated) then we need to iterate through the old
-                       // bucket and only return the ones that will be migrated to this bucket.
-                       oldbucket := bucket & it.h.oldbucketmask()
-                       b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-                       if !evacuated(b) {
-                               checkBucket = bucket
-                       } else {
-                               b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
-                               checkBucket = noCheck
-                       }
-               } else {
-                       b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
-                       checkBucket = noCheck
-               }
-               bucket++
-               if bucket == bucketShift(it.B) {
-                       bucket = 0
-                       it.wrapped = true
-               }
-               i = 0
-       }
-       for ; i < abi.OldMapBucketCount; i++ {
-               offi := (i + it.offset) & (abi.OldMapBucketCount - 1)
-               if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
-                       // TODO: emptyRest is hard to use here, as we start iterating
-                       // in the middle of a bucket. It's feasible, just tricky.
-                       continue
-               }
-               k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
-               if t.IndirectKey() {
-                       k = *((*unsafe.Pointer)(k))
-               }
-               e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
-               if checkBucket != noCheck && !h.sameSizeGrow() {
-                       // Special case: iterator was started during a grow to a larger size
-                       // and the grow is not done yet. We're working on a bucket whose
-                       // oldbucket has not been evacuated yet. Or at least, it wasn't
-                       // evacuated when we started the bucket. So we're iterating
-                       // through the oldbucket, skipping any keys that will go
-                       // to the other new bucket (each oldbucket expands to two
-                       // buckets during a grow).
-                       if t.ReflexiveKey() || t.Key.Equal(k, k) {
-                               // If the item in the oldbucket is not destined for
-                               // the current new bucket in the iteration, skip it.
-                               hash := t.Hasher(k, uintptr(h.hash0))
-                               if hash&bucketMask(it.B) != checkBucket {
-                                       continue
-                               }
-                       } else {
-                               // Hash isn't repeatable if k != k (NaNs).  We need a
-                               // repeatable and randomish choice of which direction
-                               // to send NaNs during evacuation. We'll use the low
-                               // bit of tophash to decide which way NaNs go.
-                               // NOTE: this case is why we need two evacuate tophash
-                               // values, evacuatedX and evacuatedY, that differ in
-                               // their low bit.
-                               if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
-                                       continue
-                               }
-                       }
-               }
-               if it.clearSeq == h.clearSeq &&
-                       ((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
-                               !(t.ReflexiveKey() || t.Key.Equal(k, k))) {
-                       // This is the golden data, we can return it.
-                       // OR
-                       // key!=key, so the entry can't be deleted or updated, so we can just return it.
-                       // That's lucky for us because when key!=key we can't look it up successfully.
-                       it.key = k
-                       if t.IndirectElem() {
-                               e = *((*unsafe.Pointer)(e))
-                       }
-                       it.elem = e
-               } else {
-                       // The hash table has grown since the iterator was started.
-                       // The golden data for this key is now somewhere else.
-                       // Check the current hash table for the data.
-                       // This code handles the case where the key
-                       // has been deleted, updated, or deleted and reinserted.
-                       // NOTE: we need to regrab the key as it has potentially been
-                       // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
-                       rk, re := mapaccessK(t, h, k)
-                       if rk == nil {
-                               continue // key has been deleted
-                       }
-                       it.key = rk
-                       it.elem = re
-               }
-               it.bucket = bucket
-               if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
-                       it.bptr = b
-               }
-               it.i = i + 1
-               it.checkBucket = checkBucket
-               return
-       }
-       b = b.overflow(t)
-       i = 0
-       goto next
-}
-
-// mapclear deletes all keys from a map.
-// It is called by the compiler.
-func mapclear(t *maptype, h *hmap) {
-       if raceenabled && h != nil {
-               callerpc := sys.GetCallerPC()
-               pc := abi.FuncPCABIInternal(mapclear)
-               racewritepc(unsafe.Pointer(h), callerpc, pc)
-       }
-
-       if h == nil || h.count == 0 {
-               return
-       }
-
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       h.flags ^= hashWriting
-       h.flags &^= sameSizeGrow
-       h.oldbuckets = nil
-       h.nevacuate = 0
-       h.noverflow = 0
-       h.count = 0
-       h.clearSeq++
-
-       // Reset the hash seed to make it more difficult for attackers to
-       // repeatedly trigger hash collisions. See issue 25237.
-       h.hash0 = uint32(rand())
-
-       // Keep the mapextra allocation but clear any extra information.
-       if h.extra != nil {
-               *h.extra = mapextra{}
-       }
-
-       // makeBucketArray clears the memory pointed to by h.buckets
-       // and recovers any overflow buckets by generating them
-       // as if h.buckets was newly alloced.
-       _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
-       if nextOverflow != nil {
-               // If overflow buckets are created then h.extra
-               // will have been allocated during initial bucket creation.
-               h.extra.nextOverflow = nextOverflow
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-}
-
-func hashGrow(t *maptype, h *hmap) {
-       // If we've hit the load factor, get bigger.
-       // Otherwise, there are too many overflow buckets,
-       // so keep the same number of buckets and "grow" laterally.
-       bigger := uint8(1)
-       if !overLoadFactor(h.count+1, h.B) {
-               bigger = 0
-               h.flags |= sameSizeGrow
-       }
-       oldbuckets := h.buckets
-       newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
-
-       flags := h.flags &^ (iterator | oldIterator)
-       if h.flags&iterator != 0 {
-               flags |= oldIterator
-       }
-       // commit the grow (atomic wrt gc)
-       h.B += bigger
-       h.flags = flags
-       h.oldbuckets = oldbuckets
-       h.buckets = newbuckets
-       h.nevacuate = 0
-       h.noverflow = 0
-
-       if h.extra != nil && h.extra.overflow != nil {
-               // Promote current overflow buckets to the old generation.
-               if h.extra.oldoverflow != nil {
-                       throw("oldoverflow is not nil")
-               }
-               h.extra.oldoverflow = h.extra.overflow
-               h.extra.overflow = nil
-       }
-       if nextOverflow != nil {
-               if h.extra == nil {
-                       h.extra = new(mapextra)
-               }
-               h.extra.nextOverflow = nextOverflow
-       }
-
-       // the actual copying of the hash table data is done incrementally
-       // by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
-       return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
-       // If the threshold is too low, we do extraneous work.
-       // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
-       // "too many" means (approximately) as many overflow buckets as regular buckets.
-       // See incrnoverflow for more details.
-       if B > 15 {
-               B = 15
-       }
-       // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
-       return noverflow >= uint16(1)<<(B&15)
-}
-
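Editorial note: a quick check of the trigger, with B capped at 15 to match the point where noverflow becomes approximate (standalone sketch, ours, not part of this CL):

    package main

    import "fmt"

    func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
        if B > 15 {
            B = 15
        }
        return noverflow >= uint16(1)<<(B&15)
    }

    func main() {
        fmt.Println(tooManyOverflowBuckets(16, 4))     // true: 16 overflow vs 16 regular buckets
        fmt.Println(tooManyOverflowBuckets(15, 4))     // false: still under one per bucket
        fmt.Println(tooManyOverflowBuckets(32768, 20)) // true: saturated approximate counter
    }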
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
-       return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
-       return h.flags&sameSizeGrow != 0
-}
-
-//go:linkname sameSizeGrowForIssue69110Test
-func sameSizeGrowForIssue69110Test(h *hmap) bool {
-       return h.sameSizeGrow()
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
-       oldB := h.B
-       if !h.sameSizeGrow() {
-               oldB--
-       }
-       return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
-       return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
-       // make sure we evacuate the oldbucket corresponding
-       // to the bucket we're about to use
-       evacuate(t, h, bucket&h.oldbucketmask())
-
-       // evacuate one more oldbucket to make progress on growing
-       if h.growing() {
-               evacuate(t, h, h.nevacuate)
-       }
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
-       b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
-       return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
-       b *bmap          // current destination bucket
-       i int            // key/elem index into b
-       k unsafe.Pointer // pointer to current key storage
-       e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
-       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-       newbit := h.noldbuckets()
-       if !evacuated(b) {
-               // TODO: reuse overflow buckets instead of using new ones, if there
-               // is no iterator using the old buckets.  (If !oldIterator.)
-
-               // xy contains the x and y (low and high) evacuation destinations.
-               var xy [2]evacDst
-               x := &xy[0]
-               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-               x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))
-
-               if !h.sameSizeGrow() {
-                       // Only calculate y pointers if we're growing bigger.
-                       // Otherwise GC can see bad pointers.
-                       y := &xy[1]
-                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-                       y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize))
-               }
-
-               for ; b != nil; b = b.overflow(t) {
-                       k := add(unsafe.Pointer(b), dataOffset)
-                       e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize))
-                       for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
-                               top := b.tophash[i]
-                               if isEmpty(top) {
-                                       b.tophash[i] = evacuatedEmpty
-                                       continue
-                               }
-                               if top < minTopHash {
-                                       throw("bad map state")
-                               }
-                               k2 := k
-                               if t.IndirectKey() {
-                                       k2 = *((*unsafe.Pointer)(k2))
-                               }
-                               var useY uint8
-                               if !h.sameSizeGrow() {
-                                       // Compute hash to make our evacuation decision (whether we need
-                                       // to send this key/elem to bucket x or bucket y).
-                                       hash := t.Hasher(k2, uintptr(h.hash0))
-                                       if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
-                                               // If key != key (NaNs), then the hash could be (and probably
-                                               // will be) entirely different from the old hash. Moreover,
-                                               // it isn't reproducible. Reproducibility is required in the
-                                               // presence of iterators, as our evacuation decision must
-                                               // match whatever decision the iterator made.
-                                               // Fortunately, we have the freedom to send these keys either
-                                               // way. Also, tophash is meaningless for these kinds of keys.
-                                               // We let the low bit of tophash drive the evacuation decision.
-                                               // We recompute a new random tophash for the next level so
-                                               // these keys will get evenly distributed across all buckets
-                                               // after multiple grows.
-                                               useY = top & 1
-                                               top = tophash(hash)
-                                       } else {
-                                               if hash&newbit != 0 {
-                                                       useY = 1
-                                               }
-                                       }
-                               }
-
-                               if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
-                                       throw("bad evacuatedN")
-                               }
-
-                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
-                               dst := &xy[useY]                 // evacuation destination
-
-                               if dst.i == abi.OldMapBucketCount {
-                                       dst.b = h.newoverflow(t, dst.b)
-                                       dst.i = 0
-                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
-                               }
-                               dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-                               if t.IndirectKey() {
-                                       *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
-                               } else {
-                               typedmemmove(t.Key, dst.k, k) // copy key
-                               }
-                               if t.IndirectElem() {
-                                       *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
-                               } else {
-                                       typedmemmove(t.Elem, dst.e, e)
-                               }
-                               dst.i++
-                               // These updates might push these pointers past the end of the
-                               // key or elem arrays.  That's ok, as we have the overflow pointer
-                               // at the end of the bucket to protect against pointing past the
-                               // end of the bucket.
-                               dst.k = add(dst.k, uintptr(t.KeySize))
-                               dst.e = add(dst.e, uintptr(t.ValueSize))
-                       }
-               }
-               // Unlink the overflow buckets & clear key/elem to help GC.
-               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-                       // Preserve b.tophash because the evacuation
-                       // state is maintained there.
-                       ptr := add(b, dataOffset)
-                       n := uintptr(t.BucketSize) - dataOffset
-                       memclrHasPointers(ptr, n)
-               }
-       }
-
-       if oldbucket == h.nevacuate {
-               advanceEvacuationMark(h, t, newbit)
-       }
-}
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
-       h.nevacuate++
-       // Experiments suggest that 1024 is overkill by at least an order of magnitude.
-       // Put it in there as a safeguard anyway, to ensure O(1) behavior.
-       stop := h.nevacuate + 1024
-       if stop > newbit {
-               stop = newbit
-       }
-       for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
-               h.nevacuate++
-       }
-       if h.nevacuate == newbit { // newbit == # of oldbuckets
-               // Growing is all done. Free old main bucket array.
-               h.oldbuckets = nil
-               // Can discard old overflow buckets as well.
-               // If they are still referenced by an iterator,
-               // then the iterator holds a pointer to the slice.
-               if h.extra != nil {
-                       h.extra.oldoverflow = nil
-               }
-               h.flags &^= sameSizeGrow
-       }
-}
-
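Editorial note: the X/Y destinations in evacuate amount to one new hash bit. In a doubling grow, an entry in old bucket i lands either at i (X) or at i+newbit (Y), depending on the bit of the hash that the larger mask newly exposes. A standalone sketch (ours, not part of this CL):

    package main

    import "fmt"

    func main() {
        const oldB = 3               // 8 old buckets doubling to 16
        newbit := uintptr(1) << oldB // number of old buckets
        for _, hash := range []uintptr{0b0101, 0b1101} {
            oldbucket := hash & (newbit - 1)
            dst := oldbucket
            if hash&newbit != 0 {
                dst += newbit // Y: upper half of the doubled table
            }
            fmt.Printf("hash=%04b: old bucket %d -> new bucket %d\n", hash, oldbucket, dst)
        }
    }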
-// Reflect stubs. Called from ../reflect/asm_*.s
-
-// reflect_makemap is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - gitee.com/quant1x/gox
-//   - github.com/modern-go/reflect2
-//   - github.com/goccy/go-json
-//   - github.com/RomiChan/protobuf
-//   - github.com/segmentio/encoding
-//   - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
-       // Check invariants and reflect's math.
-       if t.Key.Equal == nil {
-               throw("runtime.reflect_makemap: unsupported map key type")
-       }
-       if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
-               t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
-               throw("key size wrong")
-       }
-       if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
-               t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
-               throw("elem size wrong")
-       }
-       if t.Key.Align_ > abi.OldMapBucketCount {
-               throw("key align too big")
-       }
-       if t.Elem.Align_ > abi.OldMapBucketCount {
-               throw("elem align too big")
-       }
-       if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
-               throw("key size not a multiple of key align")
-       }
-       if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
-               throw("elem size not a multiple of elem align")
-       }
-       if abi.OldMapBucketCount < 8 {
-               throw("bucketsize too small for proper alignment")
-       }
-       if dataOffset%uintptr(t.Key.Align_) != 0 {
-               throw("need padding in bucket (key)")
-       }
-       if dataOffset%uintptr(t.Elem.Align_) != 0 {
-               throw("need padding in bucket (elem)")
-       }
-
-       return makemap(t, cap, nil)
-}
-
-// reflect_mapaccess is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - gitee.com/quant1x/gox
-//   - github.com/modern-go/reflect2
-//   - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       elem, ok := mapaccess2(t, h, key)
-       if !ok {
-               // reflect wants nil for a missing element
-               elem = nil
-       }
-       return elem
-}
-
-//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
-       elem, ok := mapaccess2_faststr(t, h, key)
-       if !ok {
-               // reflect wants nil for a missing element
-               elem = nil
-       }
-       return elem
-}
-
-// reflect_mapassign is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - gitee.com/quant1x/gox
-//   - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-//
-//go:linkname reflect_mapassign reflect.mapassign0
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
-       p := mapassign(t, h, key)
-       typedmemmove(t.Elem, p, elem)
-}
-
-//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
-func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
-       p := mapassign_faststr(t, h, key)
-       typedmemmove(t.Elem, p, elem)
-}
-
-//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
-       mapdelete(t, h, key)
-}
-
-//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
-       mapdelete_faststr(t, h, key)
-}
-
-// reflect_mapiterinit is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/modern-go/reflect2
-//   - gitee.com/quant1x/gox
-//   - github.com/v2pro/plz
-//   - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
-       mapiterinit(t, h, it)
-}
-
-// reflect_mapiternext is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - gitee.com/quant1x/gox
-//   - github.com/modern-go/reflect2
-//   - github.com/goccy/go-json
-//   - github.com/v2pro/plz
-//   - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
-       mapiternext(it)
-}
-
-// reflect_mapiterkey was for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/goccy/go-json
-//   - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
-       return it.key
-}
-
-// reflect_mapiterelem was for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/goccy/go-json
-//   - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterelem reflect.mapiterelem
-func reflect_mapiterelem(it *hiter) unsafe.Pointer {
-       return it.elem
-}
-
-// reflect_maplen is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/goccy/go-json
-//   - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
-       if h == nil {
-               return 0
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
-       }
-       return h.count
-}
-
-//go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *maptype, h *hmap) {
-       mapclear(t, h)
-}
-
-//go:linkname reflectlite_maplen internal/reflectlite.maplen
-func reflectlite_maplen(h *hmap) int {
-       if h == nil {
-               return 0
-       }
-       if raceenabled {
-               callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflectlite_maplen))
-       }
-       return h.count
-}
-
-// mapinitnoop is a no-op function known to the Go linker; if a given global
-// map (of the right size) is determined to be dead, the linker will
-// rewrite the relocation (from the package init func) from the outlined
-// map init function to this symbol. Defined in assembly so as to avoid
-// complications with instrumentation (coverage, etc).
-func mapinitnoop()
-
-// mapclone for implementing maps.Clone
-//
-//go:linkname mapclone maps.clone
-func mapclone(m any) any {
-       e := efaceOf(&m)
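-       // Splice the cloned hmap into m's eface data word; the type word is
-       // unchanged, so returning m returns the clone as the same map type.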
-       e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
-       return m
-}
-
-// moveToBmap moves the entries of bucket src into dst. It returns the
-// destination bucket (a fresh overflow bucket if dst fills up) and the
-// position at which the next key/value will be written; pos ==
-// abi.OldMapBucketCount means the next write must go to an overflow bucket.
-func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
-       for i := 0; i < abi.OldMapBucketCount; i++ {
-               if isEmpty(src.tophash[i]) {
-                       continue
-               }
-
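-               // Find the next free slot in dst, spilling into a fresh
-               // overflow bucket if the current one is full.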
-               for ; pos < abi.OldMapBucketCount; pos++ {
-                       if isEmpty(dst.tophash[pos]) {
-                               break
-                       }
-               }
-
-               if pos == abi.OldMapBucketCount {
-                       dst = h.newoverflow(t, dst)
-                       pos = 0
-               }
-
-               srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
-               srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
-               dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
-               dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
-
-               dst.tophash[pos] = src.tophash[i]
-               if t.IndirectKey() {
-                       srcK = *(*unsafe.Pointer)(srcK)
-                       if t.NeedKeyUpdate() {
-                               kStore := newobject(t.Key)
-                               typedmemmove(t.Key, kStore, srcK)
-                               srcK = kStore
-                       }
-                       // Note: if NeedKeyUpdate is false, then the memory
-                       // used to store the key is immutable, so we can share
-                       // it between the original map and its clone.
-                       *(*unsafe.Pointer)(dstK) = srcK
-               } else {
-                       typedmemmove(t.Key, dstK, srcK)
-               }
-               if t.IndirectElem() {
-                       srcEle = *(*unsafe.Pointer)(srcEle)
-                       eStore := newobject(t.Elem)
-                       typedmemmove(t.Elem, eStore, srcEle)
-                       *(*unsafe.Pointer)(dstEle) = eStore
-               } else {
-                       typedmemmove(t.Elem, dstEle, srcEle)
-               }
-               pos++
-               h.count++
-       }
-       return dst, pos
-}
-
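-// mapclone2 makes a deep copy of src. Small maps with direct keys and
-// elems are copied with a single bucket memmove; everything else is
-// re-inserted bucket by bucket, including entries still sitting in
-// not-yet-evacuated old buckets from an in-progress grow.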
-func mapclone2(t *maptype, src *hmap) *hmap {
-       hint := src.count
-       if overLoadFactor(hint, src.B) {
-               // Note: in rare cases (e.g. during a same-sized grow) the map
-               // can be overloaded. Make sure we don't allocate a destination
-               // bucket array larger than the source bucket array.
-               // This will cause the cloned map to be overloaded also,
-               // but that's better than crashing. See issue 69110.
-               hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
-       }
-       dst := makemap(t, hint, nil)
-       dst.hash0 = src.hash0
-       dst.nevacuate = 0
-       // flags do not need to be copied here, just like a new map has no flags.
-
-       if src.count == 0 {
-               return dst
-       }
-
-       if src.flags&hashWriting != 0 {
-               fatal("concurrent map clone and map write")
-       }
-
-       if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
-               // Quick copy for small maps.
-               dst.buckets = newobject(t.Bucket)
-               dst.count = src.count
-               typedmemmove(t.Bucket, dst.buckets, src.buckets)
-               return dst
-       }
-
-       if dst.B == 0 {
-               dst.buckets = newobject(t.Bucket)
-       }
-       dstArraySize := int(bucketShift(dst.B))
-       srcArraySize := int(bucketShift(src.B))
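-       // dst never has more buckets than src here, so source buckets
-       // i, i+dstArraySize, i+2*dstArraySize, ... all share the low bits
-       // selecting destination bucket i and fold into one bucket chain.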
-       for i := 0; i < dstArraySize; i++ {
-               dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
-               pos := 0
-               for j := 0; j < srcArraySize; j += dstArraySize {
-                       srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
-                       for srcBmap != nil {
-                               dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
-                               srcBmap = srcBmap.overflow(t)
-                       }
-               }
-       }
-
-       if src.oldbuckets == nil {
-               return dst
-       }
-
-       oldB := src.B
-       srcOldbuckets := src.oldbuckets
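-       // In a normal (doubling) grow the old array has half as many
-       // buckets as the current one; in a same-size grow it is the same.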
-       if !src.sameSizeGrow() {
-               oldB--
-       }
-       oldSrcArraySize := int(bucketShift(oldB))
-
-       for i := 0; i < oldSrcArraySize; i++ {
-               srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
-               if evacuated(srcBmap) {
-                       continue
-               }
-
-               if oldB >= dst.B { // all entries of this old bucket land in one dst bucket
-                       dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
-                       for dstBmap.overflow(t) != nil {
-                               dstBmap = dstBmap.overflow(t)
-                       }
-                       pos := 0
-                       for srcBmap != nil {
-                               dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
-                               srcBmap = srcBmap.overflow(t)
-                       }
-                       continue
-               }
-
-               // oldB < dst.B, so a single source bucket may go to multiple destination buckets.
-               // Process entries one at a time.
-               for srcBmap != nil {
-                       // move from old bucket to new bucket
-                       for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                               if isEmpty(srcBmap.tophash[i]) {
-                                       continue
-                               }
-
-                               if src.flags&hashWriting != 0 {
-                                       fatal("concurrent map clone and map write")
-                               }
-
-                               srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
-                               if t.IndirectKey() {
-                                       srcK = *((*unsafe.Pointer)(srcK))
-                               }
-
-                               srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       srcEle = *((*unsafe.Pointer)(srcEle))
-                               }
-                               dstEle := mapassign(t, dst, srcK)
-                               typedmemmove(t.Elem, dstEle, srcEle)
-                       }
-                       srcBmap = srcBmap.overflow(t)
-               }
-       }
-       return dst
-}
-
-// keys for implementing maps.keys
-//
-//go:linkname keys maps.keys
-func keys(m any, p unsafe.Pointer) {
-       e := efaceOf(&m)
-       t := (*maptype)(unsafe.Pointer(e._type))
-       h := (*hmap)(e.data)
-
-       if h == nil || h.count == 0 {
-               return
-       }
-       s := (*slice)(p)
-       r := int(rand())
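-       // Start at a random bucket and a random slot offset within each
-       // bucket so callers cannot depend on any particular key order.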
-       offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
-       if h.B == 0 {
-               copyKeys(t, h, (*bmap)(h.buckets), s, offset)
-               return
-       }
-       arraySize := int(bucketShift(h.B))
-       buckets := h.buckets
-       for i := 0; i < arraySize; i++ {
-               bucket := (i + r) & (arraySize - 1)
-               b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
-               copyKeys(t, h, b, s, offset)
-       }
-
-       if h.growing() {
-               oldArraySize := int(h.noldbuckets())
-               for i := 0; i < oldArraySize; i++ {
-                       bucket := (i + r) & (oldArraySize - 1)
-                       b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
-                       if evacuated(b) {
-                               continue
-                       }
-                       copyKeys(t, h, b, s, offset)
-               }
-       }
-       return
-}
-
-func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
-       for b != nil {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
-                       if isEmpty(b.tophash[offi]) {
-                               continue
-                       }
-                       if h.flags&hashWriting != 0 {
-                               fatal("concurrent map read and map write")
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
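-                       // The caller is expected to preallocate s with room for
-                       // len(m) entries; running out means the map grew concurrently.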
-                       if s.len >= s.cap {
-                               fatal("concurrent map read and map write")
-                       }
-                       typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
-                       s.len++
-               }
-               b = b.overflow(t)
-       }
-}
-
-// values for implementing maps.values
-//
-//go:linkname values maps.values
-func values(m any, p unsafe.Pointer) {
-       e := efaceOf(&m)
-       t := (*maptype)(unsafe.Pointer(e._type))
-       h := (*hmap)(e.data)
-       if h == nil || h.count == 0 {
-               return
-       }
-       s := (*slice)(p)
-       r := int(rand())
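-       // Randomize the starting bucket and slot offset, as in keys above.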
-       offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
-       if h.B == 0 {
-               copyValues(t, h, (*bmap)(h.buckets), s, offset)
-               return
-       }
-       arraySize := int(bucketShift(h.B))
-       buckets := h.buckets
-       for i := 0; i < arraySize; i++ {
-               bucket := (i + r) & (arraySize - 1)
-               b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
-               copyValues(t, h, b, s, offset)
-       }
-
-       if h.growing() {
-               oldArraySize := int(h.noldbuckets())
-               for i := 0; i < oldArraySize; i++ {
-                       bucket := (i + r) & (oldArraySize - 1)
-                       b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
-                       if evacuated(b) {
-                               continue
-                       }
-                       copyValues(t, h, b, s, offset)
-               }
-       }
-       return
-}
-
-func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
-       for b != nil {
-               for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-                       offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
-                       if isEmpty(b.tophash[offi]) {
-                               continue
-                       }
-
-                       if h.flags&hashWriting != 0 {
-                               fatal("concurrent map read and map write")
-                       }
-
-                       ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
-                       if t.IndirectElem() {
-                               ele = *((*unsafe.Pointer)(ele))
-                       }
-                       if s.len >= s.cap {
-                               fatal("concurrent map read and map write")
-                       }
-                       typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
-                       s.len++
-               }
-               b = b.overflow(t)
-       }
-}
diff --git a/src/runtime/map_noswiss_test.go b/src/runtime/map_noswiss_test.go
deleted file mode 100644 (file)
index 5af7b7b..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime_test
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       "runtime"
-       "slices"
-       "testing"
-)
-
-func TestHmapSize(t *testing.T) {
-       // The structure of hmap is defined in runtime/map.go
-       // and in cmd/compile/internal/reflectdata/map.go and must be in sync.
-       // The size of hmap should be 56 bytes on 64 bit and 36 bytes on 32 bit platforms.
-       var hmapSize = uintptr(2*8 + 5*goarch.PtrSize)
-       if runtime.RuntimeHmapSize != hmapSize {
-               t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
-       }
-}
-
-func TestLoadFactor(t *testing.T) {
-       for b := uint8(0); b < 20; b++ {
-               count := 13 * (1 << b) / 2 // 6.5 entries per bucket, the max load factor
-               if b == 0 {
-                       count = 8
-               }
-               if runtime.OverLoadFactor(count, b) {
-                       t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
-               }
-               if !runtime.OverLoadFactor(count+1, b) {
-                       t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
-               }
-       }
-}
-
-func TestMapIterOrder(t *testing.T) {
-       sizes := []int{3, 7, 9, 15}
-       if abi.OldMapBucketCountBits >= 5 {
-               // It gets flaky (often only one iteration order) at size 3 when abi.OldMapBucketCountBits >= 5.
-               t.Fatalf("This test becomes flaky if abi.OldMapBucketCountBits(=%d) is 5 or larger", abi.OldMapBucketCountBits)
-       }
-       for _, n := range sizes {
-               for i := 0; i < 1000; i++ {
-                       // Make m be {0: true, 1: true, ..., n-1: true}.
-                       m := make(map[int]bool)
-                       for i := 0; i < n; i++ {
-                               m[i] = true
-                       }
-                       // Check that iterating over the map produces at least two different orderings.
-                       ord := func() []int {
-                               var s []int
-                               for key := range m {
-                                       s = append(s, key)
-                               }
-                               return s
-                       }
-                       first := ord()
-                       ok := false
-                       for try := 0; try < 100; try++ {
-                               if !slices.Equal(first, ord()) {
-                                       ok = true
-                                       break
-                               }
-                       }
-                       if !ok {
-                               t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
-                               break
-                       }
-               }
-       }
-}
-
-const bs = abi.OldMapBucketCount
-
-// belowOverflow should be a pretty-full pair of buckets;
-// atOverflow is bs/8 larger: 13/8 buckets, or two buckets
-// that are each 13/16 full, which is the overflow boundary.
-// Adding one to that should ensure overflow to the next higher size.
-const (
-       belowOverflow = bs * 3 / 2           // 1.5 bs = 2 buckets @ 75%
-       atOverflow    = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
-)
-
-var mapBucketTests = [...]struct {
-       n        int // n is the number of map elements
-       noescape int // number of expected buckets for non-escaping map
-       escape   int // number of expected buckets for escaping map
-}{
-       {-(1 << 30), 1, 1},
-       {-1, 1, 1},
-       {0, 1, 1},
-       {1, 1, 1},
-       {bs, 1, 1},
-       {bs + 1, 2, 2},
-       {belowOverflow, 2, 2},  // 1.5 bs = 2 buckets @ 75%
-       {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
-
-       {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
-       {2*atOverflow + 1, 8, 8},  // 13/4 bs + 1 = overflow to 8
-
-       {4 * belowOverflow, 8, 8},  // 6 bs = 8 buckets @ 75%
-       {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
-}
-
-func TestMapBuckets(t *testing.T) {
-       // Test that maps of different sizes have the right number of buckets.
-       // Non-escaping maps with small buckets (like map[int]int) never
-       // have a nil bucket pointer due to starting with preallocated buckets
-       // on the stack. Escaping maps start with a non-nil bucket pointer if
-       // hint size is above bucketCnt and thereby have more than one bucket.
-       // These tests depend on bucketCnt and loadFactor* in map.go.
-       t.Run("mapliteral", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := map[int]int{}
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(map[int]int{})
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-       t.Run("nohint", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := make(map[int]int)
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(make(map[int]int))
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-       t.Run("makemap", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := make(map[int]int, tt.n)
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(make(map[int]int, tt.n))
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-       t.Run("makemap64", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := make(map[int]int, int64(tt.n))
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(make(map[int]int, tt.n))
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-}
diff --git a/src/runtime/map_swiss_test.go b/src/runtime/map_swiss_test.go
deleted file mode 100644 (file)
index d5c9fdb..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package runtime_test
-
-import (
-       "internal/abi"
-       "internal/goarch"
-       "internal/runtime/maps"
-       "slices"
-       "testing"
-       "unsafe"
-)
-
-func TestHmapSize(t *testing.T) {
-       // The structure of Map is defined in internal/runtime/maps/map.go
-       // and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync.
-       // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms.
-       wantSize := uintptr(2*8 + 4*goarch.PtrSize)
-       gotSize := unsafe.Sizeof(maps.Map{})
-       if gotSize != wantSize {
-               t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
-       }
-}
-
-// See also reflect_test.TestGroupSizeZero.
-func TestGroupSizeZero(t *testing.T) {
-       var m map[struct{}]struct{}
-       mTyp := abi.TypeOf(m)
-       mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
-
-       // internal/runtime/maps creates pointers to slots, even if the
-       // slots have size 0. The compiler should have reserved an extra
-       // word to ensure that pointers to the zero-size type at the end
-       // of a group are valid.
-       if mt.Group.Size() <= 8 {
-               t.Errorf("Group size got %d want >8", mt.Group.Size())
-       }
-}
-
-func TestMapIterOrder(t *testing.T) {
-       sizes := []int{3, 7, 9, 15}
-       for _, n := range sizes {
-               for i := 0; i < 1000; i++ {
-                       // Make m be {0: true, 1: true, ..., n-1: true}.
-                       m := make(map[int]bool)
-                       for i := 0; i < n; i++ {
-                               m[i] = true
-                       }
-                       // Check that iterating over the map produces at least two different orderings.
-                       ord := func() []int {
-                               var s []int
-                               for key := range m {
-                                       s = append(s, key)
-                               }
-                               return s
-                       }
-                       first := ord()
-                       ok := false
-                       for try := 0; try < 100; try++ {
-                               if !slices.Equal(first, ord()) {
-                                       ok = true
-                                       break
-                               }
-                       }
-                       if !ok {
-                               t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
-                               break
-                       }
-               }
-       }
-}
index b1ff02d851e916b72d35593549c7c80d505806ac..ff7fbeb2308df9b339c34e36ffb6a9201b9c2e03 100644 (file)
@@ -6,7 +6,9 @@ package runtime_test
 
 import (
        "fmt"
-       "internal/goexperiment"
+       "internal/abi"
+       "internal/goarch"
+       "internal/runtime/maps"
        "internal/testenv"
        "math"
        "os"
@@ -812,31 +814,6 @@ func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) {
        }
 }
 
-func TestMapTombstones(t *testing.T) {
-       m := map[int]int{}
-       const N = 10000
-       // Fill a map.
-       for i := 0; i < N; i++ {
-               m[i] = i
-       }
-       runtime.MapTombstoneCheck(m)
-       // Delete half of the entries.
-       for i := 0; i < N; i += 2 {
-               delete(m, i)
-       }
-       runtime.MapTombstoneCheck(m)
-       // Add new entries to fill in holes.
-       for i := N; i < 3*N/2; i++ {
-               m[i] = i
-       }
-       runtime.MapTombstoneCheck(m)
-       // Delete everything.
-       for i := 0; i < 3*N/2; i++ {
-               delete(m, i)
-       }
-       runtime.MapTombstoneCheck(m)
-}
-
 type canString int
 
 func (c canString) String() string {
@@ -1060,44 +1037,6 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) {
        })
 }
 
-func TestMapKeys(t *testing.T) {
-       if goexperiment.SwissMap {
-               t.Skip("mapkeys not implemented for swissmaps")
-       }
-
-       type key struct {
-               s   string
-               pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
-       }
-       m := map[key]int{{s: "a"}: 1, {s: "b"}: 2}
-       keys := make([]key, 0, len(m))
-       runtime.MapKeys(m, unsafe.Pointer(&keys))
-       for _, k := range keys {
-               if len(k.s) != 1 {
-                       t.Errorf("len(k.s) == %d, want 1", len(k.s))
-               }
-       }
-}
-
-func TestMapValues(t *testing.T) {
-       if goexperiment.SwissMap {
-               t.Skip("mapvalues not implemented for swissmaps")
-       }
-
-       type val struct {
-               s   string
-               pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
-       }
-       m := map[int]val{1: {s: "a"}, 2: {s: "b"}}
-       vals := make([]val, 0, len(m))
-       runtime.MapValues(m, unsafe.Pointer(&vals))
-       for _, v := range vals {
-               if len(v.s) != 1 {
-                       t.Errorf("len(v.s) == %d, want 1", len(v.s))
-               }
-       }
-}
-
 func computeHash() uintptr {
        var v struct{}
        return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v))
@@ -1202,3 +1141,62 @@ func TestMapIterDeleteReplace(t *testing.T) {
                })
        }
 }
+
+func TestHmapSize(t *testing.T) {
+       // The structure of Map is defined in internal/runtime/maps/map.go
+       // and in cmd/compile/internal/reflectdata/map.go and must be in sync.
+       // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms.
+       wantSize := uintptr(2*8 + 4*goarch.PtrSize)
+       gotSize := unsafe.Sizeof(maps.Map{})
+       if gotSize != wantSize {
+               t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
+       }
+}
+
+// See also reflect_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+       var m map[struct{}]struct{}
+       mTyp := abi.TypeOf(m)
+       mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+
+       // internal/runtime/maps creates pointers to slots, even if the
+       // slots have size 0. The compiler should have reserved an extra
+       // word to ensure that pointers to the zero-size type at the end
+       // of a group are valid.
+       if mt.Group.Size() <= 8 {
+               t.Errorf("Group size got %d want >8", mt.Group.Size())
+       }
+}
+
+func TestMapIterOrder(t *testing.T) {
+       sizes := []int{3, 7, 9, 15}
+       for _, n := range sizes {
+               for i := 0; i < 1000; i++ {
+                       // Make m be {0: true, 1: true, ..., n-1: true}.
+                       m := make(map[int]bool)
+                       for i := 0; i < n; i++ {
+                               m[i] = true
+                       }
+                       // Check that iterating over the map produces at least two different orderings.
+                       ord := func() []int {
+                               var s []int
+                               for key := range m {
+                                       s = append(s, key)
+                               }
+                               return s
+                       }
+                       first := ord()
+                       ok := false
+                       for try := 0; try < 100; try++ {
+                               if !slices.Equal(first, ord()) {
+                                       ok = true
+                                       break
+                               }
+                       }
+                       if !ok {
+                               t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
+                               break
+                       }
+               }
+       }
+}
index 6d9951517639bc7081e7b7856b03cc23a805e335..23b1f331c84e61c1aa2212c69c9cce822be018c3 100644 (file)
@@ -160,13 +160,6 @@ class MapTypePrinter:
                return str(self.val.type)
 
        def children(self):
-               fields = [f.name for f in self.val.type.strip_typedefs().target().fields()]
-               if 'buckets' in fields:
-                       yield from self.old_map_children()
-               else:
-                       yield from self.swiss_map_children()
-
-       def swiss_map_children(self):
                SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots
 
                cnt = 0
@@ -270,40 +263,6 @@ class MapTypePrinter:
                                yield from group_slots(group)
 
 
-       def old_map_children(self):
-               MapBucketCount = 8 # see internal/abi:OldMapBucketCount
-               B = self.val['B']
-               buckets = self.val['buckets']
-               oldbuckets = self.val['oldbuckets']
-               flags = self.val['flags']
-               inttype = self.val['hash0'].type
-               cnt = 0
-               for bucket in xrange(2 ** int(B)):
-                       bp = buckets + bucket
-                       if oldbuckets:
-                               oldbucket = bucket & (2 ** (B - 1) - 1)
-                               oldbp = oldbuckets + oldbucket
-                               oldb = oldbp.dereference()
-                               if (oldb['overflow'].cast(inttype) & 1) == 0:  # old bucket not evacuated yet
-                                       if bucket >= 2 ** (B - 1):
-                                               continue    # already did old bucket
-                                       bp = oldbp
-                       while bp:
-                               b = bp.dereference()
-                               for i in xrange(MapBucketCount):
-                                       if b['tophash'][i] != 0:
-                                               k = b['keys'][i]
-                                               v = b['values'][i]
-                                               if flags & 1:
-                                                       k = k.dereference()
-                                               if flags & 2:
-                                                       v = v.dereference()
-                                               yield str(cnt), k
-                                               yield str(cnt + 1), v
-                                               cnt += 2
-                               bp = b['overflow']
-
-
 class ChanTypePrinter:
        """Pretty print chan[T] types.
 
index 47c1fe58515cf26b2bfe0a83d47af36f19207d02..e81efadeb311b0334f6d483517424703d428bf35 100644 (file)
@@ -8,8 +8,6 @@ import (
        "bytes"
        "flag"
        "fmt"
-       "internal/abi"
-       "internal/goexperiment"
        "internal/testenv"
        "os"
        "os/exec"
@@ -155,9 +153,6 @@ func checkPtraceScope(t *testing.T) {
        }
 }
 
-// NOTE: the maps below are allocated larger than abi.MapBucketCount
-// to ensure that they are not "optimized out".
-
 var helloSource = `
 import "fmt"
 import "runtime"
@@ -166,19 +161,21 @@ var gslice []string
 var smallmapvar map[string]string
 func main() {
        smallmapvar = make(map[string]string)
-       mapvar := make(map[string]string, ` + strconv.FormatInt(abi.OldMapBucketCount+9, 10) + `)
-       slicemap := make(map[string][]string,` + strconv.FormatInt(abi.OldMapBucketCount+3, 10) + `)
-    chanint := make(chan int, 10)
-    chanstr := make(chan string, 10)
-    chanint <- 99
+       // NOTE: the maps below are allocated large to ensure that they are not
+       // "optimized out".
+       mapvar := make(map[string]string, 10)
+       slicemap := make(map[string][]string, 10)
+       chanint := make(chan int, 10)
+       chanstr := make(chan string, 10)
+       chanint <- 99
        chanint <- 11
-    chanstr <- "spongepants"
-    chanstr <- "squarebob"
+       chanstr <- "spongepants"
+       chanstr <- "squarebob"
        smallmapvar["abc"] = "def"
        mapvar["abc"] = "def"
        mapvar["ghi"] = "jkl"
        slicemap["a"] = []string{"b","c","d"}
-    slicemap["e"] = []string{"f","g","h"}
+       slicemap["e"] = []string{"f","g","h"}
        strvar := "abc"
        ptrvar := &strvar
        slicevar := make([]string, 0, 16)
@@ -638,20 +635,10 @@ func TestGdbAutotmpTypes(t *testing.T) {
        types := []string{
                "[]main.astruct",
                "main.astruct",
-       }
-       if goexperiment.SwissMap {
-               types = append(types, []string{
-                       "groupReference<string,main.astruct>",
-                       "table<string,main.astruct>",
-                       "map<string,main.astruct>",
-                       "map<string,main.astruct> * map[string]main.astruct",
-               }...)
-       } else {
-               types = append(types, []string{
-                       "bucket<string,main.astruct>",
-                       "hash<string,main.astruct>",
-                       "hash<string,main.astruct> * map[string]main.astruct",
-               }...)
+               "groupReference<string,main.astruct>",
+               "table<string,main.astruct>",
+               "map<string,main.astruct>",
+               "map<string,main.astruct> * map[string]main.astruct",
        }
        for _, name := range types {
                if !strings.Contains(sgot, name) {
index baaaeafc4a3550091384260df19503a787bcdbdb..636ea3a48505745c1ec921270471a835c2945bf0 100644 (file)
@@ -9,7 +9,6 @@ package runtime
 import (
        "internal/abi"
        "internal/goarch"
-       "internal/goexperiment"
        "internal/runtime/atomic"
        "unsafe"
 )
@@ -605,13 +604,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
                }
                return true
        case abi.Map:
-               if goexperiment.SwissMap {
-                       mt := (*abi.SwissMapType)(unsafe.Pointer(t))
-                       mv := (*abi.SwissMapType)(unsafe.Pointer(v))
-                       return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
-               }
-               mt := (*abi.OldMapType)(unsafe.Pointer(t))
-               mv := (*abi.OldMapType)(unsafe.Pointer(v))
+               mt := (*abi.SwissMapType)(unsafe.Pointer(t))
+               mv := (*abi.SwissMapType)(unsafe.Pointer(v))
                return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
        case abi.Pointer:
                pt := (*ptrtype)(unsafe.Pointer(t))
diff --git a/test/fixedbugs/issue69110.go b/test/fixedbugs/issue69110.go
deleted file mode 100644 (file)
index ab51d0b..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-// run
-
-//go:build !goexperiment.swissmap
-
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-       "maps"
-       _ "unsafe"
-)
-
-func main() {
-       for i := 0; i < 100; i++ {
-               f()
-       }
-}
-
-const NB = 4
-
-func f() {
-       // Make a map with NB buckets, at max capacity.
-       // 6.5 entries/bucket.
-       ne := NB * 13 / 2
-       m := map[int]int{}
-       for i := 0; i < ne; i++ {
-               m[i] = i
-       }
-
-       // delete/insert a lot, to hopefully get lots of overflow buckets
-       // and trigger a same-size grow.
-       ssg := false
-       for i := ne; i < ne+1000; i++ {
-               delete(m, i-ne)
-               m[i] = i
-               if sameSizeGrow(m) {
-                       ssg = true
-                       break
-               }
-       }
-       if !ssg {
-               return
-       }
-
-       // Insert 1 more entry, which would ordinarily trigger a growth.
-       // We can't grow while growing, so we instead go over our
-       // target capacity.
-       m[-1] = -1
-
-       // Cloning in this state will make a map with a destination bucket
-       // array twice the size of the source.
-       _ = maps.Clone(m)
-}
-
-//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test
-func sameSizeGrow(m map[int]int) bool
index 357ac537ad9d259bda303cb05aa555338ddfd37b..37ee5b9123dd6de8ec5e7011dcc5c811a4c70b26 100644 (file)
@@ -1,4 +1,4 @@
-// run -goexperiment noswissmap
+// run
 
 // Copyright 2024 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
index 46e5e3e757cd60578c22c46e3ae100ce302b981a..6e1a60557c8ad4149f5f0a84500f6938bf771737 100644 (file)
@@ -492,7 +492,7 @@ func f28(b bool) {
 
 func f29(b bool) {
        if b {
-               for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$"
+               for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Iter$"
                        printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
                }
        }
@@ -695,7 +695,7 @@ func newT40() *T40 {
 
 func good40() {
        ret := T40{}              // ERROR "stack object ret T40$"
-       ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
+       ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
        t := &ret
        printnl() // ERROR "live at call to printnl: ret$"
        // Note: ret is live at the printnl because the compiler moves &ret
index 313caa6fb882a12602a8cb6d1318e2c47773f3a0..d88ee6df2ce348ac0ce6e9620f0ea38eade211b6 100644 (file)
@@ -27,14 +27,14 @@ func newT40() *T40 {
 }
 
 func bad40() {
-       t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
+       t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
        printnl()     // ERROR "live at call to printnl: ret$"
        useT40(t)
 }
 
 func good40() {
        ret := T40{}                  // ERROR "stack object ret T40$"
-       ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
+       ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
        t := &ret
        printnl() // ERROR "live at call to printnl: ret$"
        useT40(t)
index ddb4caed1a53e69be00926b192c51a3ebaf8e885..d2565ba9130cd075532917a9ec18a4522c9744e6 100644 (file)
@@ -490,7 +490,7 @@ func f28(b bool) {
 
 func f29(b bool) {
        if b {
-               for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$"
+               for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Iter$"
                        printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
                }
        }
@@ -693,7 +693,7 @@ func newT40() *T40 {
 
 func good40() {
        ret := T40{}              // ERROR "stack object ret T40$"
-       ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$"
+       ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$"
        t := &ret
        printnl() // ERROR "live at call to printnl: ret$"
        // Note: ret is live at the printnl because the compiler moves &ret