]> Cypherpunks repositories - gostls13.git/commitdiff
all: wire up swisstable maps
authorMichael Pratt <mpratt@google.com>
Fri, 3 May 2024 17:03:04 +0000 (13:03 -0400)
committerMichael Pratt <mpratt@google.com>
Mon, 14 Oct 2024 19:58:47 +0000 (19:58 +0000)
Use the new SwissTable-based map in internal/runtime/maps as the basis
for the runtime map when GOEXPERIMENT=swissmap.

Integration is complete enough to pass all.bash. Notable missing
features:

* Race integration / concurrent write detection
* Stack-allocated maps
* Specialized "fast" map variants
* Indirect key / elem

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-ppc64_power10,gotip-linux-amd64-longtest-swissmap
Change-Id: Ie97b656b6d8e05c0403311ae08fef9f51756a639
Reviewed-on: https://go-review.googlesource.com/c/go/+/594596
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>

50 files changed:
src/cmd/compile/internal/gc/main.go
src/cmd/compile/internal/ir/symtab.go
src/cmd/compile/internal/reflectdata/map_swiss.go
src/cmd/compile/internal/ssagen/ssa.go
src/cmd/compile/internal/test/inl_test.go
src/cmd/compile/internal/types/fmt.go
src/cmd/compile/internal/types/type.go
src/cmd/compile/internal/walk/builtin.go
src/cmd/compile/internal/walk/range.go
src/cmd/go/internal/test/test.go
src/cmd/internal/objabi/path_test.go
src/cmd/internal/objabi/pkgspecial.go
src/cmd/link/internal/ld/deadcode.go
src/cmd/link/internal/ld/dwarf.go
src/go/build/deps_test.go
src/internal/abi/map_noswiss.go
src/internal/abi/map_swiss.go
src/internal/coverage/pkid.go
src/internal/runtime/maps/export_noswiss_test.go [new file with mode: 0644]
src/internal/runtime/maps/export_swiss_test.go [new file with mode: 0644]
src/internal/runtime/maps/export_test.go
src/internal/runtime/maps/group.go
src/internal/runtime/maps/internal/abi/map_swiss.go [deleted file]
src/internal/runtime/maps/map_test.go
src/internal/runtime/maps/table.go
src/internal/runtime/maps/table_debug.go
src/reflect/all_test.go
src/reflect/export_noswiss_test.go [new file with mode: 0644]
src/reflect/export_swiss_test.go [new file with mode: 0644]
src/reflect/export_test.go
src/reflect/map_noswiss_test.go [new file with mode: 0644]
src/reflect/map_swiss.go
src/reflect/map_swiss_test.go [new file with mode: 0644]
src/runtime/export_map_noswiss_test.go
src/runtime/export_map_swiss_test.go
src/runtime/export_test.go
src/runtime/map_fast32_swiss.go
src/runtime/map_fast64_swiss.go
src/runtime/map_faststr_swiss.go
src/runtime/map_noswiss_test.go
src/runtime/map_swiss.go
src/runtime/map_swiss_test.go
src/runtime/map_test.go
src/runtime/runtime-gdb_test.go
test/fixedbugs/issue69110.go
test/live.go
test/live2.go
test/live_regabi.go
test/live_regabi_noswiss.go
test/live_regabi_swiss.go

index 174c609e44521714179d872c3ce718713ef7eed2..c922fa9a9a5307d82311c2a158d64242ae4fd653 100644 (file)
@@ -104,6 +104,13 @@ func Main(archInit func(*ssagen.ArchInfo)) {
        ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
        ir.Pkgs.Runtime.Prefix = "runtime"
 
+       if buildcfg.Experiment.SwissMap {
+               // Pseudo-package that contains the compiler's builtin
+               // declarations for maps.
+               ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
+               ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
+       }
+
        // pseudo-packages used in symbol tables
        ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
        ir.Pkgs.Itab.Prefix = "go:itab"
index a2a263d3ce25f0c9a784d81d4c9740115fb62da0..3cdef102302066139996c599e03c184c2a8cff7b 100644 (file)
@@ -73,8 +73,9 @@ type symsStruct struct {
 
 // Pkgs holds known packages.
 var Pkgs struct {
-       Go       *types.Pkg
-       Itab     *types.Pkg
-       Runtime  *types.Pkg
-       Coverage *types.Pkg
+       Go           *types.Pkg
+       Itab         *types.Pkg
+       Runtime      *types.Pkg
+       InternalMaps *types.Pkg
+       Coverage     *types.Pkg
 }
index 4fed93517e6b4be23fc3a7e9221c7b6f35454572..2525c0cf2cd3d545b4a03ef859406a2d6f69720b 100644 (file)
@@ -6,7 +6,6 @@ package reflectdata
 
 import (
        "internal/abi"
-
        "cmd/compile/internal/base"
        "cmd/compile/internal/ir"
        "cmd/compile/internal/rttype"
@@ -16,161 +15,100 @@ import (
        "cmd/internal/src"
 )
 
-// SwissMapBucketType makes the map bucket type given the type of the map.
-func SwissMapBucketType(t *types.Type) *types.Type {
-       // Builds a type representing a Bucket structure for
-       // the given map type. This type is not visible to users -
-       // we include only enough information to generate a correct GC
-       // program for it.
-       // Make sure this stays in sync with runtime/map.go.
-       //
-       //      A "bucket" is a "struct" {
-       //            tophash [abi.SwissMapBucketCount]uint8
-       //            keys [abi.SwissMapBucketCount]keyType
-       //            elems [abi.SwissMapBucketCount]elemType
-       //            overflow *bucket
-       //          }
-       if t.MapType().SwissBucket != nil {
-               return t.MapType().SwissBucket
+// SwissMapGroupType makes the map slot group type given the type of the map.
+func SwissMapGroupType(t *types.Type) *types.Type {
+       if t.MapType().SwissGroup != nil {
+               return t.MapType().SwissGroup
        }
 
-       keytype := t.Key()
-       elemtype := t.Elem()
-       types.CalcSize(keytype)
-       types.CalcSize(elemtype)
-       if keytype.Size() > abi.SwissMapMaxKeyBytes {
-               keytype = types.NewPtr(keytype)
-       }
-       if elemtype.Size() > abi.SwissMapMaxElemBytes {
-               elemtype = types.NewPtr(elemtype)
+       // Builds a type representing a group structure for the given map type.
+       // This type is not visible to users, we include it so we can generate
+       // a correct GC program for it.
+       //
+       // Make sure this stays in sync with internal/runtime/maps/group.go.
+       //
+       // type group struct {
+       //     ctrl uint64
+       //     slots [abi.SwissMapGroupSlots]struct {
+       //         key  keyType
+       //         elem elemType
+       //     }
+       // }
+       slotFields := []*types.Field{
+               makefield("key", t.Key()),
+               makefield("typ", t.Elem()),
        }
+       slot := types.NewStruct(slotFields)
+       slot.SetNoalg(true)
 
-       field := make([]*types.Field, 0, 5)
+       slotArr := types.NewArray(slot, abi.SwissMapGroupSlots)
+       slotArr.SetNoalg(true)
 
-       // The first field is: uint8 topbits[BUCKETSIZE].
-       arr := types.NewArray(types.Types[types.TUINT8], abi.SwissMapBucketCount)
-       field = append(field, makefield("topbits", arr))
-
-       arr = types.NewArray(keytype, abi.SwissMapBucketCount)
-       arr.SetNoalg(true)
-       keys := makefield("keys", arr)
-       field = append(field, keys)
-
-       arr = types.NewArray(elemtype, abi.SwissMapBucketCount)
-       arr.SetNoalg(true)
-       elems := makefield("elems", arr)
-       field = append(field, elems)
-
-       // If keys and elems have no pointers, the map implementation
-       // can keep a list of overflow pointers on the side so that
-       // buckets can be marked as having no pointers.
-       // Arrange for the bucket to have no pointers by changing
-       // the type of the overflow field to uintptr in this case.
-       // See comment on hmap.overflow in runtime/map.go.
-       otyp := types.Types[types.TUNSAFEPTR]
-       if !elemtype.HasPointers() && !keytype.HasPointers() {
-               otyp = types.Types[types.TUINTPTR]
+       fields := []*types.Field{
+               makefield("ctrl", types.Types[types.TUINT64]),
+               makefield("slots", slotArr),
        }
-       overflow := makefield("overflow", otyp)
-       field = append(field, overflow)
 
-       // link up fields
-       bucket := types.NewStruct(field[:])
-       bucket.SetNoalg(true)
-       types.CalcSize(bucket)
+       group := types.NewStruct(fields)
+       group.SetNoalg(true)
+       types.CalcSize(group)
 
        // Check invariants that map code depends on.
        if !types.IsComparable(t.Key()) {
                base.Fatalf("unsupported map key type for %v", t)
        }
-       if abi.SwissMapBucketCount < 8 {
-               base.Fatalf("bucket size %d too small for proper alignment %d", abi.SwissMapBucketCount, 8)
-       }
-       if uint8(keytype.Alignment()) > abi.SwissMapBucketCount {
-               base.Fatalf("key align too big for %v", t)
-       }
-       if uint8(elemtype.Alignment()) > abi.SwissMapBucketCount {
-               base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.SwissMapBucketCount)
-       }
-       if keytype.Size() > abi.SwissMapMaxKeyBytes {
-               base.Fatalf("key size too large for %v", t)
-       }
-       if elemtype.Size() > abi.SwissMapMaxElemBytes {
-               base.Fatalf("elem size too large for %v", t)
-       }
-       if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
-               base.Fatalf("key indirect incorrect for %v", t)
-       }
-       if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
-               base.Fatalf("elem indirect incorrect for %v", t)
-       }
-       if keytype.Size()%keytype.Alignment() != 0 {
-               base.Fatalf("key size not a multiple of key align for %v", t)
-       }
-       if elemtype.Size()%elemtype.Alignment() != 0 {
-               base.Fatalf("elem size not a multiple of elem align for %v", t)
-       }
-       if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
-               base.Fatalf("bucket align not multiple of key align %v", t)
-       }
-       if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
-               base.Fatalf("bucket align not multiple of elem align %v", t)
-       }
-       if keys.Offset%keytype.Alignment() != 0 {
-               base.Fatalf("bad alignment of keys in bmap for %v", t)
-       }
-       if elems.Offset%elemtype.Alignment() != 0 {
-               base.Fatalf("bad alignment of elems in bmap for %v", t)
-       }
-
-       // Double-check that overflow field is final memory in struct,
-       // with no padding at end.
-       if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
-               base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
-                       t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
+       if group.Size() <= 8 {
+               // internal/runtime/maps creates pointers to slots, even if
+               // both key and elem are size zero. In this case, each slot is
+               // size 0, but group should still reserve a word of padding at
+               // the end to ensure pointers are valid.
+               base.Fatalf("bad group size for %v", t)
        }
 
-       t.MapType().SwissBucket = bucket
-
-       bucket.StructType().Map = t
-       return bucket
+       t.MapType().SwissGroup = group
+       group.StructType().Map = t
+       return group
 }
 
 var swissHmapType *types.Type
 
-// SwissMapType returns a type interchangeable with runtime.hmap.
-// Make sure this stays in sync with runtime/map.go.
+// SwissMapType returns a type interchangeable with internal/runtime/maps.Map.
+// Make sure this stays in sync with internal/runtime/maps/map.go.
 func SwissMapType() *types.Type {
        if swissHmapType != nil {
                return swissHmapType
        }
 
        // build a struct:
-       // type hmap struct {
-       //    count      int
-       //    flags      uint8
-       //    B          uint8
-       //    noverflow  uint16
-       //    hash0      uint32
-       //    buckets    unsafe.Pointer
-       //    oldbuckets unsafe.Pointer
-       //    nevacuate  uintptr
-       //    extra      unsafe.Pointer // *mapextra
+       // type table struct {
+       //     used uint64
+       //     typ  unsafe.Pointer // *abi.SwissMapType
+       //     seed uintptr
+       //
+       //     // From groups.
+       //     groups_typ        unsafe.Pointer // *abi.SwissMapType
+       //     groups_data       unsafe.Pointer
+       //     groups_lengthMask uint64
+       //
+       //     capacity   uint64
+       //     growthLeft uint64
+       //
+       //     clearSeq uint64
        // }
-       // must match runtime/map.go:hmap.
+       // must match internal/runtime/maps/map.go:Map.
        fields := []*types.Field{
-               makefield("count", types.Types[types.TINT]),
-               makefield("flags", types.Types[types.TUINT8]),
-               makefield("B", types.Types[types.TUINT8]),
-               makefield("noverflow", types.Types[types.TUINT16]),
-               makefield("hash0", types.Types[types.TUINT32]),      // Used in walk.go for OMAKEMAP.
-               makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
-               makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
-               makefield("nevacuate", types.Types[types.TUINTPTR]),
-               makefield("extra", types.Types[types.TUNSAFEPTR]),
-       }
-
-       n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
+               makefield("used", types.Types[types.TUINT64]),
+               makefield("typ", types.Types[types.TUNSAFEPTR]),
+               makefield("seed", types.Types[types.TUINTPTR]),
+               makefield("groups_typ", types.Types[types.TUNSAFEPTR]),
+               makefield("groups_data", types.Types[types.TUNSAFEPTR]),
+               makefield("groups_lengthMask", types.Types[types.TUINT64]),
+               makefield("capacity", types.Types[types.TUINT64]),
+               makefield("growthLeft", types.Types[types.TUINT64]),
+               makefield("clearSeq", types.Types[types.TUINT64]),
+       }
+
+       n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("table"))
        hmap := types.NewNamed(n)
        n.SetType(hmap)
        n.SetTypecheck(1)
@@ -178,10 +116,10 @@ func SwissMapType() *types.Type {
        hmap.SetUnderlying(types.NewStruct(fields))
        types.CalcSize(hmap)
 
-       // The size of hmap should be 48 bytes on 64 bit
-       // and 28 bytes on 32 bit platforms.
-       if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
-               base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
+       // The size of Map should be 64 bytes on 64 bit
+       // and 48 bytes on 32 bit platforms.
+       if size := int64(5*8 + 4*types.PtrSize); hmap.Size() != size {
+               base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", hmap.Size(), size)
        }
 
        swissHmapType = hmap
@@ -200,52 +138,54 @@ func SwissMapIterType() *types.Type {
        hmap := SwissMapType()
 
        // build a struct:
-       // type hiter struct {
-       //    key         unsafe.Pointer // *Key
-       //    elem        unsafe.Pointer // *Elem
-       //    t           unsafe.Pointer // *SwissMapType
-       //    h           *hmap
-       //    buckets     unsafe.Pointer
-       //    bptr        unsafe.Pointer // *bmap
-       //    overflow    unsafe.Pointer // *[]*bmap
-       //    oldoverflow unsafe.Pointer // *[]*bmap
-       //    startBucket uintptr
-       //    offset      uint8
-       //    wrapped     bool
-       //    B           uint8
-       //    i           uint8
-       //    bucket      uintptr
-       //    checkBucket uintptr
+       // type Iter struct {
+       //    key      unsafe.Pointer // *Key
+       //    elem     unsafe.Pointer // *Elem
+       //    typ      unsafe.Pointer // *SwissMapType
+       //    m        *Map
+       //
+       //    // From groups.
+       //    groups_typ        unsafe.Pointer // *abi.SwissMapType
+       //    groups_data       unsafe.Pointer
+       //    groups_lengthMask uint64
+       //
+       //    clearSeq uint64
+       //
+       //    offset   uint64
+       //    groupIdx uint64
+       //    slotIdx  uint32
+       //
+       //    // 4 bytes of padding on 64-bit arches.
        // }
-       // must match runtime/map.go:hiter.
+       // must match internal/runtime/maps/table.go:Iter.
        fields := []*types.Field{
                makefield("key", types.Types[types.TUNSAFEPTR]),  // Used in range.go for TMAP.
                makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
-               makefield("t", types.Types[types.TUNSAFEPTR]),
-               makefield("h", types.NewPtr(hmap)),
-               makefield("buckets", types.Types[types.TUNSAFEPTR]),
-               makefield("bptr", types.Types[types.TUNSAFEPTR]),
-               makefield("overflow", types.Types[types.TUNSAFEPTR]),
-               makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
-               makefield("startBucket", types.Types[types.TUINTPTR]),
-               makefield("offset", types.Types[types.TUINT8]),
-               makefield("wrapped", types.Types[types.TBOOL]),
-               makefield("B", types.Types[types.TUINT8]),
-               makefield("i", types.Types[types.TUINT8]),
-               makefield("bucket", types.Types[types.TUINTPTR]),
-               makefield("checkBucket", types.Types[types.TUINTPTR]),
+               makefield("typ", types.Types[types.TUNSAFEPTR]),
+               makefield("m", types.NewPtr(hmap)),
+               makefield("groups_typ", types.Types[types.TUNSAFEPTR]),
+               makefield("groups_data", types.Types[types.TUNSAFEPTR]),
+               makefield("groups_lengthMask", types.Types[types.TUINT64]),
+               makefield("clearSeq", types.Types[types.TUINT64]),
+               makefield("offset", types.Types[types.TUINT64]),
+               makefield("groupIdx", types.Types[types.TUINT64]),
+               makefield("slotIdx", types.Types[types.TUINT32]),
        }
 
        // build iterator struct hswissing the above fields
-       n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
+       n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("Iter"))
        hiter := types.NewNamed(n)
        n.SetType(hiter)
        n.SetTypecheck(1)
 
        hiter.SetUnderlying(types.NewStruct(fields))
        types.CalcSize(hiter)
-       if hiter.Size() != int64(12*types.PtrSize) {
-               base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
+       want := 6*types.PtrSize + 4*8 + 1*4
+       if types.PtrSize == 8 {
+               want += 4 // tailing padding
+       }
+       if hiter.Size() != int64(want) {
+               base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), want)
        }
 
        swissHiterType = hiter
@@ -254,40 +194,27 @@ func SwissMapIterType() *types.Type {
 
 func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
        // internal/abi.SwissMapType
+       gtyp := SwissMapGroupType(t)
        s1 := writeType(t.Key())
        s2 := writeType(t.Elem())
-       s3 := writeType(SwissMapBucketType(t))
+       s3 := writeType(gtyp)
        hasher := genhash(t.Key())
 
+       slotTyp := gtyp.Field(1).Type.Elem()
+       elemOff := slotTyp.Field(1).Offset
+
        c.Field("Key").WritePtr(s1)
        c.Field("Elem").WritePtr(s2)
-       c.Field("Bucket").WritePtr(s3)
+       c.Field("Group").WritePtr(s3)
        c.Field("Hasher").WritePtr(hasher)
+       c.Field("SlotSize").WriteUintptr(uint64(slotTyp.Size()))
+       c.Field("ElemOff").WriteUintptr(uint64(elemOff))
        var flags uint32
-       // Note: flags must match maptype accessors in ../../../../runtime/type.go
-       // and maptype builder in ../../../../reflect/type.go:MapOf.
-       if t.Key().Size() > abi.SwissMapMaxKeyBytes {
-               c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
-               flags |= 1 // indirect key
-       } else {
-               c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
-       }
-
-       if t.Elem().Size() > abi.SwissMapMaxElemBytes {
-               c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
-               flags |= 2 // indirect value
-       } else {
-               c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
-       }
-       c.Field("BucketSize").WriteUint16(uint16(SwissMapBucketType(t).Size()))
-       if types.IsReflexive(t.Key()) {
-               flags |= 4 // reflexive key
-       }
        if needkeyupdate(t.Key()) {
-               flags |= 8 // need key update
+               flags |= abi.SwissMapNeedKeyUpdate
        }
        if hashMightPanic(t.Key()) {
-               flags |= 16 // hash might panic
+               flags |= abi.SwissMapHashMightPanic
        }
        c.Field("Flags").WriteUint32(flags)
 
index d086b74e827dfb989b1c7587d69918525415fec6..6a65bb0235c06f058497ed83e659b2ff980694ec 100644 (file)
@@ -89,7 +89,7 @@ func InitConfig() {
        _ = types.NewPtr(types.Types[types.TINT64])                             // *int64
        _ = types.NewPtr(types.ErrorType)                                       // *error
        if buildcfg.Experiment.SwissMap {
-               _ = types.NewPtr(reflectdata.SwissMapType()) // *runtime.hmap
+               _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
        } else {
                _ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap
        }
@@ -5480,8 +5480,13 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
        s.startBlock(bElse)
        switch n.Op() {
        case ir.OLEN:
-               // length is stored in the first word for map/chan
-               s.vars[n] = s.load(lenType, x)
+               if buildcfg.Experiment.SwissMap && n.X.Type().IsMap() {
+                       // length is stored in the first word.
+                       s.vars[n] = s.load(lenType, x)
+               } else {
+                       // length is stored in the first word for map/chan
+                       s.vars[n] = s.load(lenType, x)
+               }
        case ir.OCAP:
                // capacity is stored in the second word for chan
                sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
index 5a8a156f02a9e9d62478e7223229bf32c2a33c76..758479b622f66a68fa7b6450cebb35dad3a6c21b 100644 (file)
@@ -39,10 +39,7 @@ func TestIntendedInlining(t *testing.T) {
                        "adjustpointer",
                        "alignDown",
                        "alignUp",
-                       "bucketMask",
-                       "bucketShift",
                        "chanbuf",
-                       "evacuated",
                        "fastlog2",
                        "float64bits",
                        "funcspdelta",
@@ -62,9 +59,6 @@ func TestIntendedInlining(t *testing.T) {
                        "stringStructOf",
                        "subtract1",
                        "subtractb",
-                       "tophash",
-                       "(*bmap).keys",
-                       "(*bmap).overflow",
                        "(*waitq).enqueue",
                        "funcInfo.entry",
 
@@ -236,6 +230,15 @@ func TestIntendedInlining(t *testing.T) {
                },
        }
 
+       if !goexperiment.SwissMap {
+               // Maps
+               want["runtime"] = append(want["runtime"], "bucketMask")
+               want["runtime"] = append(want["runtime"], "bucketShift")
+               want["runtime"] = append(want["runtime"], "evacuated")
+               want["runtime"] = append(want["runtime"], "tophash")
+               want["runtime"] = append(want["runtime"], "(*bmap).keys")
+               want["runtime"] = append(want["runtime"], "(*bmap).overflow")
+       }
        if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
                // nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable.
                // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
index 96c63528ec9eaf1b932fdf76c666955a3c9ee15c..0dba510ac44e208d454797e984b317e5287fdee7 100644 (file)
@@ -474,8 +474,10 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
                        // Format the bucket struct for map[x]y as map.bucket[x]y.
                        // This avoids a recursive print that generates very long names.
                        switch t {
-                       case mt.OldBucket, mt.SwissBucket:
+                       case mt.OldBucket:
                                b.WriteString("map.bucket[")
+                       case mt.SwissGroup:
+                               b.WriteString("map.group[")
                        default:
                                base.Fatalf("unknown internal map type")
                        }
index 9bb3a70b3ed006bd5dffccb09cb1976402200595..9d3dde8c133c7898224a73cf212d2b07754ec1a3 100644 (file)
@@ -291,7 +291,7 @@ type Map struct {
        OldBucket *Type // internal struct type representing a hash bucket
 
        // GOEXPERIMENT=swissmap fields
-       SwissBucket *Type // internal struct type representing a hash bucket
+       SwissGroup *Type // internal struct type representing a slot group
 }
 
 // MapType returns t's extra map-specific fields.
@@ -1192,15 +1192,9 @@ func (t *Type) cmp(x *Type) Cmp {
                                // to the fallthrough
                        } else if x.StructType().Map == nil {
                                return CMPgt // nil > non-nil
-                       } else if t.StructType().Map.MapType().SwissBucket == t {
-                               // Both have non-nil Map
-                               // Special case for Maps which include a recursive type where the recursion is not broken with a named type
-                               if x.StructType().Map.MapType().SwissBucket != x {
-                                       return CMPlt // bucket maps are least
-                               }
+                       } else {
+                               // TODO: I am confused by the purpose of the OldBucket stuff below.
                                return t.StructType().Map.cmp(x.StructType().Map)
-                       } else if x.StructType().Map.MapType().SwissBucket == x {
-                               return CMPgt // bucket maps are least
                        } // If t != t.Map.SwissBucket, fall through to general case
                } else {
                        if t.StructType().Map == nil {
index 19ec8d30faffc544931b64eb4112e6f083c6772c..51c5e0b94bb6a7b43872510ae6fb4e39c11c9270 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.walk/bui
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -332,62 +332,8 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
                // h = &hv
                h = stackTempAddr(init, hmapType)
 
-               // Allocate one bucket pointed to by hmap.buckets on stack if hint
-               // is not larger than BUCKETSIZE. In case hint is larger than
-               // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
-               // Maximum key and elem size is 128 bytes, larger objects
-               // are stored with an indirection. So max bucket size is 2048+eps.
-               if !ir.IsConst(hint, constant.Int) ||
-                       constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapBucketCount)) {
-
-                       // In case hint is larger than BUCKETSIZE runtime.makemap
-                       // will allocate the buckets on the heap, see #20184
-                       //
-                       // if hint <= BUCKETSIZE {
-                       //     var bv bmap
-                       //     b = &bv
-                       //     h.buckets = b
-                       // }
-
-                       nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapBucketCount)), nil, nil)
-                       nif.Likely = true
-
-                       // var bv bmap
-                       // b = &bv
-                       b := stackTempAddr(&nif.Body, reflectdata.SwissMapBucketType(t))
-
-                       // h.buckets = b
-                       bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
-                       na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
-                       nif.Body.Append(na)
-                       appendWalkStmt(init, nif)
-               }
-       }
-
-       if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapBucketCount)) {
-               // Handling make(map[any]any) and
-               // make(map[any]any, hint) where hint <= BUCKETSIZE
-               // special allows for faster map initialization and
-               // improves binary size by using calls with fewer arguments.
-               // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
-               // and no buckets will be allocated by makemap. Therefore,
-               // no buckets need to be allocated in this code path.
-               if n.Esc() == ir.EscNone {
-                       // Only need to initialize h.hash0 since
-                       // hmap h has been allocated on the stack already.
-                       // h.hash0 = rand32()
-                       rand := mkcall("rand32", types.Types[types.TUINT32], init)
-                       hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
-                       appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
-                       return typecheck.ConvNop(h, t)
-               }
-               // Call runtime.makehmap to allocate an
-               // hmap on the heap and initialize hmap's hash0 field.
-               fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
-               return mkcall1(fn, n.Type(), init)
-       }
-
-       if n.Esc() != ir.EscNone {
+               // TODO(go.dev/issue/54766): Stack allocated table/groups.
+       } else {
                h = typecheck.NodNil()
        }
        // Map initialization with a variable or large hint is
index 93898b3a66f296a715ddd89e5fdf75a672a86df9..27e71425c1b015ae831d76d6898981455b3f2f46 100644 (file)
@@ -5,6 +5,7 @@
 package walk
 
 import (
+       "internal/buildcfg"
        "unicode/utf8"
 
        "cmd/compile/internal/base"
@@ -242,8 +243,14 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
                th := hit.Type()
                // depends on layout of iterator struct.
                // See cmd/compile/internal/reflectdata/reflect.go:MapIterType
-               keysym := th.Field(0).Sym
-               elemsym := th.Field(1).Sym // ditto
+               var keysym, elemsym *types.Sym
+               if buildcfg.Experiment.SwissMap {
+                       keysym = th.Field(0).Sym
+                       elemsym = th.Field(1).Sym // ditto
+               } else {
+                       keysym = th.Field(0).Sym
+                       elemsym = th.Field(1).Sym // ditto
+               }
 
                fn := typecheck.LookupRuntime("mapiterinit", t.Key(), t.Elem(), th)
                init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
index 7d20e28adef9b1ac8bc106fc6da03bd5597b9555..4bc9b5ea4cd1053202df4aa8cdcc617e38999040 100644 (file)
@@ -929,15 +929,16 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
                // unlikely to be useful. Most of these are used by the testing or
                // internal/fuzz packages concurrently with fuzzing.
                var skipInstrumentation = map[string]bool{
-                       "context":       true,
-                       "internal/fuzz": true,
-                       "reflect":       true,
-                       "runtime":       true,
-                       "sync":          true,
-                       "sync/atomic":   true,
-                       "syscall":       true,
-                       "testing":       true,
-                       "time":          true,
+                       "context":               true,
+                       "internal/fuzz":         true,
+                       "internal/runtime/maps": true,
+                       "reflect":               true,
+                       "runtime":               true,
+                       "sync":                  true,
+                       "sync/atomic":           true,
+                       "syscall":               true,
+                       "testing":               true,
+                       "time":                  true,
                }
                for _, p := range load.TestPackageList(ctx, pkgOpts, pkgs) {
                        if !skipInstrumentation[p.ImportPath] {
index 934db3dfa0a854219ff80e4e1592b8e4b1d0b300..2f57882efad8b49f21ce591882d9a7a594d2b6e7 100644 (file)
@@ -67,6 +67,7 @@ func TestPrefixToPathError(t *testing.T) {
 }
 
 func TestRuntimePackageList(t *testing.T) {
+       t.Skip("TODO: XXX")
        // Test that all packages imported by the runtime are marked as runtime
        // packages.
        testenv.MustHaveGoBuild(t)
index 0cf2e64e80e69d94fe64ad65a27a997138ee4990..cb30365a58b16fc92a7a75c1817450f073b3b5c4 100644 (file)
@@ -47,6 +47,7 @@ var runtimePkgs = []string{
 
        "internal/runtime/atomic",
        "internal/runtime/exithook",
+       "internal/runtime/maps",
        "internal/runtime/math",
        "internal/runtime/sys",
        "internal/runtime/syscall",
index a1378fc02c3bfa7d63b92e8da63228e7d912f1a3..6543208c70fc5944f9727424fec27cdccdbf9a3f 100644 (file)
@@ -561,7 +561,10 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
                off += 2 * arch.PtrSize
        case abi.Map:
                if buildcfg.Experiment.SwissMap {
-                       off += 4*arch.PtrSize + 8 // internal/abi.SwissMapType
+                       off += 6*arch.PtrSize + 4 // internal/abi.SwissMapType
+                       if arch.PtrSize == 8 {
+                               off += 4 // padding for final uint32 field (Flags).
+                       }
                } else {
                        off += 4*arch.PtrSize + 8 // internal/abi.OldMapType
                }
index da4ca6d5ccb660560d59a9454f999558030076df..45037030f595c42038bdbb87c159daf9977b7c3c 100644 (file)
@@ -810,7 +810,7 @@ func (d *dwctxt) findprotodie(ctxt *Link, name string) *dwarf.DWDie {
                die = prototypedies[name]
        }
        if die == nil {
-               log.Fatalf("internal error: DIE generation failed for %s\n", name)
+               log.Fatalf("internal error: DIE generation failed for %s\nprototypedies: %+v", name, prototypedies)
        }
        return die
 }
@@ -873,8 +873,8 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
 }
 
 func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
-       hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap"))
-       bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap"))
+       hash := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.table"))
+       //bucket := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.Map"))
 
        if hash == nil {
                return
@@ -887,79 +887,82 @@ func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
                gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym))
                keytype := decodetypeMapKey(d.ldr, d.arch, gotype)
                valtype := decodetypeMapValue(d.ldr, d.arch, gotype)
-               keydata := d.ldr.Data(keytype)
-               valdata := d.ldr.Data(valtype)
-               keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata)
+               //keydata := d.ldr.Data(keytype)
+               //valdata := d.ldr.Data(valtype)
+               //keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata)
                keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype))
 
                // compute size info like hashmap.c does.
-               indirectKey, indirectVal := false, false
-               if keysize > abi.SwissMapMaxKeyBytes {
-                       keysize = int64(d.arch.PtrSize)
-                       indirectKey = true
-               }
-               if valsize > abi.SwissMapMaxElemBytes {
-                       valsize = int64(d.arch.PtrSize)
-                       indirectVal = true
-               }
+               //indirectKey, indirectVal := false, false
+               //if keysize > abi.SwissMapMaxKeyBytes {
+               //      keysize = int64(d.arch.PtrSize)
+               //      indirectKey = true
+               //}
+               //if valsize > abi.SwissMapMaxElemBytes {
+               //      valsize = int64(d.arch.PtrSize)
+               //      indirectVal = true
+               //}
 
                // Construct type to represent an array of BucketSize keys
+               // TODO
                keyname := d.nameFromDIESym(keytype)
-               dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
-                       newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount*keysize, 0)
-                       t := keytype
-                       if indirectKey {
-                               t = d.defptrto(keytype)
-                       }
-                       d.newrefattr(dwhk, dwarf.DW_AT_type, t)
-                       fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
-                       newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount, 0)
-                       d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
-               })
+               //dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
+               //      newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount*keysize, 0)
+               //      t := keytype
+               //      if indirectKey {
+               //              t = d.defptrto(keytype)
+               //      }
+               //      d.newrefattr(dwhk, dwarf.DW_AT_type, t)
+               //      fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
+               //      newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount, 0)
+               //      d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
+               //})
 
                // Construct type to represent an array of BucketSize values
+               // TODO
                valname := d.nameFromDIESym(valtype)
-               dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
-                       newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount*valsize, 0)
-                       t := valtype
-                       if indirectVal {
-                               t = d.defptrto(valtype)
-                       }
-                       d.newrefattr(dwhv, dwarf.DW_AT_type, t)
-                       fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
-                       newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount, 0)
-                       d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
-               })
+               //dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
+               //      newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount*valsize, 0)
+               //      t := valtype
+               //      if indirectVal {
+               //              t = d.defptrto(valtype)
+               //      }
+               //      d.newrefattr(dwhv, dwarf.DW_AT_type, t)
+               //      fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
+               //      newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount, 0)
+               //      d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
+               //})
 
                // Construct bucket<K,V>
-               dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
-                       // Copy over all fields except the field "data" from the generic
-                       // bucket. "data" will be replaced with keys/values below.
-                       d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
-
-                       fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
-                       d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
-                       newmemberoffsetattr(fld, abi.SwissMapBucketCount)
-                       fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
-                       d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
-                       newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*int32(keysize))
-                       fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
-                       d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
-                       newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*(int32(keysize)+int32(valsize)))
-                       if d.arch.RegSize > d.arch.PtrSize {
-                               fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
-                               d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
-                               newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
-                       }
-
-                       newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount+abi.SwissMapBucketCount*keysize+abi.SwissMapBucketCount*valsize+int64(d.arch.RegSize), 0)
-               })
+               // TODO
+               //dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
+               //      // Copy over all fields except the field "data" from the generic
+               //      // bucket. "data" will be replaced with keys/values below.
+               //      d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
+
+               //      fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
+               //      d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
+               //      newmemberoffsetattr(fld, abi.SwissMapBucketCount)
+               //      fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
+               //      d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
+               //      newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*int32(keysize))
+               //      fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
+               //      d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
+               //      newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*(int32(keysize)+int32(valsize)))
+               //      if d.arch.RegSize > d.arch.PtrSize {
+               //              fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
+               //              d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
+               //              newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
+               //      }
+
+               //      newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount+abi.SwissMapBucketCount*keysize+abi.SwissMapBucketCount*valsize+int64(d.arch.RegSize), 0)
+               //})
 
                // Construct hash<K,V>
                dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
                        d.copychildren(ctxt, dwh, hash)
-                       d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
-                       d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
+                       //d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
+                       //d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
                        newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
                })
 
@@ -1874,12 +1877,16 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
        prototypedies = map[string]*dwarf.DWDie{
                "type:runtime.stringStructDWARF": nil,
                "type:runtime.slice":             nil,
-               "type:runtime.hmap":              nil,
-               "type:runtime.bmap":              nil,
                "type:runtime.sudog":             nil,
                "type:runtime.waitq":             nil,
                "type:runtime.hchan":             nil,
        }
+       if buildcfg.Experiment.SwissMap {
+               prototypedies["type:internal/runtime/maps.table"] = nil
+       } else {
+               prototypedies["type:runtime.hmap"] = nil
+               prototypedies["type:runtime.bmap"] = nil
+       }
 
        // Needed by the prettyprinter code for interface inspection.
        for _, typ := range []string{
index 894cf1bd2c0e04b84afa833263479ed2112bc398..64558ff1353a941627a96a6e726a2e712d6bbb7f 100644 (file)
@@ -87,7 +87,6 @@ var depsRules = `
        < internal/runtime/syscall
        < internal/runtime/atomic
        < internal/runtime/exithook
-       < internal/runtime/maps/internal/abi
        < internal/runtime/maps
        < internal/runtime/math
        < runtime
index 97afd9da4a2bbe697ce05fb930bf94c9332545f8..ff8609efcf200137a58feb4d1a9b9db29f9dcc6a 100644 (file)
@@ -52,4 +52,3 @@ func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on
 func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic
        return mt.Flags&16 != 0
 }
-
index 3f58040a189741a43927aff25f805edc90783494..d69aefbb2963f8dfaa2fd70db381b9a3be563b70 100644 (file)
@@ -11,45 +11,31 @@ import (
 // Map constants common to several packages
 // runtime/runtime-gdb.py:MapTypePrinter contains its own copy
 const (
-       // Maximum number of key/elem pairs a bucket can hold.
-       SwissMapBucketCountBits = 3 // log2 of number of elements in a bucket.
-       SwissMapBucketCount     = 1 << SwissMapBucketCountBits
-
-       // Maximum key or elem size to keep inline (instead of mallocing per element).
-       // Must fit in a uint8.
-       // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
-       SwissMapMaxKeyBytes  = 128
-       SwissMapMaxElemBytes = 128 // Must fit in a uint8.
+       // Number of slots in a group.
+       SwissMapGroupSlots = 8
 )
 
 type SwissMapType struct {
        Type
-       Key    *Type
-       Elem   *Type
-       Bucket *Type // internal type representing a hash bucket
+       Key   *Type
+       Elem  *Type
+       Group *Type // internal type representing a slot group
        // function for hashing keys (ptr to key, seed) -> hash
-       Hasher     func(unsafe.Pointer, uintptr) uintptr
-       KeySize    uint8  // size of key slot
-       ValueSize  uint8  // size of elem slot
-       BucketSize uint16 // size of bucket
-       Flags      uint32
+       Hasher   func(unsafe.Pointer, uintptr) uintptr
+       SlotSize uintptr // size of key/elem slot
+       ElemOff  uintptr // offset of elem in key/elem slot
+       Flags    uint32
 }
 
-// Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
-       return mt.Flags&1 != 0
-}
-func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
-       return mt.Flags&2 != 0
-}
-func (mt *SwissMapType) ReflexiveKey() bool { // true if k==k for all keys
-       return mt.Flags&4 != 0
-}
+// Flag values
+const (
+       SwissMapNeedKeyUpdate = 1 << iota
+       SwissMapHashMightPanic
+)
+
 func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
-       return mt.Flags&8 != 0
+       return mt.Flags&SwissMapNeedKeyUpdate != 0
 }
 func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
-       return mt.Flags&16 != 0
+       return mt.Flags&SwissMapHashMightPanic != 0
 }
-
index d2449a65ac3e9822a0e10cf9432e4b7afdadd4b9..f68523a348833b207dfde138fb5c1fe325dc341f 100644 (file)
@@ -52,6 +52,7 @@ var rtPkgs = [...]string{
        "internal/chacha8rand",
        "internal/runtime/sys",
        "internal/abi",
+       "internal/runtime/maps",
        "internal/runtime/math",
        "internal/bytealg",
        "internal/goexperiment",
diff --git a/src/internal/runtime/maps/export_noswiss_test.go b/src/internal/runtime/maps/export_noswiss_test.go
new file mode 100644 (file)
index 0000000..32d6d13
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.swissmap
+
+// This file allows non-GOEXPERIMENT=swissmap builds (i.e., old map builds) to
+// construct a swissmap table for running the tests in this package.
+
+package maps
+
+import (
+       "internal/abi"
+       "unsafe"
+)
+
+type instantiatedGroup[K comparable, V any] struct {
+       ctrls ctrlGroup
+       slots [abi.SwissMapGroupSlots]instantiatedSlot[K, V]
+}
+
+type instantiatedSlot[K comparable, V any] struct {
+       key  K
+       elem V
+}
+
+func NewTestTable[K comparable, V any](length uint64) *table {
+       var m map[K]V
+       mTyp := abi.TypeOf(m)
+       omt := (*abi.OldMapType)(unsafe.Pointer(mTyp))
+
+       var grp instantiatedGroup[K, V]
+       var slot instantiatedSlot[K, V]
+
+       mt := &abi.SwissMapType{
+               Key:      omt.Key,
+               Elem:     omt.Elem,
+               Group:    abi.TypeOf(grp),
+               Hasher:   omt.Hasher,
+               SlotSize: unsafe.Sizeof(slot),
+               ElemOff:  unsafe.Offsetof(slot.elem),
+       }
+       if omt.NeedKeyUpdate() {
+               mt.Flags |= abi.SwissMapNeedKeyUpdate
+       }
+       if omt.HashMightPanic() {
+               mt.Flags |= abi.SwissMapHashMightPanic
+       }
+       return newTable(mt, length)
+}
diff --git a/src/internal/runtime/maps/export_swiss_test.go b/src/internal/runtime/maps/export_swiss_test.go
new file mode 100644 (file)
index 0000000..9a12324
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package maps
+
+import (
+       "internal/abi"
+       "unsafe"
+)
+
+func NewTestTable[K comparable, V any](length uint64) *table {
+       var m map[K]V
+       mTyp := abi.TypeOf(m)
+       mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+       return newTable(mt, length)
+}
index e2512d332ac00f60ec8c61cac16b3c69c1f996ba..2856a7314e699dc1adecfcf13873abe266918721 100644 (file)
@@ -6,7 +6,6 @@ package maps
 
 import (
        "internal/abi"
-       sabi "internal/runtime/maps/internal/abi"
        "unsafe"
 )
 
@@ -16,41 +15,16 @@ const DebugLog = debugLog
 
 var AlignUpPow2 = alignUpPow2
 
-type instantiatedGroup[K comparable, V any] struct {
-       ctrls ctrlGroup
-       slots [sabi.SwissMapGroupSlots]instantiatedSlot[K, V]
-}
-
-type instantiatedSlot[K comparable, V any] struct {
-       key  K
-       elem V
+func (t *table) Type() *abi.SwissMapType {
+       return t.typ
 }
 
-func NewTestTable[K comparable, V any](length uint64) *table {
-       var m map[K]V
-       mTyp := abi.TypeOf(m)
-       omt := (*abi.OldMapType)(unsafe.Pointer(mTyp))
-
-       var grp instantiatedGroup[K, V]
-       var slot instantiatedSlot[K, V]
-
-       mt := &sabi.SwissMapType{
-               Key:      omt.Key,
-               Elem:     omt.Elem,
-               Group:    abi.TypeOf(grp),
-               Hasher:   omt.Hasher,
-               SlotSize: unsafe.Sizeof(slot),
-               ElemOff:  unsafe.Offsetof(slot.elem),
-       }
-       if omt.NeedKeyUpdate() {
-               mt.Flags |= sabi.SwissMapNeedKeyUpdate
-       }
-       if omt.HashMightPanic() {
-               mt.Flags |= sabi.SwissMapHashMightPanic
-       }
-       return newTable(mt, length)
+// Returns the start address of the groups array.
+func (t *table) GroupsStart() unsafe.Pointer {
+       return t.groups.data
 }
 
-func (t *table) Type() *sabi.SwissMapType {
-       return t.typ
+// Returns the length of the groups array.
+func (t *table) GroupsLength() uintptr {
+       return uintptr(t.groups.lengthMask + 1)
 }
index 822e3773eaa82563d6e9339619b8670d37fffe7e..e03ed98c94fc765c3caef1557ab50536f59cf139 100644 (file)
@@ -5,8 +5,8 @@
 package maps
 
 import (
+       "internal/abi"
        "internal/goarch"
-       "internal/runtime/maps/internal/abi"
        "internal/runtime/sys"
        "unsafe"
 )
diff --git a/src/internal/runtime/maps/internal/abi/map_swiss.go b/src/internal/runtime/maps/internal/abi/map_swiss.go
deleted file mode 100644 (file)
index caa0827..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package abi is a temporary copy of the swissmap abi. It will be eliminated
-// once swissmaps are integrated into the runtime.
-package abi
-
-import (
-       "internal/abi"
-       "unsafe"
-)
-
-// Map constants common to several packages
-// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
-const (
-       // Number of slots in a group.
-       SwissMapGroupSlots = 8
-)
-
-type SwissMapType struct {
-       abi.Type
-       Key   *abi.Type
-       Elem  *abi.Type
-       Group *abi.Type // internal type representing a slot group
-       // function for hashing keys (ptr to key, seed) -> hash
-       Hasher     func(unsafe.Pointer, uintptr) uintptr
-       SlotSize   uintptr // size of key/elem slot
-       ElemOff    uintptr // offset of elem in key/elem slot
-       Flags      uint32
-}
-
-// Flag values
-const (
-       SwissMapNeedKeyUpdate = 1 << iota
-       SwissMapHashMightPanic
-)
-
-func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
-       return mt.Flags&SwissMapNeedKeyUpdate != 0
-}
-func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
-       return mt.Flags&SwissMapHashMightPanic != 0
-}
index 53b4a62071a104f9819ca0ca4523fba0996b364e..d11535657b6b9c431a1e4d1450c20531849837b9 100644 (file)
@@ -6,8 +6,8 @@ package maps_test
 
 import (
        "fmt"
+       "internal/abi"
        "internal/runtime/maps"
-       "internal/runtime/maps/internal/abi"
        "math"
        "testing"
        "unsafe"
@@ -444,4 +444,11 @@ func TestTableZeroSizeSlot(t *testing.T) {
        if gotElem != elem {
                t.Errorf("Get(%d) got elem %d want %d", key, gotElem, elem)
        }
+
+       start := tab.GroupsStart()
+       length := tab.GroupsLength()
+       end := unsafe.Pointer(uintptr(start) + length*tab.Type().Group.Size() - 1) // inclusive to ensure we have a valid pointer
+       if uintptr(got) < uintptr(start) || uintptr(got) > uintptr(end) {
+               t.Errorf("elem address outside groups allocation; got %p want [%p, %p]", got, start, end)
+       }
 }
index 3516b92fbacf5b40269a24e347e5a5cdc3578db3..2c13be84688e0506f4db01a3ecc1b61e8e0e4746 100644 (file)
@@ -6,7 +6,7 @@
 package maps
 
 import (
-       "internal/runtime/maps/internal/abi"
+       "internal/abi"
        "unsafe"
 )
 
index 7170fb68fe448f46591a5b7ce14668ae2bc50334..b800858e559d4335d9ad0c20eca5c10af2437fef 100644 (file)
@@ -6,7 +6,7 @@
 package maps
 
 import (
-       sabi "internal/runtime/maps/internal/abi"
+       "internal/abi"
        "unsafe"
 )
 
@@ -24,7 +24,7 @@ func (t *table) checkInvariants() {
        var empty uint64
        for i := uint64(0); i <= t.groups.lengthMask; i++ {
                g := t.groups.group(i)
-               for j := uint32(0); j < sabi.SwissMapGroupSlots; j++ {
+               for j := uint32(0); j < abi.SwissMapGroupSlots; j++ {
                        c := g.ctrls().get(j)
                        switch {
                        case c == ctrlDeleted:
@@ -60,7 +60,7 @@ func (t *table) checkInvariants() {
                panic("invariant failed: found mismatched used slot count")
        }
 
-       growthLeft := (t.capacity*maxAvgGroupLoad)/sabi.SwissMapGroupSlots - t.used - deleted
+       growthLeft := (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - deleted
        if growthLeft != t.growthLeft {
                print("invariant failed: found ", t.growthLeft, " growthLeft, but expected ", growthLeft, "\n")
                t.Print()
@@ -93,7 +93,7 @@ func (t *table) Print() {
 
                g := t.groups.group(i)
                ctrls := g.ctrls()
-               for j := uint32(0); j < sabi.SwissMapGroupSlots; j++ {
+               for j := uint32(0); j < abi.SwissMapGroupSlots; j++ {
                        print("\t\t\tslot ", j, "\n")
 
                        c := ctrls.get(j)
index e1bd37a70a584ccfbdb8d903d3d3d9c8bfa462d4..b3f4545531533580fa2dc0077b728c2de9bb5c14 100644 (file)
@@ -10,7 +10,6 @@ import (
        "flag"
        "fmt"
        "go/token"
-       "internal/abi"
        "internal/goarch"
        "internal/goexperiment"
        "internal/testenv"
@@ -1134,13 +1133,15 @@ var deepEqualTests = []DeepEqualTest{
 }
 
 func TestDeepEqual(t *testing.T) {
-       for _, test := range deepEqualTests {
-               if test.b == (self{}) {
-                       test.b = test.a
-               }
-               if r := DeepEqual(test.a, test.b); r != test.eq {
-                       t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq)
-               }
+       for i, test := range deepEqualTests {
+               t.Run(fmt.Sprint(i), func(t *testing.T) {
+                       if test.b == (self{}) {
+                               test.b = test.a
+                       }
+                       if r := DeepEqual(test.a, test.b); r != test.eq {
+                               t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq)
+                       }
+               })
        }
 }
 
@@ -1273,6 +1274,11 @@ var deepEqualPerfTests = []struct {
 }
 
 func TestDeepEqualAllocs(t *testing.T) {
+       // TODO(prattmic): maps on stack
+       if goexperiment.SwissMap {
+               t.Skipf("Maps on stack not yet implemented")
+       }
+
        for _, tt := range deepEqualPerfTests {
                t.Run(ValueOf(tt.x).Type().String(), func(t *testing.T) {
                        got := testing.AllocsPerRun(100, func() {
@@ -7171,60 +7177,61 @@ func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
        t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
 }
 
-func TestGCBits(t *testing.T) {
-       verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})
+// Building blocks for types seen by the compiler (like [2]Xscalar).
+// The compiler will create the type structures for the derived types,
+// including their GC metadata.
+type Xscalar struct{ x uintptr }
+type Xptr struct{ x *byte }
+type Xptrscalar struct {
+       *byte
+       uintptr
+}
+type Xscalarptr struct {
+       uintptr
+       *byte
+}
+type Xbigptrscalar struct {
+       _ [100]*byte
+       _ [100]uintptr
+}
+
+var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
 
-       // Building blocks for types seen by the compiler (like [2]Xscalar).
-       // The compiler will create the type structures for the derived types,
-       // including their GC metadata.
-       type Xscalar struct{ x uintptr }
-       type Xptr struct{ x *byte }
-       type Xptrscalar struct {
+func init() {
+       // Building blocks for types constructed by reflect.
+       // This code is in a separate block so that code below
+       // cannot accidentally refer to these.
+       // The compiler must NOT see types derived from these
+       // (for example, [2]Scalar must NOT appear in the program),
+       // or else reflect will use it instead of having to construct one.
+       // The goal is to test the construction.
+       type Scalar struct{ x uintptr }
+       type Ptr struct{ x *byte }
+       type Ptrscalar struct {
                *byte
                uintptr
        }
-       type Xscalarptr struct {
+       type Scalarptr struct {
                uintptr
                *byte
        }
-       type Xbigptrscalar struct {
+       type Bigptrscalar struct {
                _ [100]*byte
                _ [100]uintptr
        }
+       type Int64 int64
+       Tscalar = TypeOf(Scalar{})
+       Tint64 = TypeOf(Int64(0))
+       Tptr = TypeOf(Ptr{})
+       Tscalarptr = TypeOf(Scalarptr{})
+       Tptrscalar = TypeOf(Ptrscalar{})
+       Tbigptrscalar = TypeOf(Bigptrscalar{})
+}
 
-       var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
-       {
-               // Building blocks for types constructed by reflect.
-               // This code is in a separate block so that code below
-               // cannot accidentally refer to these.
-               // The compiler must NOT see types derived from these
-               // (for example, [2]Scalar must NOT appear in the program),
-               // or else reflect will use it instead of having to construct one.
-               // The goal is to test the construction.
-               type Scalar struct{ x uintptr }
-               type Ptr struct{ x *byte }
-               type Ptrscalar struct {
-                       *byte
-                       uintptr
-               }
-               type Scalarptr struct {
-                       uintptr
-                       *byte
-               }
-               type Bigptrscalar struct {
-                       _ [100]*byte
-                       _ [100]uintptr
-               }
-               type Int64 int64
-               Tscalar = TypeOf(Scalar{})
-               Tint64 = TypeOf(Int64(0))
-               Tptr = TypeOf(Ptr{})
-               Tscalarptr = TypeOf(Scalarptr{})
-               Tptrscalar = TypeOf(Ptrscalar{})
-               Tbigptrscalar = TypeOf(Bigptrscalar{})
-       }
-
-       empty := []byte{}
+var empty = []byte{}
+
+func TestGCBits(t *testing.T) {
+       verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})
 
        verifyGCBits(t, TypeOf(Xscalar{}), empty)
        verifyGCBits(t, Tscalar, empty)
@@ -7304,95 +7311,7 @@ func TestGCBits(t *testing.T) {
        verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
        verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
 
-       if goexperiment.SwissMap {
-               const bucketCount = abi.SwissMapBucketCount
-
-               hdr := make([]byte, bucketCount/goarch.PtrSize)
-
-               verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
-                       verifyGCBits(t, MapBucketOf(k, e), want)
-                       verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
-               }
-               verifyMapBucket(t,
-                       Tscalar, Tptr,
-                       map[Xscalar]Xptr(nil),
-                       join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t,
-                       Tscalarptr, Tptr,
-                       map[Xscalarptr]Xptr(nil),
-                       join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t, Tint64, Tptr,
-                       map[int64]Xptr(nil),
-                       join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t,
-                       Tscalar, Tscalar,
-                       map[Xscalar]Xscalar(nil),
-                       empty)
-               verifyMapBucket(t,
-                       ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
-                       map[[2]Xscalarptr][3]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
-                       map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
-                       map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
-                       map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
-                       map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
-       } else {
-               const bucketCount = abi.OldMapBucketCount
-
-               hdr := make([]byte, bucketCount/goarch.PtrSize)
-
-               verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
-                       verifyGCBits(t, MapBucketOf(k, e), want)
-                       verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
-               }
-               verifyMapBucket(t,
-                       Tscalar, Tptr,
-                       map[Xscalar]Xptr(nil),
-                       join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t,
-                       Tscalarptr, Tptr,
-                       map[Xscalarptr]Xptr(nil),
-                       join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t, Tint64, Tptr,
-                       map[int64]Xptr(nil),
-                       join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t,
-                       Tscalar, Tscalar,
-                       map[Xscalar]Xscalar(nil),
-                       empty)
-               verifyMapBucket(t,
-                       ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
-                       map[[2]Xscalarptr][3]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
-                       map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
-                       map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
-                       map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
-               verifyMapBucket(t,
-                       ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
-                       map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
-                       join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
-       }
+       testGCBitsMap(t)
 }
 
 func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
diff --git a/src/reflect/export_noswiss_test.go b/src/reflect/export_noswiss_test.go
new file mode 100644 (file)
index 0000000..34e5e92
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright 2024 Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.swissmap
+
+package reflect
+
+import (
+       "internal/abi"
+       "unsafe"
+)
+
+func MapBucketOf(x, y Type) Type {
+       return toType(bucketOf(x.common(), y.common()))
+}
+
+func CachedBucketOf(m Type) Type {
+       t := m.(*rtype)
+       if Kind(t.t.Kind_&abi.KindMask) != Map {
+               panic("not map")
+       }
+       tt := (*mapType)(unsafe.Pointer(t))
+       return toType(tt.Bucket)
+}
diff --git a/src/reflect/export_swiss_test.go b/src/reflect/export_swiss_test.go
new file mode 100644 (file)
index 0000000..ac3cd0a
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright 2024 Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package reflect
+
+func MapGroupOf(x, y Type) Type {
+       grp, _ := groupAndSlotOf(x, y)
+       return grp
+}
index 30a0e823afe5f4fd0dc403521bfab9f939b12ed5..7ab3e957fc4e560f1c2cd0f8e5601b48ea61fbb7 100644 (file)
@@ -91,19 +91,6 @@ var GCBits = gcbits
 
 func gcbits(any) []byte // provided by runtime
 
-func MapBucketOf(x, y Type) Type {
-       return toType(bucketOf(x.common(), y.common()))
-}
-
-func CachedBucketOf(m Type) Type {
-       t := m.(*rtype)
-       if Kind(t.t.Kind_&abi.KindMask) != Map {
-               panic("not map")
-       }
-       tt := (*mapType)(unsafe.Pointer(t))
-       return toType(tt.Bucket)
-}
-
 type EmbedWithUnexpMeth struct{}
 
 func (EmbedWithUnexpMeth) f() {}
diff --git a/src/reflect/map_noswiss_test.go b/src/reflect/map_noswiss_test.go
new file mode 100644 (file)
index 0000000..52fcf89
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.swissmap
+
+package reflect_test
+
+import (
+       "internal/abi"
+       "internal/goarch"
+       . "reflect"
+       "testing"
+)
+
+func testGCBitsMap(t *testing.T) {
+       const bucketCount = abi.OldMapBucketCount
+
+       hdr := make([]byte, bucketCount/goarch.PtrSize)
+
+       verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
+               verifyGCBits(t, MapBucketOf(k, e), want)
+               verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
+       }
+       verifyMapBucket(t,
+               Tscalar, Tptr,
+               map[Xscalar]Xptr(nil),
+               join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
+       verifyMapBucket(t,
+               Tscalarptr, Tptr,
+               map[Xscalarptr]Xptr(nil),
+               join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
+       verifyMapBucket(t, Tint64, Tptr,
+               map[int64]Xptr(nil),
+               join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
+       verifyMapBucket(t,
+               Tscalar, Tscalar,
+               map[Xscalar]Xscalar(nil),
+               empty)
+       verifyMapBucket(t,
+               ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
+               map[[2]Xscalarptr][3]Xptrscalar(nil),
+               join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
+       verifyMapBucket(t,
+               ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+               map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+               join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+       verifyMapBucket(t,
+               ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+               map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+               join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+       verifyMapBucket(t,
+               ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+               map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+               join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
+       verifyMapBucket(t,
+               ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+               map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+               join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
+}
index 8978b377c767265979e1020c028d41c700433d12..f6a56f7a6529bdeb3e3e1093623651cf49671f6e 100644 (file)
@@ -8,7 +8,7 @@ package reflect
 
 import (
        "internal/abi"
-       "internal/goarch"
+       "internal/runtime/maps"
        "unsafe"
 )
 
@@ -55,6 +55,8 @@ func MapOf(key, elem Type) Type {
                }
        }
 
+       group, slot := groupAndSlotOf(key, elem)
+
        // Make a map type.
        // Note: flag values must match those used in the TMAP case
        // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
@@ -65,32 +67,19 @@ func MapOf(key, elem Type) Type {
        mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
        mt.Key = ktyp
        mt.Elem = etyp
-       mt.Bucket = bucketOf(ktyp, etyp)
+       mt.Group = group.common()
        mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
                return typehash(ktyp, p, seed)
        }
+       mt.SlotSize = slot.Size()
+       mt.ElemOff = slot.Field(1).Offset
        mt.Flags = 0
-       if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
-               mt.KeySize = uint8(goarch.PtrSize)
-               mt.Flags |= 1 // indirect key
-       } else {
-               mt.KeySize = uint8(ktyp.Size_)
-       }
-       if etyp.Size_ > abi.SwissMapMaxElemBytes {
-               mt.ValueSize = uint8(goarch.PtrSize)
-               mt.Flags |= 2 // indirect value
-       } else {
-               mt.ValueSize = uint8(etyp.Size_)
-       }
-       mt.BucketSize = uint16(mt.Bucket.Size_)
-       if isReflexive(ktyp) {
-               mt.Flags |= 4
-       }
+       // TODO(prattmic): indirect key/elem flags
        if needKeyUpdate(ktyp) {
-               mt.Flags |= 8
+               mt.Flags |= abi.SwissMapNeedKeyUpdate
        }
        if hashMightPanic(ktyp) {
-               mt.Flags |= 16
+               mt.Flags |= abi.SwissMapHashMightPanic
        }
        mt.PtrToThis = 0
 
@@ -98,67 +87,41 @@ func MapOf(key, elem Type) Type {
        return ti.(Type)
 }
 
-func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
-       if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
-               ktyp = ptrTo(ktyp)
-       }
-       if etyp.Size_ > abi.SwissMapMaxElemBytes {
-               etyp = ptrTo(etyp)
-       }
-
-       // Prepare GC data if any.
-       // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
-       // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
-       // Note that since the key and value are known to be <= 128 bytes,
-       // they're guaranteed to have bitmaps instead of GC programs.
-       var gcdata *byte
-       var ptrdata uintptr
-
-       size := abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
-       if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
-               panic("reflect: bad size computation in MapOf")
-       }
-
-       if ktyp.Pointers() || etyp.Pointers() {
-               nptr := (abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
-               n := (nptr + 7) / 8
-
-               // Runtime needs pointer masks to be a multiple of uintptr in size.
-               n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
-               mask := make([]byte, n)
-               base := uintptr(abi.SwissMapBucketCount / goarch.PtrSize)
-
-               if ktyp.Pointers() {
-                       emitGCMask(mask, base, ktyp, abi.SwissMapBucketCount)
-               }
-               base += abi.SwissMapBucketCount * ktyp.Size_ / goarch.PtrSize
-
-               if etyp.Pointers() {
-                       emitGCMask(mask, base, etyp, abi.SwissMapBucketCount)
-               }
-               base += abi.SwissMapBucketCount * etyp.Size_ / goarch.PtrSize
-
-               word := base
-               mask[word/8] |= 1 << (word % 8)
-               gcdata = &mask[0]
-               ptrdata = (word + 1) * goarch.PtrSize
-
-               // overflow word must be last
-               if ptrdata != size {
-                       panic("reflect: bad layout computation in MapOf")
-               }
-       }
-
-       b := &abi.Type{
-               Align_:   goarch.PtrSize,
-               Size_:    size,
-               Kind_:    abi.Struct,
-               PtrBytes: ptrdata,
-               GCData:   gcdata,
-       }
-       s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
-       b.Str = resolveReflectName(newName(s, "", false, false))
-       return b
+func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
+       // TODO(prattmic): indirect key/elem flags
+
+       // type group struct {
+       //     ctrl uint64
+       //     slots [abi.SwissMapGroupSlots]struct {
+       //         key  keyType
+       //         elem elemType
+       //     }
+       // }
+
+       fields := []StructField{
+               {
+                       Name: "Key",
+                       Type: ktyp,
+               },
+               {
+                       Name: "Elem",
+                       Type: etyp,
+               },
+       }
+       slot := StructOf(fields)
+
+       fields = []StructField{
+               {
+                       Name: "Ctrl",
+                       Type: TypeFor[uint64](),
+               },
+               {
+                       Name: "Slots",
+                       Type: ArrayOf(abi.SwissMapGroupSlots, slot),
+               },
+       }
+       group := StructOf(fields)
+       return group, slot
 }
 
 var stringType = rtypeOf("")
@@ -181,7 +144,8 @@ func (v Value) MapIndex(key Value) Value {
 
        var e unsafe.Pointer
        // TODO(#54766): temporarily disable specialized variants.
-       if false && (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+       //if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+       if false {
                k := *(*string)(key.ptr)
                e = mapaccess_faststr(v.typ(), v.pointer(), k)
        } else {
@@ -219,12 +183,12 @@ func (v Value) MapKeys() []Value {
        if m != nil {
                mlen = maplen(m)
        }
-       var it hiter
+       var it maps.Iter
        mapiterinit(v.typ(), m, &it)
        a := make([]Value, mlen)
        var i int
        for i = 0; i < len(a); i++ {
-               key := it.key
+               key := it.Key()
                if key == nil {
                        // Someone deleted an entry from the map since we
                        // called maplen above. It's a data race, but nothing
@@ -237,45 +201,23 @@ func (v Value) MapKeys() []Value {
        return a[:i]
 }
 
-// hiter's structure matches runtime.hiter's structure.
-// Having a clone here allows us to embed a map iterator
-// inside type MapIter so that MapIters can be re-used
-// without doing any allocations.
-type hiter struct {
-       key         unsafe.Pointer
-       elem        unsafe.Pointer
-       t           unsafe.Pointer
-       h           unsafe.Pointer
-       buckets     unsafe.Pointer
-       bptr        unsafe.Pointer
-       overflow    *[]unsafe.Pointer
-       oldoverflow *[]unsafe.Pointer
-       startBucket uintptr
-       offset      uint8
-       wrapped     bool
-       B           uint8
-       i           uint8
-       bucket      uintptr
-       checkBucket uintptr
-}
-
-func (h *hiter) initialized() bool {
-       return h.t != nil
-}
-
 // A MapIter is an iterator for ranging over a map.
 // See [Value.MapRange].
 type MapIter struct {
        m     Value
-       hiter hiter
+       hiter maps.Iter
 }
 
+// TODO(prattmic): only for sharing the linkname declarations with old maps.
+// Remove with old maps.
+type hiter = maps.Iter
+
 // Key returns the key of iter's current map entry.
 func (iter *MapIter) Key() Value {
-       if !iter.hiter.initialized() {
+       if !iter.hiter.Initialized() {
                panic("MapIter.Key called before Next")
        }
-       iterkey := iter.hiter.key
+       iterkey := iter.hiter.Key()
        if iterkey == nil {
                panic("MapIter.Key called on exhausted iterator")
        }
@@ -290,10 +232,10 @@ func (iter *MapIter) Key() Value {
 // As in Go, the key must be assignable to v's type and
 // must not be derived from an unexported field.
 func (v Value) SetIterKey(iter *MapIter) {
-       if !iter.hiter.initialized() {
+       if !iter.hiter.Initialized() {
                panic("reflect: Value.SetIterKey called before Next")
        }
-       iterkey := iter.hiter.key
+       iterkey := iter.hiter.Key()
        if iterkey == nil {
                panic("reflect: Value.SetIterKey called on exhausted iterator")
        }
@@ -315,10 +257,10 @@ func (v Value) SetIterKey(iter *MapIter) {
 
 // Value returns the value of iter's current map entry.
 func (iter *MapIter) Value() Value {
-       if !iter.hiter.initialized() {
+       if !iter.hiter.Initialized() {
                panic("MapIter.Value called before Next")
        }
-       iterelem := iter.hiter.elem
+       iterelem := iter.hiter.Elem()
        if iterelem == nil {
                panic("MapIter.Value called on exhausted iterator")
        }
@@ -333,10 +275,10 @@ func (iter *MapIter) Value() Value {
 // As in Go, the value must be assignable to v's type and
 // must not be derived from an unexported field.
 func (v Value) SetIterValue(iter *MapIter) {
-       if !iter.hiter.initialized() {
+       if !iter.hiter.Initialized() {
                panic("reflect: Value.SetIterValue called before Next")
        }
-       iterelem := iter.hiter.elem
+       iterelem := iter.hiter.Elem()
        if iterelem == nil {
                panic("reflect: Value.SetIterValue called on exhausted iterator")
        }
@@ -363,15 +305,15 @@ func (iter *MapIter) Next() bool {
        if !iter.m.IsValid() {
                panic("MapIter.Next called on an iterator that does not have an associated map Value")
        }
-       if !iter.hiter.initialized() {
+       if !iter.hiter.Initialized() {
                mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
        } else {
-               if iter.hiter.key == nil {
+               if iter.hiter.Key() == nil {
                        panic("MapIter.Next called on exhausted iterator")
                }
                mapiternext(&iter.hiter)
        }
-       return iter.hiter.key != nil
+       return iter.hiter.Key() != nil
 }
 
 // Reset modifies iter to iterate over v.
@@ -383,7 +325,7 @@ func (iter *MapIter) Reset(v Value) {
                v.mustBe(Map)
        }
        iter.m = v
-       iter.hiter = hiter{}
+       iter.hiter = maps.Iter{}
 }
 
 // MapRange returns a range iterator for a map.
@@ -425,7 +367,8 @@ func (v Value) SetMapIndex(key, elem Value) {
        tt := (*mapType)(unsafe.Pointer(v.typ()))
 
        // TODO(#54766): temporarily disable specialized variants.
-       if false && (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+       //if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+       if false {
                k := *(*string)(key.ptr)
                if elem.typ() == nil {
                        mapdelete_faststr(v.typ(), v.pointer(), k)
diff --git a/src/reflect/map_swiss_test.go b/src/reflect/map_swiss_test.go
new file mode 100644 (file)
index 0000000..621140a
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package reflect_test
+
+import (
+       "reflect"
+       "testing"
+)
+
+func testGCBitsMap(t *testing.T) {
+       // Unlike old maps, we don't manually construct GC data for swiss maps,
+       // instead using the public reflect API in groupAndSlotOf.
+}
+
+// See also runtime_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+       st := reflect.TypeFor[struct{}]()
+       grp := reflect.MapGroupOf(st, st)
+
+       // internal/runtime/maps when create pointers to slots, even if slots
+       // are size 0. We should have reserved an extra word to ensure that
+       // pointers to the zero-size type at the end of group are valid.
+       if grp.Size() <= 8 {
+               t.Errorf("Group size got %d want >8", grp.Size())
+       }
+}
index 9e6d81a77c01e1b07328027ede5d0280afc8e483..4638afa6b896ba7c5709f422074cf9913d86d761 100644 (file)
@@ -11,6 +11,12 @@ import (
        "unsafe"
 )
 
+const RuntimeHmapSize = unsafe.Sizeof(hmap{})
+
+func OverLoadFactor(count int, B uint8) bool {
+       return overLoadFactor(count, B)
+}
+
 func MapBucketsCount(m map[int]int) int {
        h := *(**hmap)(unsafe.Pointer(&m))
        return 1 << h.B
index ac0308fce060ead9a2910538ed229a39823bcce2..55a7d6ff043d7c839cb66f465c6762a91f232ef9 100644 (file)
@@ -6,53 +6,6 @@
 
 package runtime
 
-import (
-       "internal/abi"
-       "unsafe"
-)
-
-func MapBucketsCount(m map[int]int) int {
-       h := *(**hmap)(unsafe.Pointer(&m))
-       return 1 << h.B
-}
-
-func MapBucketsPointerIsNil(m map[int]int) bool {
-       h := *(**hmap)(unsafe.Pointer(&m))
-       return h.buckets == nil
-}
-
 func MapTombstoneCheck(m map[int]int) {
-       // Make sure emptyOne and emptyRest are distributed correctly.
-       // We should have a series of filled and emptyOne cells, followed by
-       // a series of emptyRest cells.
-       h := *(**hmap)(unsafe.Pointer(&m))
-       i := any(m)
-       t := *(**maptype)(unsafe.Pointer(&i))
-
-       for x := 0; x < 1<<h.B; x++ {
-               b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
-               n := 0
-               for b := b0; b != nil; b = b.overflow(t) {
-                       for i := 0; i < abi.SwissMapBucketCount; i++ {
-                               if b.tophash[i] != emptyRest {
-                                       n++
-                               }
-                       }
-               }
-               k := 0
-               for b := b0; b != nil; b = b.overflow(t) {
-                       for i := 0; i < abi.SwissMapBucketCount; i++ {
-                               if k < n && b.tophash[i] == emptyRest {
-                                       panic("early emptyRest")
-                               }
-                               if k >= n && b.tophash[i] != emptyRest {
-                                       panic("late non-emptyRest")
-                               }
-                               if k == n-1 && b.tophash[i] == emptyOne {
-                                       panic("last non-emptyRest entry is emptyOne")
-                               }
-                               k++
-                       }
-               }
-       }
+       // TODO
 }
index 3bde1aea29590e79f656ccba773500153331d346..5c8d2b18c6b9fec0a615777b150eb32037c82f7c 100644 (file)
@@ -481,12 +481,6 @@ func (rw *RWMutex) Unlock() {
        rw.rw.unlock()
 }
 
-const RuntimeHmapSize = unsafe.Sizeof(hmap{})
-
-func OverLoadFactor(count int, B uint8) bool {
-       return overLoadFactor(count, B)
-}
-
 func LockOSCounts() (external, internal uint32) {
        gp := getg()
        if gp.m.lockedExt+gp.m.lockedInt == 0 {
index f2482393dd554bc9344763e6db8ce8fbe44c61f7..5cc237698228bdf4a92a6641f362830b44e39379 100644 (file)
@@ -7,29 +7,31 @@
 package runtime
 
 import (
+       "internal/abi"
+       "internal/runtime/maps"
        "unsafe"
 )
 
-func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer {
        throw("mapaccess1_fast32 unimplemented")
        panic("unreachable")
 }
 
-func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool) {
        throw("mapaccess2_fast32 unimplemented")
        panic("unreachable")
 }
 
-func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer {
        throw("mapassign_fast32 unimplemented")
        panic("unreachable")
 }
 
-func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
        throw("mapassign_fast32ptr unimplemented")
        panic("unreachable")
 }
 
-func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
+func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) {
        throw("mapdelete_fast32 unimplemented")
 }
index 07ed9934c39c34008bf8c529351ac654b4c95e97..bf892fe83ff92636145323b4e00949ba1785b8d4 100644 (file)
@@ -7,29 +7,31 @@
 package runtime
 
 import (
+       "internal/abi"
+       "internal/runtime/maps"
        "unsafe"
 )
 
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer {
        throw("mapaccess1_fast64 unimplemented")
        panic("unreachable")
 }
 
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
+func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool) {
        throw("mapaccess2_fast64 unimplemented")
        panic("unreachable")
 }
 
-func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer {
        throw("mapassign_fast64 unimplemented")
        panic("unreachable")
 }
 
-func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
        throw("mapassign_fast64ptr unimplemented")
        panic("unreachable")
 }
 
-func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
+func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) {
        throw("mapdelete_fast64 unimplemented")
 }
index 41090d7381981b20ab521b9c16109ee86926b2a7..b0fb54315a6ed1427939a2a766f905baadb1447d 100644 (file)
@@ -7,24 +7,26 @@
 package runtime
 
 import (
+       "internal/abi"
+       "internal/runtime/maps"
        "unsafe"
 )
 
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer {
        throw("mapaccess1_faststr unimplemented")
        panic("unreachable")
 }
 
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
+func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool) {
        throw("mapaccess2_faststr unimplemented")
        panic("unreachable")
 }
 
-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
+func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer {
        throw("mapassign_faststr unimplemented")
        panic("unreachable")
 }
 
-func mapdelete_faststr(t *maptype, h *hmap, ky string) {
+func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string) {
        throw("mapdelete_faststr unimplemented")
 }
index 72d7e6d362a9c237550fd7b4c4d9ad71dd34e879..bda448471c308ad173be4910ca6c6451a1f8b35b 100644 (file)
@@ -8,11 +8,37 @@ package runtime_test
 
 import (
        "internal/abi"
+       "internal/goarch"
        "runtime"
        "slices"
        "testing"
 )
 
+func TestHmapSize(t *testing.T) {
+       // The structure of hmap is defined in runtime/map.go
+       // and in cmd/compile/internal/reflectdata/map.go and must be in sync.
+       // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
+       var hmapSize = uintptr(8 + 5*goarch.PtrSize)
+       if runtime.RuntimeHmapSize != hmapSize {
+               t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
+       }
+}
+
+func TestLoadFactor(t *testing.T) {
+       for b := uint8(0); b < 20; b++ {
+               count := 13 * (1 << b) / 2 // 6.5
+               if b == 0 {
+                       count = 8
+               }
+               if runtime.OverLoadFactor(count, b) {
+                       t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
+               }
+               if !runtime.OverLoadFactor(count+1, b) {
+                       t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
+               }
+       }
+}
+
 func TestMapIterOrder(t *testing.T) {
        sizes := []int{3, 7, 9, 15}
        if abi.OldMapBucketCountBits >= 5 {
index 590fccc407a6b1f2637dc6df31b8f99331efc40b..bd0d05d092c67e59cd8729fc7e321e6b46fae198 100644 (file)
 
 package runtime
 
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index).  To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times).  When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
-//        4.00         2.13        20.77         3.00         4.00
-//        4.50         4.05        17.30         3.25         4.50
-//        5.00         6.85        14.77         3.50         5.00
-//        5.50        10.55        12.94         3.75         5.50
-//        6.00        15.27        11.67         4.00         6.00
-//        6.50        20.90        10.79         4.25         6.50
-//        7.00        27.14        10.15         4.50         7.00
-//        7.50        34.03         9.73         4.75         7.50
-//        8.00        41.10         9.40         5.00         8.00
-//
-// %overflow   = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe    = # of entries to check when looking up a present key
-// missprobe   = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
 import (
        "internal/abi"
-       "internal/goarch"
-       "internal/runtime/atomic"
+       "internal/runtime/maps"
        "internal/runtime/math"
        "internal/runtime/sys"
        "unsafe"
 )
 
-type maptype = abi.SwissMapType
-
 const (
-       // Maximum number of key/elem pairs a bucket can hold.
-       bucketCntBits = abi.SwissMapBucketCountBits
-
-       // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
-       // Because of minimum alignment rules, bucketCnt is known to be at least 8.
-       // Represent as loadFactorNum/loadFactorDen, to allow integer math.
-       loadFactorDen = 2
-       loadFactorNum = loadFactorDen * abi.SwissMapBucketCount * 13 / 16
-
-       // data offset should be the size of the bmap struct, but needs to be
-       // aligned correctly. For amd64p32 this means 64-bit alignment
-       // even though pointers are 32 bit.
-       dataOffset = unsafe.Offsetof(struct {
-               b bmap
-               v int64
-       }{}.v)
-
-       // Possible tophash values. We reserve a few possibilities for special marks.
-       // Each bucket (including its overflow buckets, if any) will have either all or none of its
-       // entries in the evacuated* states (except during the evacuate() method, which only happens
-       // during map writes and thus no one else can observe the map during that time).
-       emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
-       emptyOne       = 1 // this cell is empty
-       evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
-       evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
-       evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
-       minTopHash     = 5 // minimum tophash for a normal filled cell.
-
-       // flags
-       iterator     = 1 // there may be an iterator using buckets
-       oldIterator  = 2 // there may be an iterator using oldbuckets
-       hashWriting  = 4 // a goroutine is writing to the map
-       sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
-       // sentinel bucket ID for iterator checks
-       noCheck = 1<<(8*goarch.PtrSize) - 1
+       // TODO: remove? These are used by tests but not the actual map
+       loadFactorNum = 7
+       loadFactorDen = 8
 )
 
-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
-       return x <= emptyOne
-}
-
-// A header for a Go map.
-type hmap struct {
-       // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
-       // Make sure this stays in sync with the compiler's definition.
-       count     int // # live cells == size of map.  Must be first (used by len() builtin)
-       flags     uint8
-       B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
-       noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
-       hash0     uint32 // hash seed
-
-       buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
-       oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
-       nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
-
-       extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
-       // If both key and elem do not contain pointers and are inline, then we mark bucket
-       // type as containing no pointers. This avoids scanning such maps.
-       // However, bmap.overflow is a pointer. In order to keep overflow buckets
-       // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
-       // overflow and oldoverflow are only used if key and elem do not contain pointers.
-       // overflow contains overflow buckets for hmap.buckets.
-       // oldoverflow contains overflow buckets for hmap.oldbuckets.
-       // The indirection allows to store a pointer to the slice in hiter.
-       overflow    *[]*bmap
-       oldoverflow *[]*bmap
-
-       // nextOverflow holds a pointer to a free overflow bucket.
-       nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
-       // tophash generally contains the top byte of the hash value
-       // for each key in this bucket. If tophash[0] < minTopHash,
-       // tophash[0] is a bucket evacuation state instead.
-       tophash [abi.SwissMapBucketCount]uint8
-       // Followed by bucketCnt keys and then bucketCnt elems.
-       // NOTE: packing all the keys together and then all the elems together makes the
-       // code a bit more complicated than alternating key/elem/key/elem/... but it allows
-       // us to eliminate padding which would be needed for, e.g., map[int64]int8.
-       // Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
-       key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
-       elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
-       t           *maptype
-       h           *hmap
-       buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
-       bptr        *bmap          // current bucket
-       overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
-       oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
-       startBucket uintptr        // bucket iteration started at
-       offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
-       wrapped     bool           // already wrapped around from end of bucket array to beginning
-       B           uint8
-       i           uint8
-       bucket      uintptr
-       checkBucket uintptr
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
-       // Masking the shift amount allows overflow checks to be elided.
-       return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
-       return bucketShift(b) - 1
-}
+type maptype = abi.SwissMapType
 
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
-       top := uint8(hash >> (goarch.PtrSize*8 - 8))
-       if top < minTopHash {
-               top += minTopHash
+func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
+       if int64(int(hint)) != hint {
+               hint = 0
        }
-       return top
-}
-
-func evacuated(b *bmap) bool {
-       h := b.tophash[0]
-       return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
-       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
+       return makemap(t, int(hint), m)
 }
 
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
-       return add(unsafe.Pointer(b), dataOffset)
+// makemap_small implements Go map creation for make(map[k]v) and
+// make(map[k]v, hint) when hint is known to be at most bucketCnt
+// at compile time and the map needs to be allocated on the heap.
+func makemap_small() *maps.Map {
+       panic("unimplemented")
 }
 
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
-       // We trigger same-size map growth if there are
-       // as many overflow buckets as buckets.
-       // We need to be able to count to 1<<h.B.
-       if h.B < 16 {
-               h.noverflow++
-               return
-       }
-       // Increment with probability 1/(1<<(h.B-15)).
-       // When we reach 1<<15 - 1, we will have approximately
-       // as many overflow buckets as buckets.
-       mask := uint32(1)<<(h.B-15) - 1
-       // Example: if h.B == 18, then mask == 7,
-       // and rand() & 7 == 0 with probability 1/8.
-       if uint32(rand())&mask == 0 {
-               h.noverflow++
+// checkHint verifies that hint is reasonable, adjusting as necessary.
+func checkHint(t *abi.SwissMapType, hint int) uint64 {
+       if hint <= 0 {
+               return 0
        }
-}
 
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
-       var ovf *bmap
-       if h.extra != nil && h.extra.nextOverflow != nil {
-               // We have preallocated overflow buckets available.
-               // See makeBucketArray for more details.
-               ovf = h.extra.nextOverflow
-               if ovf.overflow(t) == nil {
-                       // We're not at the end of the preallocated overflow buckets. Bump the pointer.
-                       h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
-               } else {
-                       // This is the last preallocated overflow bucket.
-                       // Reset the overflow pointer on this bucket,
-                       // which was set to a non-nil sentinel value.
-                       ovf.setoverflow(t, nil)
-                       h.extra.nextOverflow = nil
+       capacity := uint64(hint)
+
+       // Ensure a groups allocation for a capacity this high doesn't exceed
+       // the maximum allocation size.
+       //
+       // TODO(prattmic): Once we split tables, a large hint will result in
+       // splitting the tables up front, which will use smaller individual
+       // allocations.
+       //
+       // TODO(prattmic): This logic is largely duplicated from maps.newTable
+       // / maps.(*table).reset.
+       capacity, overflow := alignUpPow2(capacity)
+       if !overflow {
+               groupCount := capacity / abi.SwissMapGroupSlots
+               mem, overflow := math.MulUintptr(uintptr(groupCount), t.Group.Size_)
+               if overflow || mem > maxAlloc {
+                       return 0
                }
        } else {
-               ovf = (*bmap)(newobject(t.Bucket))
-       }
-       h.incrnoverflow()
-       if !t.Bucket.Pointers() {
-               h.createOverflow()
-               *h.extra.overflow = append(*h.extra.overflow, ovf)
-       }
-       b.setoverflow(t, ovf)
-       return ovf
-}
-
-func (h *hmap) createOverflow() {
-       if h.extra == nil {
-               h.extra = new(mapextra)
-       }
-       if h.extra.overflow == nil {
-               h.extra.overflow = new([]*bmap)
-       }
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
-       if int64(int(hint)) != hint {
-               hint = 0
+               return 0
        }
-       return makemap(t, int(hint), h)
-}
 
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-func makemap_small() *hmap {
-       h := new(hmap)
-       h.hash0 = uint32(rand())
-       return h
+       return capacity
 }
 
 // makemap implements Go map creation for make(map[k]v, hint).
@@ -299,90 +72,27 @@ func makemap_small() *hmap {
 // can be created on the stack, h and/or bucket may be non-nil.
 // If h != nil, the map can be created directly in h.
 // If h.buckets != nil, bucket pointed to can be used as the first bucket.
-func makemap(t *maptype, hint int, h *hmap) *hmap {
-       mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
-       if overflow || mem > maxAlloc {
-               hint = 0
-       }
-
-       // initialize Hmap
-       if h == nil {
-               h = new(hmap)
-       }
-       h.hash0 = uint32(rand())
+func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
+       capacity := checkHint(t, hint)
 
-       // Find the size parameter B which will hold the requested # of elements.
-       // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
-       B := uint8(0)
-       for overLoadFactor(hint, B) {
-               B++
-       }
-       h.B = B
-
-       // allocate initial hash table
-       // if B == 0, the buckets field is allocated lazily later (in mapassign)
-       // If hint is large zeroing this memory could take a while.
-       if h.B != 0 {
-               var nextOverflow *bmap
-               h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
-               if nextOverflow != nil {
-                       h.extra = new(mapextra)
-                       h.extra.nextOverflow = nextOverflow
-               }
-       }
-
-       return h
+       // TODO: use existing m
+       return maps.NewTable(t, capacity)
 }
 
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be alloced and
-// otherwise dirtyalloc will be cleared and reused as backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
-       base := bucketShift(b)
-       nbuckets := base
-       // For small b, overflow buckets are unlikely.
-       // Avoid the overhead of the calculation.
-       if b >= 4 {
-               // Add on the estimated number of overflow buckets
-               // required to insert the median number of elements
-               // used with this value of b.
-               nbuckets += bucketShift(b - 4)
-               sz := t.Bucket.Size_ * nbuckets
-               up := roundupsize(sz, !t.Bucket.Pointers())
-               if up != sz {
-                       nbuckets = up / t.Bucket.Size_
-               }
-       }
-
-       if dirtyalloc == nil {
-               buckets = newarray(t.Bucket, int(nbuckets))
-       } else {
-               // dirtyalloc was previously generated by
-               // the above newarray(t.Bucket, int(nbuckets))
-               // but may not be empty.
-               buckets = dirtyalloc
-               size := t.Bucket.Size_ * nbuckets
-               if t.Bucket.Pointers() {
-                       memclrHasPointers(buckets, size)
-               } else {
-                       memclrNoHeapPointers(buckets, size)
-               }
+// alignUpPow2 rounds n up to the next power of 2.
+//
+// Returns true if round up causes overflow.
+//
+// TODO(prattmic): deduplicate from internal/runtime/maps.
+func alignUpPow2(n uint64) (uint64, bool) {
+       if n == 0 {
+               return 0, false
        }
-
-       if base != nbuckets {
-               // We preallocated some overflow buckets.
-               // To keep the overhead of tracking these overflow buckets to a minimum,
-               // we use the convention that if a preallocated overflow bucket's overflow
-               // pointer is nil, then there are more available by bumping the pointer.
-               // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
-               nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
-               last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
-               last.setoverflow(t, (*bmap)(buckets))
+       v := (uint64(1) << sys.Len64(n-1))
+       if v == 0 {
+               return 0, true
        }
-       return buckets, nextOverflow
+       return v, false
 }
 
 // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
@@ -390,197 +100,89 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
 // the key is not in the map.
 // NOTE: The returned pointer may keep the whole map live, so don't
 // hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       if raceenabled && h != nil {
+func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+       // TODO: concurrent checks.
+       if raceenabled && m != nil {
                callerpc := sys.GetCallerPC()
                pc := abi.FuncPCABIInternal(mapaccess1)
-               racereadpc(unsafe.Pointer(h), callerpc, pc)
+               racereadpc(unsafe.Pointer(m), callerpc, pc)
                raceReadObjectPC(t.Key, key, callerpc, pc)
        }
-       if msanenabled && h != nil {
+       if msanenabled && m != nil {
                msanread(key, t.Key.Size_)
        }
-       if asanenabled && h != nil {
+       if asanenabled && m != nil {
                asanread(key, t.Key.Size_)
        }
-       if h == nil || h.count == 0 {
+
+       if m == nil || m.Used() == 0 {
                if err := mapKeyError(t, key); err != nil {
                        panic(err) // see issue 23734
                }
                return unsafe.Pointer(&zeroVal[0])
        }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-bucketloop:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if t.Key.Equal(key, k) {
-                               e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       e = *((*unsafe.Pointer)(e))
-                               }
-                               return e
-                       }
-               }
+
+       elem, ok := m.Get(key)
+       if !ok {
+               return unsafe.Pointer(&zeroVal[0])
        }
-       return unsafe.Pointer(&zeroVal[0])
+       return elem
 }
 
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
-       if raceenabled && h != nil {
+func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+       // TODO: concurrent checks.
+       if raceenabled && m != nil {
                callerpc := sys.GetCallerPC()
                pc := abi.FuncPCABIInternal(mapaccess2)
-               racereadpc(unsafe.Pointer(h), callerpc, pc)
+               racereadpc(unsafe.Pointer(m), callerpc, pc)
                raceReadObjectPC(t.Key, key, callerpc, pc)
        }
-       if msanenabled && h != nil {
+       if msanenabled && m != nil {
                msanread(key, t.Key.Size_)
        }
-       if asanenabled && h != nil {
+       if asanenabled && m != nil {
                asanread(key, t.Key.Size_)
        }
-       if h == nil || h.count == 0 {
+
+       if m == nil || m.Used() == 0 {
                if err := mapKeyError(t, key); err != nil {
                        panic(err) // see issue 23734
                }
                return unsafe.Pointer(&zeroVal[0]), false
        }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map read and map write")
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-bucketloop:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if t.Key.Equal(key, k) {
-                               e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       e = *((*unsafe.Pointer)(e))
-                               }
-                               return e, true
-                       }
-               }
-       }
-       return unsafe.Pointer(&zeroVal[0]), false
-}
 
-// returns both key and elem. Used by map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
-       if h == nil || h.count == 0 {
-               return nil, nil
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-       m := bucketMask(h.B)
-       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-       if c := h.oldbuckets; c != nil {
-               if !h.sameSizeGrow() {
-                       // There used to be half as many buckets; mask down one more power of two.
-                       m >>= 1
-               }
-               oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-               if !evacuated(oldb) {
-                       b = oldb
-               }
-       }
-       top := tophash(hash)
-bucketloop:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if t.Key.Equal(key, k) {
-                               e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       e = *((*unsafe.Pointer)(e))
-                               }
-                               return k, e
-                       }
-               }
+       elem, ok := m.Get(key)
+       if !ok {
+               return unsafe.Pointer(&zeroVal[0]), false
        }
-       return nil, nil
+       return elem, true
 }
 
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
-       e := mapaccess1(t, h, key)
+func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
+       e := mapaccess1(t, m, key)
        if e == unsafe.Pointer(&zeroVal[0]) {
                return zero
        }
        return e
 }
 
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
-       e := mapaccess1(t, h, key)
+func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+       e := mapaccess1(t, m, key)
        if e == unsafe.Pointer(&zeroVal[0]) {
                return zero, false
        }
        return e, true
 }
 
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       if h == nil {
+func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+       // TODO: concurrent checks.
+       if m == nil {
                panic(plainError("assignment to entry in nil map"))
        }
        if raceenabled {
                callerpc := sys.GetCallerPC()
                pc := abi.FuncPCABIInternal(mapassign)
-               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               racewritepc(unsafe.Pointer(m), callerpc, pc)
                raceReadObjectPC(t.Key, key, callerpc, pc)
        }
        if msanenabled {
@@ -589,762 +191,91 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
        if asanenabled {
                asanread(key, t.Key.Size_)
        }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-       hash := t.Hasher(key, uintptr(h.hash0))
-
-       // Set hashWriting after calling t.hasher, since t.hasher may panic,
-       // in which case we have not actually done a write.
-       h.flags ^= hashWriting
 
-       if h.buckets == nil {
-               h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
-       }
-
-again:
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       top := tophash(hash)
-
-       var inserti *uint8
-       var insertk unsafe.Pointer
-       var elem unsafe.Pointer
-bucketloop:
-       for {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if isEmpty(b.tophash[i]) && inserti == nil {
-                                       inserti = &b.tophash[i]
-                                       insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                                       elem = add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               }
-                               if b.tophash[i] == emptyRest {
-                                       break bucketloop
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if !t.Key.Equal(key, k) {
-                               continue
-                       }
-                       // already have a mapping for key. Update it.
-                       if t.NeedKeyUpdate() {
-                               typedmemmove(t.Key, k, key)
-                       }
-                       elem = add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                       goto done
-               }
-               ovf := b.overflow(t)
-               if ovf == nil {
-                       break
-               }
-               b = ovf
-       }
-
-       // Did not find mapping for key. Allocate new cell & add entry.
-
-       // If we hit the max load factor or we have too many overflow buckets,
-       // and we're not already in the middle of growing, start growing.
-       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-               hashGrow(t, h)
-               goto again // Growing the table invalidates everything, so try again
-       }
-
-       if inserti == nil {
-               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-               newb := h.newoverflow(t, b)
-               inserti = &newb.tophash[0]
-               insertk = add(unsafe.Pointer(newb), dataOffset)
-               elem = add(insertk, abi.SwissMapBucketCount*uintptr(t.KeySize))
-       }
-
-       // store new key/elem at insert position
-       if t.IndirectKey() {
-               kmem := newobject(t.Key)
-               *(*unsafe.Pointer)(insertk) = kmem
-               insertk = kmem
-       }
-       if t.IndirectElem() {
-               vmem := newobject(t.Elem)
-               *(*unsafe.Pointer)(elem) = vmem
-       }
-       typedmemmove(t.Key, insertk, key)
-       *inserti = top
-       h.count++
-
-done:
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-       if t.IndirectElem() {
-               elem = *((*unsafe.Pointer)(elem))
-       }
-       return elem
+       return m.PutSlot(key)
 }
 
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
-       if raceenabled && h != nil {
+func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+       // TODO: concurrent checks.
+       if raceenabled && m != nil {
                callerpc := sys.GetCallerPC()
                pc := abi.FuncPCABIInternal(mapdelete)
-               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               racewritepc(unsafe.Pointer(m), callerpc, pc)
                raceReadObjectPC(t.Key, key, callerpc, pc)
        }
-       if msanenabled && h != nil {
+       if msanenabled && m != nil {
                msanread(key, t.Key.Size_)
        }
-       if asanenabled && h != nil {
+       if asanenabled && m != nil {
                asanread(key, t.Key.Size_)
        }
-       if h == nil || h.count == 0 {
+
+       if m == nil || m.Used() == 0 {
                if err := mapKeyError(t, key); err != nil {
                        panic(err) // see issue 23734
                }
                return
        }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       hash := t.Hasher(key, uintptr(h.hash0))
 
-       // Set hashWriting after calling t.hasher, since t.hasher may panic,
-       // in which case we have not actually done a write (delete).
-       h.flags ^= hashWriting
-
-       bucket := hash & bucketMask(h.B)
-       if h.growing() {
-               growWork(t, h, bucket)
-       }
-       b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-       bOrig := b
-       top := tophash(hash)
-search:
-       for ; b != nil; b = b.overflow(t) {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       if b.tophash[i] != top {
-                               if b.tophash[i] == emptyRest {
-                                       break search
-                               }
-                               continue
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-                       k2 := k
-                       if t.IndirectKey() {
-                               k2 = *((*unsafe.Pointer)(k2))
-                       }
-                       if !t.Key.Equal(key, k2) {
-                               continue
-                       }
-                       // Only clear key if there are pointers in it.
-                       if t.IndirectKey() {
-                               *(*unsafe.Pointer)(k) = nil
-                       } else if t.Key.Pointers() {
-                               memclrHasPointers(k, t.Key.Size_)
-                       }
-                       e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                       if t.IndirectElem() {
-                               *(*unsafe.Pointer)(e) = nil
-                       } else if t.Elem.Pointers() {
-                               memclrHasPointers(e, t.Elem.Size_)
-                       } else {
-                               memclrNoHeapPointers(e, t.Elem.Size_)
-                       }
-                       b.tophash[i] = emptyOne
-                       // If the bucket now ends in a bunch of emptyOne states,
-                       // change those to emptyRest states.
-                       // It would be nice to make this a separate function, but
-                       // for loops are not currently inlineable.
-                       if i == abi.SwissMapBucketCount-1 {
-                               if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-                                       goto notLast
-                               }
-                       } else {
-                               if b.tophash[i+1] != emptyRest {
-                                       goto notLast
-                               }
-                       }
-                       for {
-                               b.tophash[i] = emptyRest
-                               if i == 0 {
-                                       if b == bOrig {
-                                               break // beginning of initial bucket, we're done.
-                                       }
-                                       // Find previous bucket, continue at its last entry.
-                                       c := b
-                                       for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-                                       }
-                                       i = abi.SwissMapBucketCount - 1
-                               } else {
-                                       i--
-                               }
-                               if b.tophash[i] != emptyOne {
-                                       break
-                               }
-                       }
-               notLast:
-                       h.count--
-                       // Reset the hash seed to make it more difficult for attackers to
-                       // repeatedly trigger hash collisions. See issue 25237.
-                       if h.count == 0 {
-                               h.hash0 = uint32(rand())
-                       }
-                       break search
-               }
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
+       m.Delete(key)
 }
 
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
+// mapiterinit initializes the Iter struct used for ranging over maps.
+// The Iter struct pointed to by 'it' is allocated on the stack
 // by the compilers order pass or on the heap by reflect_mapiterinit.
 // Both need to have zeroed hiter since the struct contains pointers.
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
-       if raceenabled && h != nil {
+func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+       if raceenabled && m != nil {
                callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
-       }
-
-       it.t = t
-       if h == nil || h.count == 0 {
-               return
-       }
-
-       if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
-               throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
-       }
-       it.h = h
-
-       // grab snapshot of bucket state
-       it.B = h.B
-       it.buckets = h.buckets
-       if !t.Bucket.Pointers() {
-               // Allocate the current slice and remember pointers to both current and old.
-               // This preserves all relevant overflow buckets alive even if
-               // the table grows and/or overflow buckets are added to the table
-               // while we are iterating.
-               h.createOverflow()
-               it.overflow = h.extra.overflow
-               it.oldoverflow = h.extra.oldoverflow
+               racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit))
        }
 
-       // decide where to start
-       r := uintptr(rand())
-       it.startBucket = r & bucketMask(h.B)
-       it.offset = uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
-
-       // iterator state
-       it.bucket = it.startBucket
-
-       // Remember we have an iterator.
-       // Can run concurrently with another mapiterinit().
-       if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
-               atomic.Or8(&h.flags, iterator|oldIterator)
-       }
-
-       mapiternext(it)
+       it.Init(t, m)
+       it.Next()
 }
 
-func mapiternext(it *hiter) {
-       h := it.h
+func mapiternext(it *maps.Iter) {
+       // TODO: concurrent checks.
        if raceenabled {
                callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
+               racereadpc(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapiternext))
        }
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map iteration and map write")
-       }
-       t := it.t
-       bucket := it.bucket
-       b := it.bptr
-       i := it.i
-       checkBucket := it.checkBucket
 
-next:
-       if b == nil {
-               if bucket == it.startBucket && it.wrapped {
-                       // end of iteration
-                       it.key = nil
-                       it.elem = nil
-                       return
-               }
-               if h.growing() && it.B == h.B {
-                       // Iterator was started in the middle of a grow, and the grow isn't done yet.
-                       // If the bucket we're looking at hasn't been filled in yet (i.e. the old
-                       // bucket hasn't been evacuated) then we need to iterate through the old
-                       // bucket and only return the ones that will be migrated to this bucket.
-                       oldbucket := bucket & it.h.oldbucketmask()
-                       b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-                       if !evacuated(b) {
-                               checkBucket = bucket
-                       } else {
-                               b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
-                               checkBucket = noCheck
-                       }
-               } else {
-                       b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
-                       checkBucket = noCheck
-               }
-               bucket++
-               if bucket == bucketShift(it.B) {
-                       bucket = 0
-                       it.wrapped = true
-               }
-               i = 0
-       }
-       for ; i < abi.SwissMapBucketCount; i++ {
-               offi := (i + it.offset) & (abi.SwissMapBucketCount - 1)
-               if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
-                       // TODO: emptyRest is hard to use here, as we start iterating
-                       // in the middle of a bucket. It's feasible, just tricky.
-                       continue
-               }
-               k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
-               if t.IndirectKey() {
-                       k = *((*unsafe.Pointer)(k))
-               }
-               e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
-               if checkBucket != noCheck && !h.sameSizeGrow() {
-                       // Special case: iterator was started during a grow to a larger size
-                       // and the grow is not done yet. We're working on a bucket whose
-                       // oldbucket has not been evacuated yet. Or at least, it wasn't
-                       // evacuated when we started the bucket. So we're iterating
-                       // through the oldbucket, skipping any keys that will go
-                       // to the other new bucket (each oldbucket expands to two
-                       // buckets during a grow).
-                       if t.ReflexiveKey() || t.Key.Equal(k, k) {
-                               // If the item in the oldbucket is not destined for
-                               // the current new bucket in the iteration, skip it.
-                               hash := t.Hasher(k, uintptr(h.hash0))
-                               if hash&bucketMask(it.B) != checkBucket {
-                                       continue
-                               }
-                       } else {
-                               // Hash isn't repeatable if k != k (NaNs).  We need a
-                               // repeatable and randomish choice of which direction
-                               // to send NaNs during evacuation. We'll use the low
-                               // bit of tophash to decide which way NaNs go.
-                               // NOTE: this case is why we need two evacuate tophash
-                               // values, evacuatedX and evacuatedY, that differ in
-                               // their low bit.
-                               if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
-                                       continue
-                               }
-                       }
-               }
-               if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
-                       !(t.ReflexiveKey() || t.Key.Equal(k, k)) {
-                       // This is the golden data, we can return it.
-                       // OR
-                       // key!=key, so the entry can't be deleted or updated, so we can just return it.
-                       // That's lucky for us because when key!=key we can't look it up successfully.
-                       it.key = k
-                       if t.IndirectElem() {
-                               e = *((*unsafe.Pointer)(e))
-                       }
-                       it.elem = e
-               } else {
-                       // The hash table has grown since the iterator was started.
-                       // The golden data for this key is now somewhere else.
-                       // Check the current hash table for the data.
-                       // This code handles the case where the key
-                       // has been deleted, updated, or deleted and reinserted.
-                       // NOTE: we need to regrab the key as it has potentially been
-                       // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
-                       rk, re := mapaccessK(t, h, k)
-                       if rk == nil {
-                               continue // key has been deleted
-                       }
-                       it.key = rk
-                       it.elem = re
-               }
-               it.bucket = bucket
-               if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
-                       it.bptr = b
-               }
-               it.i = i + 1
-               it.checkBucket = checkBucket
-               return
-       }
-       b = b.overflow(t)
-       i = 0
-       goto next
+       it.Next()
 }
 
 // mapclear deletes all keys from a map.
-func mapclear(t *maptype, h *hmap) {
-       if raceenabled && h != nil {
+func mapclear(t *abi.SwissMapType, m *maps.Map) {
+       // TODO: concurrent checks.
+       if raceenabled && m != nil {
                callerpc := sys.GetCallerPC()
                pc := abi.FuncPCABIInternal(mapclear)
-               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               racewritepc(unsafe.Pointer(m), callerpc, pc)
        }
 
-       if h == nil || h.count == 0 {
+       if m == nil || m.Used() == 0 {
                return
        }
 
-       if h.flags&hashWriting != 0 {
-               fatal("concurrent map writes")
-       }
-
-       h.flags ^= hashWriting
-
-       // Mark buckets empty, so existing iterators can be terminated, see issue #59411.
-       markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
-               for i := uintptr(0); i <= mask; i++ {
-                       b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
-                       for ; b != nil; b = b.overflow(t) {
-                               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                                       b.tophash[i] = emptyRest
-                               }
-                       }
-               }
-       }
-       markBucketsEmpty(h.buckets, bucketMask(h.B))
-       if oldBuckets := h.oldbuckets; oldBuckets != nil {
-               markBucketsEmpty(oldBuckets, h.oldbucketmask())
-       }
-
-       h.flags &^= sameSizeGrow
-       h.oldbuckets = nil
-       h.nevacuate = 0
-       h.noverflow = 0
-       h.count = 0
-
-       // Reset the hash seed to make it more difficult for attackers to
-       // repeatedly trigger hash collisions. See issue 25237.
-       h.hash0 = uint32(rand())
-
-       // Keep the mapextra allocation but clear any extra information.
-       if h.extra != nil {
-               *h.extra = mapextra{}
-       }
-
-       // makeBucketArray clears the memory pointed to by h.buckets
-       // and recovers any overflow buckets by generating them
-       // as if h.buckets was newly alloced.
-       _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
-       if nextOverflow != nil {
-               // If overflow buckets are created then h.extra
-               // will have been allocated during initial bucket creation.
-               h.extra.nextOverflow = nextOverflow
-       }
-
-       if h.flags&hashWriting == 0 {
-               fatal("concurrent map writes")
-       }
-       h.flags &^= hashWriting
-}
-
-func hashGrow(t *maptype, h *hmap) {
-       // If we've hit the load factor, get bigger.
-       // Otherwise, there are too many overflow buckets,
-       // so keep the same number of buckets and "grow" laterally.
-       bigger := uint8(1)
-       if !overLoadFactor(h.count+1, h.B) {
-               bigger = 0
-               h.flags |= sameSizeGrow
-       }
-       oldbuckets := h.buckets
-       newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
-
-       flags := h.flags &^ (iterator | oldIterator)
-       if h.flags&iterator != 0 {
-               flags |= oldIterator
-       }
-       // commit the grow (atomic wrt gc)
-       h.B += bigger
-       h.flags = flags
-       h.oldbuckets = oldbuckets
-       h.buckets = newbuckets
-       h.nevacuate = 0
-       h.noverflow = 0
-
-       if h.extra != nil && h.extra.overflow != nil {
-               // Promote current overflow buckets to the old generation.
-               if h.extra.oldoverflow != nil {
-                       throw("oldoverflow is not nil")
-               }
-               h.extra.oldoverflow = h.extra.overflow
-               h.extra.overflow = nil
-       }
-       if nextOverflow != nil {
-               if h.extra == nil {
-                       h.extra = new(mapextra)
-               }
-               h.extra.nextOverflow = nextOverflow
-       }
-
-       // the actual copying of the hash table data is done incrementally
-       // by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
-       return count > abi.SwissMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
-       // If the threshold is too low, we do extraneous work.
-       // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
-       // "too many" means (approximately) as many overflow buckets as regular buckets.
-       // See incrnoverflow for more details.
-       if B > 15 {
-               B = 15
-       }
-       // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
-       return noverflow >= uint16(1)<<(B&15)
-}
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
-       return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
-       return h.flags&sameSizeGrow != 0
-}
-
-//go:linkname sameSizeGrowForIssue69110Test
-func sameSizeGrowForIssue69110Test(h *hmap) bool {
-       return h.sameSizeGrow()
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
-       oldB := h.B
-       if !h.sameSizeGrow() {
-               oldB--
-       }
-       return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
-       return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
-       // make sure we evacuate the oldbucket corresponding
-       // to the bucket we're about to use
-       evacuate(t, h, bucket&h.oldbucketmask())
-
-       // evacuate one more oldbucket to make progress on growing
-       if h.growing() {
-               evacuate(t, h, h.nevacuate)
-       }
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
-       b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
-       return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
-       b *bmap          // current destination bucket
-       i int            // key/elem index into b
-       k unsafe.Pointer // pointer to current key storage
-       e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
-       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-       newbit := h.noldbuckets()
-       if !evacuated(b) {
-               // TODO: reuse overflow buckets instead of using new ones, if there
-               // is no iterator using the old buckets.  (If !oldIterator.)
-
-               // xy contains the x and y (low and high) evacuation destinations.
-               var xy [2]evacDst
-               x := &xy[0]
-               x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-               x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.e = add(x.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
-
-               if !h.sameSizeGrow() {
-                       // Only calculate y pointers if we're growing bigger.
-                       // Otherwise GC can see bad pointers.
-                       y := &xy[1]
-                       y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-                       y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.e = add(y.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
-               }
-
-               for ; b != nil; b = b.overflow(t) {
-                       k := add(unsafe.Pointer(b), dataOffset)
-                       e := add(k, abi.SwissMapBucketCount*uintptr(t.KeySize))
-                       for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
-                               top := b.tophash[i]
-                               if isEmpty(top) {
-                                       b.tophash[i] = evacuatedEmpty
-                                       continue
-                               }
-                               if top < minTopHash {
-                                       throw("bad map state")
-                               }
-                               k2 := k
-                               if t.IndirectKey() {
-                                       k2 = *((*unsafe.Pointer)(k2))
-                               }
-                               var useY uint8
-                               if !h.sameSizeGrow() {
-                                       // Compute hash to make our evacuation decision (whether we need
-                                       // to send this key/elem to bucket x or bucket y).
-                                       hash := t.Hasher(k2, uintptr(h.hash0))
-                                       if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
-                                               // If key != key (NaNs), then the hash could be (and probably
-                                               // will be) entirely different from the old hash. Moreover,
-                                               // it isn't reproducible. Reproducibility is required in the
-                                               // presence of iterators, as our evacuation decision must
-                                               // match whatever decision the iterator made.
-                                               // Fortunately, we have the freedom to send these keys either
-                                               // way. Also, tophash is meaningless for these kinds of keys.
-                                               // We let the low bit of tophash drive the evacuation decision.
-                                               // We recompute a new random tophash for the next level so
-                                               // these keys will get evenly distributed across all buckets
-                                               // after multiple grows.
-                                               useY = top & 1
-                                               top = tophash(hash)
-                                       } else {
-                                               if hash&newbit != 0 {
-                                                       useY = 1
-                                               }
-                                       }
-                               }
-
-                               if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
-                                       throw("bad evacuatedN")
-                               }
-
-                               b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
-                               dst := &xy[useY]                 // evacuation destination
-
-                               if dst.i == abi.SwissMapBucketCount {
-                                       dst.b = h.newoverflow(t, dst.b)
-                                       dst.i = 0
-                                       dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.e = add(dst.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
-                               }
-                               dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-                               if t.IndirectKey() {
-                                       *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
-                               } else {
-                                       typedmemmove(t.Key, dst.k, k) // copy elem
-                               }
-                               if t.IndirectElem() {
-                                       *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
-                               } else {
-                                       typedmemmove(t.Elem, dst.e, e)
-                               }
-                               dst.i++
-                               // These updates might push these pointers past the end of the
-                               // key or elem arrays.  That's ok, as we have the overflow pointer
-                               // at the end of the bucket to protect against pointing past the
-                               // end of the bucket.
-                               dst.k = add(dst.k, uintptr(t.KeySize))
-                               dst.e = add(dst.e, uintptr(t.ValueSize))
-                       }
-               }
-               // Unlink the overflow buckets & clear key/elem to help GC.
-               if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-                       b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-                       // Preserve b.tophash because the evacuation
-                       // state is maintained there.
-                       ptr := add(b, dataOffset)
-                       n := uintptr(t.BucketSize) - dataOffset
-                       memclrHasPointers(ptr, n)
-               }
-       }
-
-       if oldbucket == h.nevacuate {
-               advanceEvacuationMark(h, t, newbit)
-       }
-}
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
-       h.nevacuate++
-       // Experiments suggest that 1024 is overkill by at least an order of magnitude.
-       // Put it in there as a safeguard anyway, to ensure O(1) behavior.
-       stop := h.nevacuate + 1024
-       if stop > newbit {
-               stop = newbit
-       }
-       for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
-               h.nevacuate++
-       }
-       if h.nevacuate == newbit { // newbit == # of oldbuckets
-               // Growing is all done. Free old main bucket array.
-               h.oldbuckets = nil
-               // Can discard old overflow buckets as well.
-               // If they are still referenced by an iterator,
-               // then the iterator holds a pointers to the slice.
-               if h.extra != nil {
-                       h.extra.oldoverflow = nil
-               }
-               h.flags &^= sameSizeGrow
-       }
+       m.Clear()
 }
 
 // Reflect stubs. Called from ../reflect/asm_*.s
 
 //go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
+func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
        // Check invariants and reflects math.
        if t.Key.Equal == nil {
                throw("runtime.reflect_makemap: unsupported map key type")
        }
-       if t.Key.Size_ > abi.SwissMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
-               t.Key.Size_ <= abi.SwissMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
-               throw("key size wrong")
-       }
-       if t.Elem.Size_ > abi.SwissMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
-               t.Elem.Size_ <= abi.SwissMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
-               throw("elem size wrong")
-       }
-       if t.Key.Align_ > abi.SwissMapBucketCount {
-               throw("key align too big")
-       }
-       if t.Elem.Align_ > abi.SwissMapBucketCount {
-               throw("elem align too big")
-       }
-       if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
-               throw("key size not a multiple of key align")
-       }
-       if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
-               throw("elem size not a multiple of elem align")
-       }
-       if abi.SwissMapBucketCount < 8 {
-               throw("bucketsize too small for proper alignment")
-       }
-       if dataOffset%uintptr(t.Key.Align_) != 0 {
-               throw("need padding in bucket (key)")
-       }
-       if dataOffset%uintptr(t.Elem.Align_) != 0 {
-               throw("need padding in bucket (elem)")
-       }
+       // TODO: other checks
 
        return makemap(t, cap, nil)
 }
 
 //go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-       elem, ok := mapaccess2(t, h, key)
+func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+       elem, ok := mapaccess2(t, m, key)
        if !ok {
                // reflect wants nil for a missing element
                elem = nil
@@ -1353,8 +284,8 @@ func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 }
 
 //go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
-       elem, ok := mapaccess2_faststr(t, h, key)
+func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer {
+       elem, ok := mapaccess2_faststr(t, m, key)
        if !ok {
                // reflect wants nil for a missing element
                elem = nil
@@ -1363,74 +294,74 @@ func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
 }
 
 //go:linkname reflect_mapassign reflect.mapassign0
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
-       p := mapassign(t, h, key)
+func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
+       p := mapassign(t, m, key)
        typedmemmove(t.Elem, p, elem)
 }
 
 //go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
-func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
-       p := mapassign_faststr(t, h, key)
+func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) {
+       p := mapassign_faststr(t, m, key)
        typedmemmove(t.Elem, p, elem)
 }
 
 //go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
-       mapdelete(t, h, key)
+func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+       mapdelete(t, m, key)
 }
 
 //go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
-       mapdelete_faststr(t, h, key)
+func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) {
+       mapdelete_faststr(t, m, key)
 }
 
 //go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
-       mapiterinit(t, h, it)
+func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+       mapiterinit(t, m, it)
 }
 
 //go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
+func reflect_mapiternext(it *maps.Iter) {
        mapiternext(it)
 }
 
 //go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
-       return it.key
+func reflect_mapiterkey(it *maps.Iter) unsafe.Pointer {
+       return it.Key()
 }
 
 //go:linkname reflect_mapiterelem reflect.mapiterelem
-func reflect_mapiterelem(it *hiter) unsafe.Pointer {
-       return it.elem
+func reflect_mapiterelem(it *maps.Iter) unsafe.Pointer {
+       return it.Elem()
 }
 
 //go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
-       if h == nil {
+func reflect_maplen(m *maps.Map) int {
+       if m == nil {
                return 0
        }
        if raceenabled {
                callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+               racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen))
        }
-       return h.count
+       return int(m.Used())
 }
 
 //go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *maptype, h *hmap) {
-       mapclear(t, h)
+func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) {
+       mapclear(t, m)
 }
 
 //go:linkname reflectlite_maplen internal/reflectlite.maplen
-func reflectlite_maplen(h *hmap) int {
-       if h == nil {
+func reflectlite_maplen(m *maps.Map) int {
+       if m == nil {
                return 0
        }
        if raceenabled {
                callerpc := sys.GetCallerPC()
-               racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+               racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen))
        }
-       return h.count
+       return int(m.Used())
 }
 
 // mapinitnoop is a no-op function known the Go linker; if a given global
@@ -1445,169 +376,19 @@ func mapinitnoop()
 //go:linkname mapclone maps.clone
 func mapclone(m any) any {
        e := efaceOf(&m)
-       e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
+       e.data = unsafe.Pointer(mapclone2((*abi.SwissMapType)(unsafe.Pointer(e._type)), (*maps.Map)(e.data)))
        return m
 }
 
-// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows
-// and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket.
-func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
-       for i := 0; i < abi.SwissMapBucketCount; i++ {
-               if isEmpty(src.tophash[i]) {
-                       continue
-               }
+func mapclone2(t *abi.SwissMapType, src *maps.Map) *maps.Map {
+       dst := makemap(t, int(src.Used()), nil)
 
-               for ; pos < abi.SwissMapBucketCount; pos++ {
-                       if isEmpty(dst.tophash[pos]) {
-                               break
-                       }
-               }
-
-               if pos == abi.SwissMapBucketCount {
-                       dst = h.newoverflow(t, dst)
-                       pos = 0
-               }
-
-               srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
-               srcEle := add(unsafe.Pointer(src), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
-               dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
-               dstEle := add(unsafe.Pointer(dst), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
-
-               dst.tophash[pos] = src.tophash[i]
-               if t.IndirectKey() {
-                       srcK = *(*unsafe.Pointer)(srcK)
-                       if t.NeedKeyUpdate() {
-                               kStore := newobject(t.Key)
-                               typedmemmove(t.Key, kStore, srcK)
-                               srcK = kStore
-                       }
-                       // Note: if NeedKeyUpdate is false, then the memory
-                       // used to store the key is immutable, so we can share
-                       // it between the original map and its clone.
-                       *(*unsafe.Pointer)(dstK) = srcK
-               } else {
-                       typedmemmove(t.Key, dstK, srcK)
-               }
-               if t.IndirectElem() {
-                       srcEle = *(*unsafe.Pointer)(srcEle)
-                       eStore := newobject(t.Elem)
-                       typedmemmove(t.Elem, eStore, srcEle)
-                       *(*unsafe.Pointer)(dstEle) = eStore
-               } else {
-                       typedmemmove(t.Elem, dstEle, srcEle)
-               }
-               pos++
-               h.count++
+       var iter maps.Iter
+       iter.Init(t, src)
+       for iter.Next(); iter.Key() != nil; iter.Next() {
+               dst.Put(iter.Key(), iter.Elem())
        }
-       return dst, pos
-}
-
-func mapclone2(t *maptype, src *hmap) *hmap {
-       hint := src.count
-       if overLoadFactor(hint, src.B) {
-               // Note: in rare cases (e.g. during a same-sized grow) the map
-               // can be overloaded. Make sure we don't allocate a destination
-               // bucket array larger than the source bucket array.
-               // This will cause the cloned map to be overloaded also,
-               // but that's better than crashing. See issue 69110.
-               hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
-       }
-       dst := makemap(t, hint, nil)
-       dst.hash0 = src.hash0
-       dst.nevacuate = 0
-       // flags do not need to be copied here, just like a new map has no flags.
-
-       if src.count == 0 {
-               return dst
-       }
-
-       if src.flags&hashWriting != 0 {
-               fatal("concurrent map clone and map write")
-       }
-
-       if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
-               // Quick copy for small maps.
-               dst.buckets = newobject(t.Bucket)
-               dst.count = src.count
-               typedmemmove(t.Bucket, dst.buckets, src.buckets)
-               return dst
-       }
-
-       if dst.B == 0 {
-               dst.buckets = newobject(t.Bucket)
-       }
-       dstArraySize := int(bucketShift(dst.B))
-       srcArraySize := int(bucketShift(src.B))
-       for i := 0; i < dstArraySize; i++ {
-               dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
-               pos := 0
-               for j := 0; j < srcArraySize; j += dstArraySize {
-                       srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
-                       for srcBmap != nil {
-                               dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
-                               srcBmap = srcBmap.overflow(t)
-                       }
-               }
-       }
-
-       if src.oldbuckets == nil {
-               return dst
-       }
-
-       oldB := src.B
-       srcOldbuckets := src.oldbuckets
-       if !src.sameSizeGrow() {
-               oldB--
-       }
-       oldSrcArraySize := int(bucketShift(oldB))
-
-       for i := 0; i < oldSrcArraySize; i++ {
-               srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
-               if evacuated(srcBmap) {
-                       continue
-               }
-
-               if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src
-                       dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
-                       for dstBmap.overflow(t) != nil {
-                               dstBmap = dstBmap.overflow(t)
-                       }
-                       pos := 0
-                       for srcBmap != nil {
-                               dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
-                               srcBmap = srcBmap.overflow(t)
-                       }
-                       continue
-               }
-
-               // oldB < dst.B, so a single source bucket may go to multiple destination buckets.
-               // Process entries one at a time.
-               for srcBmap != nil {
-                       // move from oldBlucket to new bucket
-                       for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                               if isEmpty(srcBmap.tophash[i]) {
-                                       continue
-                               }
-
-                               if src.flags&hashWriting != 0 {
-                                       fatal("concurrent map clone and map write")
-                               }
 
-                               srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
-                               if t.IndirectKey() {
-                                       srcK = *((*unsafe.Pointer)(srcK))
-                               }
-
-                               srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-                               if t.IndirectElem() {
-                                       srcEle = *((*unsafe.Pointer)(srcEle))
-                               }
-                               dstEle := mapassign(t, dst, srcK)
-                               typedmemmove(t.Elem, dstEle, srcEle)
-                       }
-                       srcBmap = srcBmap.overflow(t)
-               }
-       }
        return dst
 }
 
@@ -1615,127 +396,14 @@ func mapclone2(t *maptype, src *hmap) *hmap {
 //
 //go:linkname keys maps.keys
 func keys(m any, p unsafe.Pointer) {
-       e := efaceOf(&m)
-       t := (*maptype)(unsafe.Pointer(e._type))
-       h := (*hmap)(e.data)
-
-       if h == nil || h.count == 0 {
-               return
-       }
-       s := (*slice)(p)
-       r := int(rand())
-       offset := uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
-       if h.B == 0 {
-               copyKeys(t, h, (*bmap)(h.buckets), s, offset)
-               return
-       }
-       arraySize := int(bucketShift(h.B))
-       buckets := h.buckets
-       for i := 0; i < arraySize; i++ {
-               bucket := (i + r) & (arraySize - 1)
-               b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
-               copyKeys(t, h, b, s, offset)
-       }
-
-       if h.growing() {
-               oldArraySize := int(h.noldbuckets())
-               for i := 0; i < oldArraySize; i++ {
-                       bucket := (i + r) & (oldArraySize - 1)
-                       b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
-                       if evacuated(b) {
-                               continue
-                       }
-                       copyKeys(t, h, b, s, offset)
-               }
-       }
-       return
-}
-
-func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
-       for b != nil {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       offi := (i + uintptr(offset)) & (abi.SwissMapBucketCount - 1)
-                       if isEmpty(b.tophash[offi]) {
-                               continue
-                       }
-                       if h.flags&hashWriting != 0 {
-                               fatal("concurrent map read and map write")
-                       }
-                       k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
-                       if t.IndirectKey() {
-                               k = *((*unsafe.Pointer)(k))
-                       }
-                       if s.len >= s.cap {
-                               fatal("concurrent map read and map write")
-                       }
-                       typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
-                       s.len++
-               }
-               b = b.overflow(t)
-       }
+       // Currently unused in the maps package.
+       panic("unimplemented")
 }
 
 // values for implementing maps.values
 //
 //go:linkname values maps.values
 func values(m any, p unsafe.Pointer) {
-       e := efaceOf(&m)
-       t := (*maptype)(unsafe.Pointer(e._type))
-       h := (*hmap)(e.data)
-       if h == nil || h.count == 0 {
-               return
-       }
-       s := (*slice)(p)
-       r := int(rand())
-       offset := uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
-       if h.B == 0 {
-               copyValues(t, h, (*bmap)(h.buckets), s, offset)
-               return
-       }
-       arraySize := int(bucketShift(h.B))
-       buckets := h.buckets
-       for i := 0; i < arraySize; i++ {
-               bucket := (i + r) & (arraySize - 1)
-               b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
-               copyValues(t, h, b, s, offset)
-       }
-
-       if h.growing() {
-               oldArraySize := int(h.noldbuckets())
-               for i := 0; i < oldArraySize; i++ {
-                       bucket := (i + r) & (oldArraySize - 1)
-                       b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
-                       if evacuated(b) {
-                               continue
-                       }
-                       copyValues(t, h, b, s, offset)
-               }
-       }
-       return
-}
-
-func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
-       for b != nil {
-               for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
-                       offi := (i + uintptr(offset)) & (abi.SwissMapBucketCount - 1)
-                       if isEmpty(b.tophash[offi]) {
-                               continue
-                       }
-
-                       if h.flags&hashWriting != 0 {
-                               fatal("concurrent map read and map write")
-                       }
-
-                       ele := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
-                       if t.IndirectElem() {
-                               ele = *((*unsafe.Pointer)(ele))
-                       }
-                       if s.len >= s.cap {
-                               fatal("concurrent map read and map write")
-                       }
-                       typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
-                       s.len++
-               }
-               b = b.overflow(t)
-       }
+       // Currently unused in the maps package.
+       panic("unimplemented")
 }
index 78db4aa9268be6840fb64c80e2500ea6e8576514..aa019e1c31a97c5ded5d44b0aec40efbe51b75d1 100644 (file)
@@ -8,17 +8,41 @@ package runtime_test
 
 import (
        "internal/abi"
-       "runtime"
+       "internal/goarch"
+       "internal/runtime/maps"
        "slices"
        "testing"
+       "unsafe"
 )
 
+func TestHmapSize(t *testing.T) {
+       // The structure of Map is defined in internal/runtime/maps/map.go
+       // and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync.
+       // The size of Map should be 72 bytes on 64 bit and 56 bytes on 32 bit platforms.
+       wantSize := uintptr(4*goarch.PtrSize + 5*8)
+       gotSize := unsafe.Sizeof(maps.Map{})
+       if gotSize != wantSize {
+               t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
+       }
+}
+
+// See also reflect_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+       var m map[struct{}]struct{}
+       mTyp := abi.TypeOf(m)
+       mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+
+       // internal/runtime/maps when create pointers to slots, even if slots
+       // are size 0. The compiler should have reserved an extra word to
+       // ensure that pointers to the zero-size type at the end of group are
+       // valid.
+       if mt.Group.Size() <= 8 {
+               t.Errorf("Group size got %d want >8", mt.Group.Size())
+       }
+}
+
 func TestMapIterOrder(t *testing.T) {
        sizes := []int{3, 7, 9, 15}
-       if abi.SwissMapBucketCountBits >= 5 {
-               // it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
-               t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.SwissMapBucketCountBits)
-       }
        for _, n := range sizes {
                for i := 0; i < 1000; i++ {
                        // Make m be {0: true, 1: true, ..., n-1: true}.
@@ -50,139 +74,6 @@ func TestMapIterOrder(t *testing.T) {
        }
 }
 
-const bs = abi.SwissMapBucketCount
-
-// belowOverflow should be a pretty-full pair of buckets;
-// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
-// that are 13/16 full each, which is the overflow boundary.
-// Adding one to that should ensure overflow to the next higher size.
-const (
-       belowOverflow = bs * 3 / 2           // 1.5 bs = 2 buckets @ 75%
-       atOverflow    = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
-)
-
-var mapBucketTests = [...]struct {
-       n        int // n is the number of map elements
-       noescape int // number of expected buckets for non-escaping map
-       escape   int // number of expected buckets for escaping map
-}{
-       {-(1 << 30), 1, 1},
-       {-1, 1, 1},
-       {0, 1, 1},
-       {1, 1, 1},
-       {bs, 1, 1},
-       {bs + 1, 2, 2},
-       {belowOverflow, 2, 2},  // 1.5 bs = 2 buckets @ 75%
-       {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
-
-       {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
-       {2*atOverflow + 1, 8, 8},  // 13/4 bs + 1 = overflow to 8
-
-       {4 * belowOverflow, 8, 8},  // 6 bs = 8 buckets @ 75%
-       {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
-}
-
 func TestMapBuckets(t *testing.T) {
-       // Test that maps of different sizes have the right number of buckets.
-       // Non-escaping maps with small buckets (like map[int]int) never
-       // have a nil bucket pointer due to starting with preallocated buckets
-       // on the stack. Escaping maps start with a non-nil bucket pointer if
-       // hint size is above bucketCnt and thereby have more than one bucket.
-       // These tests depend on bucketCnt and loadFactor* in map.go.
-       t.Run("mapliteral", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := map[int]int{}
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(map[int]int{})
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-       t.Run("nohint", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := make(map[int]int)
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(make(map[int]int))
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-       t.Run("makemap", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := make(map[int]int, tt.n)
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(make(map[int]int, tt.n))
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
-       t.Run("makemap64", func(t *testing.T) {
-               for _, tt := range mapBucketTests {
-                       localMap := make(map[int]int, int64(tt.n))
-                       if runtime.MapBucketsPointerIsNil(localMap) {
-                               t.Errorf("no escape: buckets pointer is nil for non-escaping map")
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               localMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
-                               t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
-                       }
-                       escapingMap := runtime.Escape(make(map[int]int, tt.n))
-                       if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
-                               t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
-                       }
-                       for i := 0; i < tt.n; i++ {
-                               escapingMap[i] = i
-                       }
-                       if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
-                               t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
-                       }
-               }
-       })
+       t.Skipf("todo")
 }
index 7d884c4922c6f5e9513c8b401c79bba3dfd53e12..8a73b9ff6fdd503ae52e73657a3462630bc58480 100644 (file)
@@ -6,7 +6,7 @@ package runtime_test
 
 import (
        "fmt"
-       "internal/goarch"
+       "internal/goexperiment"
        "internal/testenv"
        "math"
        "os"
@@ -20,17 +20,6 @@ import (
        "unsafe"
 )
 
-func TestHmapSize(t *testing.T) {
-       // The structure of hmap is defined in runtime/map.go
-       // and in cmd/compile/internal/gc/reflect.go and must be in sync.
-       // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
-       var hmapSize = uintptr(8 + 5*goarch.PtrSize)
-       if runtime.RuntimeHmapSize != hmapSize {
-               t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
-       }
-
-}
-
 // negative zero is a good test because:
 //  1. 0 and -0 are equal, yet have distinct representations.
 //  2. 0 is represented as all zeros, -0 isn't.
@@ -430,6 +419,12 @@ func TestEmptyKeyAndValue(t *testing.T) {
        if len(a) != 1 {
                t.Errorf("empty value insert problem")
        }
+       if len(b) != 1 {
+               t.Errorf("empty key insert problem")
+       }
+       if len(c) != 1 {
+               t.Errorf("empty key+value insert problem")
+       }
        if b[empty{}] != 1 {
                t.Errorf("empty key returned wrong value")
        }
@@ -668,33 +663,37 @@ func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) }
 var testNonEscapingMapVariable int = 8
 
 func TestNonEscapingMap(t *testing.T) {
+       if goexperiment.SwissMap {
+               t.Skip("TODO(go.dev/issue/54766): implement stack allocated maps")
+       }
+
        n := testing.AllocsPerRun(1000, func() {
                m := map[int]int{}
                m[0] = 0
        })
        if n != 0 {
-               t.Fatalf("mapliteral: want 0 allocs, got %v", n)
+               t.Errorf("mapliteral: want 0 allocs, got %v", n)
        }
        n = testing.AllocsPerRun(1000, func() {
                m := make(map[int]int)
                m[0] = 0
        })
        if n != 0 {
-               t.Fatalf("no hint: want 0 allocs, got %v", n)
+               t.Errorf("no hint: want 0 allocs, got %v", n)
        }
        n = testing.AllocsPerRun(1000, func() {
                m := make(map[int]int, 8)
                m[0] = 0
        })
        if n != 0 {
-               t.Fatalf("with small hint: want 0 allocs, got %v", n)
+               t.Errorf("with small hint: want 0 allocs, got %v", n)
        }
        n = testing.AllocsPerRun(1000, func() {
                m := make(map[int]int, testNonEscapingMapVariable)
                m[0] = 0
        })
        if n != 0 {
-               t.Fatalf("with variable hint: want 0 allocs, got %v", n)
+               t.Errorf("with variable hint: want 0 allocs, got %v", n)
        }
 
 }
@@ -1246,22 +1245,11 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) {
        })
 }
 
-func TestLoadFactor(t *testing.T) {
-       for b := uint8(0); b < 20; b++ {
-               count := 13 * (1 << b) / 2 // 6.5
-               if b == 0 {
-                       count = 8
-               }
-               if runtime.OverLoadFactor(count, b) {
-                       t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
-               }
-               if !runtime.OverLoadFactor(count+1, b) {
-                       t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
-               }
+func TestMapKeys(t *testing.T) {
+       if goexperiment.SwissMap {
+               t.Skip("mapkeys not implemented for swissmaps")
        }
-}
 
-func TestMapKeys(t *testing.T) {
        type key struct {
                s   string
                pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
@@ -1277,6 +1265,10 @@ func TestMapKeys(t *testing.T) {
 }
 
 func TestMapValues(t *testing.T) {
+       if goexperiment.SwissMap {
+               t.Skip("mapvalues not implemented for swissmaps")
+       }
+
        type val struct {
                s   string
                pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
index f2a9bd055a9ab625e56665649f7d5daf563b8e3c..ef01d6a1946d898a71d1a0d752a5529e07c3a709 100644 (file)
@@ -9,6 +9,7 @@ import (
        "flag"
        "fmt"
        "internal/abi"
+       "internal/goexperiment"
        "internal/testenv"
        "os"
        "os/exec"
@@ -185,6 +186,9 @@ func TestGdbPythonCgo(t *testing.T) {
 }
 
 func testGdbPython(t *testing.T, cgo bool) {
+       if goexperiment.SwissMap {
+               t.Skip("TODO(prattmic): swissmap DWARF")
+       }
        if cgo {
                testenv.MustHaveCGO(t)
        }
@@ -527,6 +531,10 @@ func main() {
 // TestGdbAutotmpTypes ensures that types of autotmp variables appear in .debug_info
 // See bug #17830.
 func TestGdbAutotmpTypes(t *testing.T) {
+       if goexperiment.SwissMap {
+               t.Skip("TODO(prattmic): swissmap DWARF")
+       }
+
        checkGdbEnvironment(t)
        t.Parallel()
        checkGdbVersion(t)
index 71a4bcac31a16efd6e0f9bec98a6399bc65c21c3..ab51d0b5a6e40ed0d6c932aaa8561bac74c745ed 100644 (file)
@@ -1,5 +1,7 @@
 // run
 
+//go:build !goexperiment.swissmap
+
 // Copyright 2024 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
index 8e4fdc7f467034c907da64e7b8a8172dd6555c49..bb67af0cd0f396f7b75692bea843e89f62f4fba0 100644 (file)
@@ -438,7 +438,7 @@ func f28(b bool) {
 
 func f29(b bool) {
        if b {
-               for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hiter$"
+               for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$"
                        printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
                }
        }
@@ -647,7 +647,7 @@ func bad40() {
 
 func good40() {
        ret := T40{}              // ERROR "stack object ret T40$"
-       ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$"
+       ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.table)$"
        t := &ret
        printnl() // ERROR "live at call to printnl: ret$"
        // Note: ret is live at the printnl because the compiler moves &ret
index 2beac4f8d2bbe4e70711c209383dc198a47829d5..7b66b6c5a87a8f3157a51eb14811a841f53cc7b7 100644 (file)
@@ -27,14 +27,14 @@ func newT40() *T40 {
 }
 
 func bad40() {
-       t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ runtime.hmap$"
+       t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.table)$"
        printnl()     // ERROR "live at call to printnl: ret$"
        useT40(t)
 }
 
 func good40() {
        ret := T40{}                  // ERROR "stack object ret T40$"
-       ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ runtime.hmap$"
+       ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.table)$"
        t := &ret
        printnl() // ERROR "live at call to printnl: ret$"
        useT40(t)
index 3bd7158ffeeaa0eb0f21d690c52666ce57473934..28b4077493da20a7fabdc50011d9c0baafe38502 100644 (file)
@@ -434,7 +434,7 @@ func f28(b bool) {
 
 func f29(b bool) {
        if b {
-               for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hiter$"
+               for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$"
                        printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
                }
        }
@@ -641,16 +641,6 @@ func bad40() {
        printnl()
 }
 
-func good40() {
-       ret := T40{}              // ERROR "stack object ret T40$"
-       ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$"
-       t := &ret
-       printnl() // ERROR "live at call to printnl: ret$"
-       // Note: ret is live at the printnl because the compiler moves &ret
-       // from before the printnl to after.
-       useT40(t)
-}
-
 func ddd1(x, y *int) { // ERROR "live at entry to ddd1: x y$"
        ddd2(x, y) // ERROR "stack object .autotmp_[0-9]+ \[2\]\*int$"
        printnl()
index 636d4e5a0c2c89470dd95fe43b31fd6f48988465..43881c3b617390898ca3110dc5f457aad0b83964 100644 (file)
@@ -36,3 +36,22 @@ func f17c() {
 }
 
 func f17d() *byte
+
+func printnl()
+
+type T40 struct {
+       m map[int]int
+}
+
+//go:noescape
+func useT40(*T40)
+
+func good40() {
+       ret := T40{}              // ERROR "stack object ret T40$"
+       ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$"
+       t := &ret
+       printnl() // ERROR "live at call to printnl: ret$"
+       // Note: ret is live at the printnl because the compiler moves &ret
+       // from before the printnl to after.
+       useT40(t)
+}
index d35b8aadfee08865d8736c59cd1bea3913467acd..e00a71409fe0ae44070b87a750a7625f5d92f526 100644 (file)
@@ -38,3 +38,22 @@ func f17c() {
 }
 
 func f17d() *byte
+
+func printnl()
+
+type T40 struct {
+       m map[int]int
+}
+
+//go:noescape
+func useT40(*T40)
+
+func good40() {
+       ret := T40{}              // ERROR "stack object ret T40$"
+       ret.m = make(map[int]int) // ERROR "stack object .autotmp_[0-9]+ internal/runtime/maps.table$"
+       t := &ret
+       printnl() // ERROR "live at call to printnl: ret$"
+       // Note: ret is live at the printnl because the compiler moves &ret
+       // from before the printnl to after.
+       useT40(t)
+}