Now that there is only one map implementation, we can simplify names.
For #54766.
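For illustration only (a representative cast taken from the hash/maphash and
internal/runtime/maps changes below; surrounding code is a hypothetical caller),
the rename is purely mechanical:

	// Old name:
	mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
	// New name, same layout and semantics:
	mt := (*abi.MapType)(unsafe.Pointer(mTyp))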
Change-Id: I6a6a636cc6a8fc5e7712c27782fc0ced7467b939
Reviewed-on: https://go-review.googlesource.com/c/go/+/691596
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
"internal/abi"
)
-// SwissMapGroupType makes the map slot group type given the type of the map.
-func SwissMapGroupType(t *types.Type) *types.Type {
- if t.MapType().SwissGroup != nil {
- return t.MapType().SwissGroup
+// MapGroupType makes the map slot group type given the type of the map.
+func MapGroupType(t *types.Type) *types.Type {
+ if t.MapType().Group != nil {
+ return t.MapType().Group
}
// Builds a type representing a group structure for the given map type.
//
// type group struct {
// ctrl uint64
- // slots [abi.SwissMapGroupSlots]struct {
+ // slots [abi.MapGroupSlots]struct {
// key keyType
// elem elemType
// }
elemtype := t.Elem()
types.CalcSize(keytype)
types.CalcSize(elemtype)
- if keytype.Size() > abi.SwissMapMaxKeyBytes {
+ if keytype.Size() > abi.MapMaxKeyBytes {
keytype = types.NewPtr(keytype)
}
- if elemtype.Size() > abi.SwissMapMaxElemBytes {
+ if elemtype.Size() > abi.MapMaxElemBytes {
elemtype = types.NewPtr(elemtype)
}
slot := types.NewStruct(slotFields)
slot.SetNoalg(true)
- slotArr := types.NewArray(slot, abi.SwissMapGroupSlots)
+ slotArr := types.NewArray(slot, abi.MapGroupSlots)
slotArr.SetNoalg(true)
fields := []*types.Field{
// the end to ensure pointers are valid.
base.Fatalf("bad group size for %v", t)
}
- if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
+ if t.Key().Size() > abi.MapMaxKeyBytes && !keytype.IsPtr() {
base.Fatalf("key indirect incorrect for %v", t)
}
- if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
+ if t.Elem().Size() > abi.MapMaxElemBytes && !elemtype.IsPtr() {
base.Fatalf("elem indirect incorrect for %v", t)
}
- t.MapType().SwissGroup = group
+ t.MapType().Group = group
group.StructType().Map = t
return group
}
-var cachedSwissTableType *types.Type
+var cachedMapTableType *types.Type
-// swissTableType returns a type interchangeable with internal/runtime/maps.table.
+// mapTableType returns a type interchangeable with internal/runtime/maps.table.
// Make sure this stays in sync with internal/runtime/maps/table.go.
-func swissTableType() *types.Type {
- if cachedSwissTableType != nil {
- return cachedSwissTableType
+func mapTableType() *types.Type {
+ if cachedMapTableType != nil {
+ return cachedMapTableType
}
// type table struct {
base.Fatalf("internal/runtime/maps.table size not correct: got %d, want %d", table.Size(), size)
}
- cachedSwissTableType = table
+ cachedMapTableType = table
return table
}
-var cachedSwissMapType *types.Type
+var cachedMapType *types.Type
-// SwissMapType returns a type interchangeable with internal/runtime/maps.Map.
+// MapType returns a type interchangeable with internal/runtime/maps.Map.
// Make sure this stays in sync with internal/runtime/maps/map.go.
-func SwissMapType() *types.Type {
- if cachedSwissMapType != nil {
- return cachedSwissMapType
+func MapType() *types.Type {
+ if cachedMapType != nil {
+ return cachedMapType
}
// type Map struct {
base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", m.Size(), size)
}
- cachedSwissMapType = m
+ cachedMapType = m
return m
}
-var cachedSwissIterType *types.Type
+var cachedMapIterType *types.Type
-// SwissMapIterType returns a type interchangeable with runtime.hiter.
-// Make sure this stays in sync with runtime/map.go.
-func SwissMapIterType() *types.Type {
- if cachedSwissIterType != nil {
- return cachedSwissIterType
+// MapIterType returns a type interchangeable with internal/runtime/maps.Iter.
+// Make sure this stays in sync with internal/runtime/maps/table.go.
+func MapIterType() *types.Type {
+ if cachedMapIterType != nil {
+ return cachedMapIterType
}
// type Iter struct {
// key unsafe.Pointer // *Key
// elem unsafe.Pointer // *Elem
- // typ unsafe.Pointer // *SwissMapType
+ // typ unsafe.Pointer // *MapType
// m *Map
//
// groupSlotOffset uint64
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("typ", types.Types[types.TUNSAFEPTR]),
- makefield("m", types.NewPtr(SwissMapType())),
+ makefield("m", types.NewPtr(MapType())),
makefield("groupSlotOffset", types.Types[types.TUINT64]),
makefield("dirOffset", types.Types[types.TUINT64]),
makefield("clearSeq", types.Types[types.TUINT64]),
makefield("globalDepth", types.Types[types.TUINT8]),
makefield("dirIdx", types.Types[types.TINT]),
- makefield("tab", types.NewPtr(swissTableType())),
+ makefield("tab", types.NewPtr(mapTableType())),
makefield("group", types.Types[types.TUNSAFEPTR]),
makefield("entryIdx", types.Types[types.TUINT64]),
}
base.Fatalf("internal/runtime/maps.Iter size not correct: got %d, want %d", iter.Size(), size)
}
- cachedSwissIterType = iter
+ cachedMapIterType = iter
return iter
}
-func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
- // internal/abi.SwissMapType
- gtyp := SwissMapGroupType(t)
+func writeMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
+ // internal/abi.MapType
+ gtyp := MapGroupType(t)
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
s3 := writeType(gtyp)
c.Field("ElemOff").WriteUintptr(uint64(elemOff))
var flags uint32
if needkeyupdate(t.Key()) {
- flags |= abi.SwissMapNeedKeyUpdate
+ flags |= abi.MapNeedKeyUpdate
}
if hashMightPanic(t.Key()) {
- flags |= abi.SwissMapHashMightPanic
+ flags |= abi.MapHashMightPanic
}
- if t.Key().Size() > abi.SwissMapMaxKeyBytes {
- flags |= abi.SwissMapIndirectKey
+ if t.Key().Size() > abi.MapMaxKeyBytes {
+ flags |= abi.MapIndirectKey
}
- if t.Elem().Size() > abi.SwissMapMaxKeyBytes {
- flags |= abi.SwissMapIndirectElem
+ if t.Elem().Size() > abi.MapMaxKeyBytes {
+ flags |= abi.MapIndirectElem
}
c.Field("Flags").WriteUint32(flags)
}
case types.TMAP:
- writeSwissMapType(t, lsym, c)
+ writeMapType(t, lsym, c)
case types.TPTR:
// internal/abi.PtrType
ChanType = FromReflect(reflect.TypeOf(abi.ChanType{}))
FuncType = FromReflect(reflect.TypeOf(abi.FuncType{}))
InterfaceType = FromReflect(reflect.TypeOf(abi.InterfaceType{}))
- MapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
+ MapType = FromReflect(reflect.TypeOf(abi.MapType{}))
PtrType = FromReflect(reflect.TypeOf(abi.PtrType{}))
SliceType = FromReflect(reflect.TypeOf(abi.SliceType{}))
StructType = FromReflect(reflect.TypeOf(abi.StructType{}))
// No PSIGNB, simply do byte equality with ctrlEmpty.
// Load ctrlEmpty into each byte of a control word.
- var ctrlsEmpty uint64 = abi.SwissMapCtrlEmpty
+ var ctrlsEmpty uint64 = abi.MapCtrlEmpty
e := s.constInt64(types.Types[types.TUINT64], int64(ctrlsEmpty))
// Explicit copy to fp register. See
// https://go.dev/issue/70451.
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
- _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
+ _ = types.NewPtr(reflectdata.MapType()) // *internal/runtime/maps.Map
_ = types.NewPtr(deferstruct()) // *runtime._defer
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
}
// map <--> *internal/runtime/maps.Map
- mt := types.NewPtr(reflectdata.SwissMapType())
+ mt := types.NewPtr(reflectdata.MapType())
if to.Kind() == types.TMAP && from == mt {
return v
}
case ir.OLEN:
if n.X.Type().IsMap() {
// length is stored in the first word, but needs conversion to int.
- loadType := reflectdata.SwissMapType().Field(0).Type // uint64
+ loadType := reflectdata.MapType().Field(0).Type // uint64
load := s.load(loadType, x)
s.vars[n] = s.conv(nil, load, loadType, lenType) // integer conversion doesn't need Node
} else {
// Format the bucket struct for map[x]y as map.group[x]y.
// This avoids a recursive print that generates very long names.
switch t {
- case mt.SwissGroup:
+ case mt.Group:
b.WriteString("map.group[")
default:
base.Fatalf("unknown internal map type")
Key *Type // Key type
Elem *Type // Val (elem) type
- SwissGroup *Type // internal struct type representing a slot group
+ Group *Type // internal struct type representing a slot group
}
// MapType returns t's extra map-specific fields.
// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
t := n.Type()
- mapType := reflectdata.SwissMapType()
+ mapType := reflectdata.MapType()
hint := n.Len
// var m *Map
m = stackTempAddr(init, mapType)
// Allocate one group pointed to by m.dirPtr on stack if hint
- // is not larger than SwissMapGroupSlots. In case hint is
+ // is not larger than MapGroupSlots. In case hint is
// larger, runtime.makemap will allocate on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapGroupSlots)) {
- // In case hint is larger than SwissMapGroupSlots
+ // In case hint is larger than MapGroupSlots
// runtime.makemap will allocate on the heap, see
// #20184
//
- // if hint <= abi.SwissMapGroupSlots {
+ // if hint <= abi.MapGroupSlots {
// var gv group
// g = &gv
- // g.ctrl = abi.SwissMapCtrlEmpty
+ // g.ctrl = abi.MapCtrlEmpty
// m.dirPtr = g
// }
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapGroupSlots)), nil, nil)
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.MapGroupSlots)), nil, nil)
nif.Likely = true
- groupType := reflectdata.SwissMapGroupType(t)
+ groupType := reflectdata.MapGroupType(t)
// var gv group
// g = &gv
// Can't use ir.NewInt because bit 63 is set, which
// makes conversion to uint64 upset.
- empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty))
+ empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.MapCtrlEmpty))
- // g.ctrl = abi.SwissMapCtrlEmpty
+ // g.ctrl = abi.MapCtrlEmpty
csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map.go
ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty)
nif.Body.Append(ca)
}
}
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapGroupSlots)) {
// Handling make(map[any]any) and
- // make(map[any]any, hint) where hint <= abi.SwissMapGroupSlots
+ // make(map[any]any, hint) where hint <= abi.MapGroupSlots
// specially allows for faster map initialization and
// improves binary size by using calls with fewer arguments.
- // For hint <= abi.SwissMapGroupSlots no groups will be
+ // For hint <= abi.MapGroupSlots no groups will be
// allocated by makemap. Therefore, no groups need to be
// allocated in this code path.
if n.Esc() == ir.EscNone {
n.X = o.copyExpr(r)
// n.Prealloc is the temp for the iterator.
- // SwissMapIterType contains pointers and needs to be zeroed.
- n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
+ // MapIterType contains pointers and needs to be zeroed.
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
}
n.Key = o.exprInPlace(n.Key)
n.Value = o.exprInPlace(n.Value)
hit := nrange.Prealloc
th := hit.Type()
// depends on layout of iterator struct.
- // See cmd/compile/internal/reflectdata/map.go:SwissMapIterType
+ // See cmd/compile/internal/reflectdata/map.go:MapIterType
keysym := th.Field(0).Sym
elemsym := th.Field(1).Sym // ditto
iterInit := "mapIterStart"
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
- if t.Elem().Size() > abi.SwissMapMaxElemBytes {
+ if t.Elem().Size() > abi.MapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {
case abi.Chan: // reflect.chanType
off += 2 * arch.PtrSize
case abi.Map:
- off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
+ off += 7*arch.PtrSize + 4 // internal/abi.MapType
if arch.PtrSize == 8 {
off += 4 // padding for final uint32 field (Flags).
}
return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+int32(arch.PtrSize)) // 0x20 / 0x38
}
-func decodetypeMapSwissGroup(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
+func decodetypeMapGroup(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
relocs := ldr.Relocs(symIdx)
return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+2*int32(arch.PtrSize)) // 0x24 / 0x40
}
keyType := decodetypeMapKey(d.ldr, d.arch, gotype)
valType := decodetypeMapValue(d.ldr, d.arch, gotype)
- groupType := decodetypeMapSwissGroup(d.ldr, d.arch, gotype)
+ groupType := decodetypeMapGroup(d.ldr, d.arch, gotype)
keyType = d.walksymtypedef(d.defgotype(keyType))
valType = d.walksymtypedef(d.defgotype(valType))
"type:internal/abi.ArrayType",
"type:internal/abi.ChanType",
"type:internal/abi.FuncType",
+ "type:internal/abi.MapType",
"type:internal/abi.PtrType",
"type:internal/abi.SliceType",
"type:internal/abi.StructType",
- "type:internal/abi.SwissMapType",
"type:internal/abi.InterfaceType",
"type:internal/abi.ITab",
"type:internal/abi.Imethod"} {
"internal/abi.ArrayType": true,
"internal/abi.ChanType": true,
"internal/abi.FuncType": true,
+ "internal/abi.MapType": true,
"internal/abi.PtrType": true,
"internal/abi.SliceType": true,
"internal/abi.StructType": true,
- "internal/abi.SwissMapType": true,
"internal/abi.InterfaceType": true,
"internal/abi.ITab": true,
}
s := seed.s
var m map[T]struct{}
mTyp := abi.TypeOf(m)
- hasher := (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
+ hasher := (*abi.MapType)(unsafe.Pointer(mTyp)).Hasher
if goarch.PtrSize == 8 {
return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s)))
}
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Number of bits in the group.slot count.
- SwissMapGroupSlotsBits = 3
+ MapGroupSlotsBits = 3
// Number of slots in a group.
- SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8
+ MapGroupSlots = 1 << MapGroupSlotsBits // 8
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
- SwissMapMaxKeyBytes = 128
- SwissMapMaxElemBytes = 128
+ MapMaxKeyBytes = 128
+ MapMaxElemBytes = 128
ctrlEmpty = 0b10000000
bitsetLSB = 0x0101010101010101
// Value of control word with all empty slots.
- SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
+ MapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
)
-type SwissMapType struct {
+type MapType struct {
Type
Key *Type
Elem *Type
// Flag values
const (
- SwissMapNeedKeyUpdate = 1 << iota
- SwissMapHashMightPanic
- SwissMapIndirectKey
- SwissMapIndirectElem
+ MapNeedKeyUpdate = 1 << iota
+ MapHashMightPanic
+ MapIndirectKey
+ MapIndirectElem
)
-func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
- return mt.Flags&SwissMapNeedKeyUpdate != 0
+func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
+ return mt.Flags&MapNeedKeyUpdate != 0
}
-func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
- return mt.Flags&SwissMapHashMightPanic != 0
+func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
+ return mt.Flags&MapHashMightPanic != 0
}
-func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
- return mt.Flags&SwissMapIndirectKey != 0
+func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
+ return mt.Flags&MapIndirectKey != 0
}
-func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
- return mt.Flags&SwissMapIndirectElem != 0
+func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
+ return mt.Flags&MapIndirectElem != 0
}
return &(*u)(unsafe.Pointer(t)).u
case Map:
type u struct {
- SwissMapType
+ MapType
u UncommonType
}
return &(*u)(unsafe.Pointer(t)).u
tt := (*ChanType)(unsafe.Pointer(t))
return tt.Elem
case Map:
- tt := (*SwissMapType)(unsafe.Pointer(t))
+ tt := (*MapType)(unsafe.Pointer(t))
return tt.Elem
case Pointer:
tt := (*PtrType)(unsafe.Pointer(t))
return (*StructType)(unsafe.Pointer(t))
}
-// MapType returns t cast to a *SwissMapType, or nil if its tag does not match.
-func (t *Type) MapType() *SwissMapType {
+// MapType returns t cast to a *MapType, or nil if its tag does not match.
+func (t *Type) MapType() *MapType {
if t.Kind() != Map {
return nil
}
- return (*SwissMapType)(unsafe.Pointer(t))
+ return (*MapType)(unsafe.Pointer(t))
}
// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
func (t *Type) Key() *Type {
if t.Kind() == Map {
- return (*SwissMapType)(unsafe.Pointer(t)).Key
+ return (*MapType)(unsafe.Pointer(t)).Key
}
return nil
}
// we can't properly test hint alloc overflows with this.
const maxAllocTest = 1 << 30
-func newTestMapType[K comparable, V any]() *abi.SwissMapType {
+func newTestMapType[K comparable, V any]() *abi.MapType {
var m map[K]V
mTyp := abi.TypeOf(m)
- mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+ mt := (*abi.MapType)(unsafe.Pointer(mTyp))
return mt
}
-func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) {
+func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.MapType) {
mt := newTestMapType[K, V]()
return NewMap(mt, hint, nil, maxAllocTest), mt
}
// Returns nil if there are no full groups.
// Returns nil if a group is full but contains entirely deleted slots.
// Returns nil if the map is small.
-func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
+func (m *Map) KeyFromFullGroup(typ *abi.MapType) unsafe.Pointer {
if m.dirLen <= 0 {
return nil
}
}
// All full or deleted slots.
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if g.ctrls().get(j) == ctrlDeleted {
continue
}
}
// Returns nil if the map is small.
-func (m *Map) TableFor(typ *abi.SwissMapType, key unsafe.Pointer) *table {
+func (m *Map) TableFor(typ *abi.MapType, key unsafe.Pointer) *table {
if m.dirLen <= 0 {
return nil
}
// TODO(prattmic): Consider inverting the top bit so that the zero value is empty.
type ctrl uint8
-// ctrlGroup is a fixed size array of abi.SwissMapGroupSlots control bytes
+// ctrlGroup is a fixed size array of abi.MapGroupSlots control bytes
// stored in a uint64.
type ctrlGroup uint64
// groupReference is a wrapper type representing a single slot group stored at
// data.
//
-// A group holds abi.SwissMapGroupSlots slots (key/elem pairs) plus their
+// A group holds abi.MapGroupSlots slots (key/elem pairs) plus their
// control word.
type groupReference struct {
// data points to the group, which is described by typ.Group and has
//
// type group struct {
// ctrls ctrlGroup
- // slots [abi.SwissMapGroupSlots]slot
+ // slots [abi.MapGroupSlots]slot
// }
//
// type slot struct {
}
// key returns a pointer to the key at index i.
-func (g *groupReference) key(typ *abi.SwissMapType, i uintptr) unsafe.Pointer {
+func (g *groupReference) key(typ *abi.MapType, i uintptr) unsafe.Pointer {
offset := groupSlotsOffset + i*typ.SlotSize
return unsafe.Pointer(uintptr(g.data) + offset)
}
// elem returns a pointer to the element at index i.
-func (g *groupReference) elem(typ *abi.SwissMapType, i uintptr) unsafe.Pointer {
+func (g *groupReference) elem(typ *abi.MapType, i uintptr) unsafe.Pointer {
offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff
return unsafe.Pointer(uintptr(g.data) + offset)
// newGroups allocates a new array of length groups.
//
// Length must be a power of two.
-func newGroups(typ *abi.SwissMapType, length uint64) groupsReference {
+func newGroups(typ *abi.MapType, length uint64) groupsReference {
return groupsReference{
// TODO: make the length type the same throughout.
data: newarray(typ.Group, int(length)),
}
// group returns the group at index i.
-func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference {
+func (g *groupsReference) group(typ *abi.MapType, i uint64) groupReference {
// TODO(prattmic): Do something here about truncation on cast to
// uintptr on 32-bit systems?
offset := uintptr(i) * typ.GroupSize
}
}
-func cloneGroup(typ *abi.SwissMapType, newGroup, oldGroup groupReference) {
+func cloneGroup(typ *abi.MapType, newGroup, oldGroup groupReference) {
typedmemmove(typ.Group, newGroup.data, oldGroup.data)
if typ.IndirectKey() {
// Deep copy keys if indirect.
- for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
+ for i := uintptr(0); i < abi.MapGroupSlots; i++ {
oldKey := *(*unsafe.Pointer)(oldGroup.key(typ, i))
if oldKey == nil {
continue
}
if typ.IndirectElem() {
// Deep copy elems if indirect.
- for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
+ for i := uintptr(0); i < abi.MapGroupSlots; i++ {
oldElem := *(*unsafe.Pointer)(oldGroup.elem(typ, i))
if oldElem == nil {
continue
//
// Terminology:
// - Slot: A storage location of a single key/element pair.
-// - Group: A group of abi.SwissMapGroupSlots (8) slots, plus a control word.
+// - Group: A group of abi.MapGroupSlots (8) slots, plus a control word.
// - Control word: An 8-byte word which denotes whether each slot is empty,
// deleted, or used. If a slot is used, its control byte also contains the
// lower 7 bits of the hash (H2).
return h & 0x7f
}
-// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:SwissMapType.
+// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:MapType.
type Map struct {
// The number of filled slots (i.e. the number of elements in all
// tables). Excludes deleted slots.
// details.
//
// Small map optimization: if the map always contained
- // abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
+ // abi.MapGroupSlots or fewer entries, it fits entirely in a
// single group. In that case dirPtr points directly to a single group.
//
// dirPtr *group
// maxAlloc should be runtime.maxAlloc.
//
// TODO(prattmic): Put maxAlloc somewhere accessible.
-func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
+func NewMap(mt *abi.MapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
if m == nil {
m = new(Map)
}
m.seed = uintptr(rand())
- if hint <= abi.SwissMapGroupSlots {
+ if hint <= abi.MapGroupSlots {
// A small map can fill all 8 slots, so no need to increase
// target capacity.
//
// Set initial capacity to hold hint entries without growing in the
// average case.
- targetCapacity := (hint * abi.SwissMapGroupSlots) / maxAvgGroupLoad
+ targetCapacity := (hint * abi.MapGroupSlots) / maxAvgGroupLoad
if targetCapacity < hint { // overflow
return m // return an empty map.
}
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
-func (m *Map) Get(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (m *Map) Get(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
return m.getWithoutKey(typ, key)
}
-func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+func (m *Map) getWithKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
if m.Used() == 0 {
return nil, nil, false
}
return m.directoryAt(idx).getWithKey(typ, hash, key)
}
-func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (m *Map) getWithoutKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
if m.Used() == 0 {
return nil, false
}
return m.directoryAt(idx).getWithoutKey(typ, hash, key)
}
-func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+func (m *Map) getWithKeySmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
g := groupReference{
data: m.dirPtr,
}
return nil, nil, false
}
-func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
+func (m *Map) Put(typ *abi.MapType, key, elem unsafe.Pointer) {
slotElem := m.PutSlot(typ, key)
typedmemmove(typ.Elem, slotElem, elem)
}
// should be written.
//
// PutSlot never returns nil.
-func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer {
+func (m *Map) PutSlot(typ *abi.MapType, key unsafe.Pointer) unsafe.Pointer {
if m.writing != 0 {
fatal("concurrent map writes")
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
}
}
-func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
+func (m *Map) putSlotSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
return slotElem
}
-func (m *Map) growToSmall(typ *abi.SwissMapType) {
+func (m *Map) growToSmall(typ *abi.MapType) {
grp := newGroups(typ, 1)
m.dirPtr = grp.data
g.ctrls().setEmpty()
}
-func (m *Map) growToTable(typ *abi.SwissMapType) {
- tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0)
+func (m *Map) growToTable(typ *abi.MapType) {
+ tab := newTable(typ, 2*abi.MapGroupSlots, 0, 0)
g := groupReference{
data: m.dirPtr,
}
- for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
+ for i := uintptr(0); i < abi.MapGroupSlots; i++ {
if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
// Empty
continue
m.globalShift = depthToShift(m.globalDepth)
}
-func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
+func (m *Map) Delete(typ *abi.MapType, key unsafe.Pointer) {
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
m.writing ^= 1
}
-func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) {
+func (m *Map) deleteSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) {
g := groupReference{
data: m.dirPtr,
}
}
// Clear deletes all entries from the map resulting in an empty map.
-func (m *Map) Clear(typ *abi.SwissMapType) {
+func (m *Map) Clear(typ *abi.MapType) {
if m == nil || m.Used() == 0 && !m.tombstonePossible {
return
}
m.writing ^= 1
}
-func (m *Map) clearSmall(typ *abi.SwissMapType) {
+func (m *Map) clearSmall(typ *abi.MapType) {
g := groupReference{
data: m.dirPtr,
}
m.used = 0
}
-func (m *Map) Clone(typ *abi.SwissMapType) *Map {
+func (m *Map) Clone(typ *abi.MapType) *Map {
// Note: this should never be called with a nil map.
if m.writing != 0 {
fatal("concurrent map clone and map write")
return m
}
-func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error {
+func mapKeyError(t *abi.MapType, p unsafe.Pointer) error {
if !t.HashMightPanic() {
return nil
}
func TestCtrlSize(t *testing.T) {
cs := unsafe.Sizeof(maps.CtrlGroup(0))
- if cs != abi.SwissMapGroupSlots {
- t.Errorf("ctrlGroup size got %d want abi.SwissMapGroupSlots %d", cs, abi.SwissMapGroupSlots)
+ if cs != abi.MapGroupSlots {
+ t.Errorf("ctrlGroup size got %d want abi.MapGroupSlots %d", cs, abi.MapGroupSlots)
}
}
}
func TestMapIndirect(t *testing.T) {
- type big [abi.SwissMapMaxKeyBytes + abi.SwissMapMaxElemBytes]byte
+ type big [abi.MapMaxKeyBytes + abi.MapMaxElemBytes]byte
m, typ := maps.NewTestMap[big, big](8)
}
const (
- belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
- atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
+ belowMax = abi.MapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
+ atMax = (2 * abi.MapGroupSlots * maps.MaxAvgGroupLoad) / abi.MapGroupSlots // 2 groups at 7/8 full.
)
func TestTableGroupCount(t *testing.T) {
},
},
{
- n: abi.SwissMapGroupSlots,
+ n: abi.MapGroupSlots,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{0, 0},
},
},
{
- n: abi.SwissMapGroupSlots + 1,
+ n: abi.MapGroupSlots + 1,
escape: mapCase{
initialLit: mapCount{0, 0},
initialHint: mapCount{1, 2},
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
-func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
}
//go:linkname runtime_mapaccess2 runtime.mapaccess2
-func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
}
//go:linkname runtime_mapassign runtime.mapassign
-func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
)
//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
-func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
+func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32)
}
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
-func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) {
+func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32)
}
}
-func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer {
+func (m *Map) putSlotSmallFast32(typ *abi.MapType, hash uintptr, key uint32) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
}
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
-func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
+func runtime_mapassign_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFast32(typ, hash, key)
if m.writing == 0 {
// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function.
//
//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
-func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+func runtime_mapassign_fast32ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastPtr(typ, hash, key)
if m.writing == 0 {
}
//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
-func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) {
+func runtime_mapdelete_fast32(typ *abi.MapType, m *Map, key uint32) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32)
)
//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
-func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
+func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64)
}
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
-func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) {
+func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64)
}
}
-func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer {
+func (m *Map) putSlotSmallFast64(typ *abi.MapType, hash uintptr, key uint64) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
}
//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
-func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
+func runtime_mapassign_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFast64(typ, hash, key)
if m.writing == 0 {
return slotElem
}
-func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
+func (m *Map) putSlotSmallFastPtr(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
// Key is a 64-bit pointer (only called on 64-bit GOARCH).
//
//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
-func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+func runtime_mapassign_fast64ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastPtr(typ, hash, key)
if m.writing == 0 {
}
//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
-func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) {
+func runtime_mapdelete_fast64(typ *abi.MapType, m *Map, key uint64) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64)
"unsafe"
)
-func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer {
+func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
// for strings that are long enough that hashing is expensive.
if len(key) > 64 {
// String hashing and equality might be expensive. Do a quick check first.
- j := abi.SwissMapGroupSlots
- for i := range abi.SwissMapGroupSlots {
+ j := abi.MapGroupSlots
+ for i := range abi.MapGroupSlots {
if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
- if j < abi.SwissMapGroupSlots {
+ if j < abi.MapGroupSlots {
// 2 strings both passed the quick equality test.
// Break out of this loop and do it the slow way.
goto dohash
slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
ctrls >>= 8
}
- if j == abi.SwissMapGroupSlots {
+ if j == abi.MapGroupSlots {
// No slot passed the quick test.
return nil
}
ctrls = *g.ctrls()
slotKey = g.key(typ, 0)
- for range abi.SwissMapGroupSlots {
+ for range abi.MapGroupSlots {
if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
}
}
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
-func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
+func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr)
}
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
-func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) {
+func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr)
}
}
-func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer {
+func (m *Map) putSlotSmallFastStr(typ *abi.MapType, hash uintptr, key string) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
}
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
-func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
+func runtime_mapassign_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastStr(typ, hash, key)
if m.writing == 0 {
}
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
-func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) {
+func runtime_mapdelete_faststr(typ *abi.MapType, m *Map, key string) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr)
used uint16
// The total number of slots (always 2^N). Equal to
- // `(groups.lengthMask+1)*abi.SwissMapGroupSlots`.
+ // `(groups.lengthMask+1)*abi.MapGroupSlots`.
capacity uint16
// The number of slots we can still fill without needing to rehash.
// directory).
index int
- // groups is an array of slot groups. Each group holds abi.SwissMapGroupSlots
+ // groups is an array of slot groups. Each group holds abi.MapGroupSlots
// key/elem slots and their control bytes. A table has a fixed size
// groups array. The table is replaced (in rehash) when more space is
// required.
groups groupsReference
}
-func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint8) *table {
- if capacity < abi.SwissMapGroupSlots {
- capacity = abi.SwissMapGroupSlots
+func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
+ if capacity < abi.MapGroupSlots {
+ capacity = abi.MapGroupSlots
}
t := &table{
// reset resets the table with new, empty groups with the specified new total
// capacity.
-func (t *table) reset(typ *abi.SwissMapType, capacity uint16) {
- groupCount := uint64(capacity) / abi.SwissMapGroupSlots
+func (t *table) reset(typ *abi.MapType, capacity uint16) {
+ groupCount := uint64(capacity) / abi.MapGroupSlots
t.groups = newGroups(typ, groupCount)
t.capacity = capacity
t.growthLeft = t.maxGrowthLeft()
// No real reason to support zero capacity table, since an
// empty Map simply won't have a table.
panic("table must have positive capacity")
- } else if t.capacity <= abi.SwissMapGroupSlots {
+ } else if t.capacity <= abi.MapGroupSlots {
// If the map fits in a single group then we're able to fill all of
// the slots except 1 (an empty slot is needed to terminate find
// operations).
// TODO(prattmic): Do something cleaner.
panic("overflow")
}
- return (t.capacity * maxAvgGroupLoad) / abi.SwissMapGroupSlots
+ return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
}
}
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
-func (t *table) Get(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
// TODO(prattmic): We could avoid hashing in a variety of special
// cases.
//
// expose updated elements. For NeedsKeyUpdate keys, iteration also must return
// the new key value, not the old key value.
// hash must be the hash of the key.
-func (t *table) getWithKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
// To find the location of a key in the table, we compute hash(key). From
// h1(hash(key)) and the capacity, we construct a probeSeq that visits
// every group of slots in some interesting order. See [probeSeq].
}
}
-func (t *table) getWithoutKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
// the new table.
//
// hash must be the hash of key.
-func (t *table) PutSlot(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
// As we look for a match, keep track of the first deleted slot we
// requires the caller to ensure that the referenced memory never
// changes (by sourcing those pointers from another indirect key/elem
// map).
-func (t *table) uncheckedPutSlot(typ *abi.SwissMapType, hash uintptr, key, elem unsafe.Pointer) {
+func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
if t.growthLeft == 0 {
panic("invariant failed: growthLeft is unexpectedly 0")
}
}
// Delete returns true if it put a tombstone in t.
-func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
+func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
// We really need to remove O(n) tombstones so we can pay for the cost
// of finding them. If we can't, then we need to grow (which is also O(n),
// but guarantees O(n) subsequent inserts can happen in constant time).
-func (t *table) pruneTombstones(typ *abi.SwissMapType, m *Map) {
+func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
if t.tombstones()*10 < t.capacity { // 10% of capacity
// Not enough tombstones to be worth the effort.
return
}
// Bit set marking all the groups whose tombstones are needed.
- var needed [(maxTableCapacity/abi.SwissMapGroupSlots + 31) / 32]uint32
+ var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32
// Trace the probe sequence of every full entry.
for i := uint64(0); i <= t.groups.lengthMask; i++ {
// tombstone is a slot that has been deleted but is still considered occupied
// so as not to violate the probing invariant.
func (t *table) tombstones() uint16 {
- return (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - t.growthLeft
+ return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
}
// Clear deletes all entries from the map resulting in an empty map.
-func (t *table) Clear(typ *abi.SwissMapType) {
+func (t *table) Clear(typ *abi.MapType) {
mgl := t.maxGrowthLeft()
if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
return
type Iter struct {
key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
- typ *abi.SwissMapType
+ typ *abi.MapType
m *Map
// Randomize iteration order by starting iteration at a random slot
}
// Init initializes Iter for iteration.
-func (it *Iter) Init(typ *abi.SwissMapType, m *Map) {
+func (it *Iter) Init(typ *abi.MapType, m *Map) {
it.typ = typ
if m == nil || m.used == 0 {
if it.dirIdx < 0 {
// Map was small at Init.
- for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ {
- k := uintptr(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots
+ for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
+ k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots
if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted.
// match.
entryIdx := (it.entryIdx + it.entryOffset) & entryMask
- slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1))
+ slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
if slotIdx == 0 || it.group.data == nil {
// Only compute the group (a) when we switch
// groups (slotIdx rolls over) and (b) on the
// first iteration in this table (slotIdx may
// not be zero due to entryOffset).
- groupIdx := entryIdx >> abi.SwissMapGroupSlotsBits
+ groupIdx := entryIdx >> abi.MapGroupSlotsBits
it.group = it.tab.groups.group(it.typ, groupIdx)
}
var groupMatch bitset
for it.entryIdx <= entryMask {
entryIdx := (it.entryIdx + it.entryOffset) & entryMask
- slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1))
+ slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
if slotIdx == 0 || it.group.data == nil {
// Only compute the group (a) when we switch
// groups (slotIdx rolls over) and (b) on the
// first iteration in this table (slotIdx may
// not be zero due to entryOffset).
- groupIdx := entryIdx >> abi.SwissMapGroupSlotsBits
+ groupIdx := entryIdx >> abi.MapGroupSlotsBits
it.group = it.tab.groups.group(it.typ, groupIdx)
}
if groupMatch == 0 {
// Jump past remaining slots in this
// group.
- it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
+ it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
continue
}
// No more entries in this
// group. Continue to next
// group.
- it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
+ it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
continue
}
// No more entries in
// this group. Continue
// to next group.
- it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
+ it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
} else {
// Next full slot.
i := groupMatch.first()
// Replaces the table with one larger table or two split tables to fit more
// entries. Since the table is replaced, t is now stale and should not be
// modified.
-func (t *table) rehash(typ *abi.SwissMapType, m *Map) {
+func (t *table) rehash(typ *abi.MapType, m *Map) {
// TODO(prattmic): SwissTables typically perform a "rehash in place"
// operation which recovers capacity consumed by tombstones without growing
// the table by reordering slots as necessary to maintain the probe
}
// split the table into two, installing the new tables in the map directory.
-func (t *table) split(typ *abi.SwissMapType, m *Map) {
+func (t *table) split(typ *abi.MapType, m *Map) {
localDepth := t.localDepth
localDepth++
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted
continue
// and uncheckedPutting each element of the table into the new table (we know
// that no insertion here will Put an already-present value), and discard the
// old table.
-func (t *table) grow(typ *abi.SwissMapType, m *Map, newCapacity uint16) {
+func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)
if t.capacity > 0 {
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted
continue
return s
}
-func (t *table) clone(typ *abi.SwissMapType) *table {
+func (t *table) clone(typ *abi.MapType) *table {
// Shallow copy the table structure.
t2 := new(table)
*t2 = *t
const debugLog = false
-func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
+func (t *table) checkInvariants(typ *abi.MapType, m *Map) {
if !debugLog {
return
}
var empty uint16
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
c := g.ctrls().get(j)
switch {
case c == ctrlDeleted:
panic("invariant failed: found mismatched used slot count")
}
- growthLeft := (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - deleted
+ growthLeft := (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - deleted
if growthLeft != t.growthLeft {
print("invariant failed: found ", t.growthLeft, " growthLeft, but expected ", growthLeft, "\n")
t.Print(typ, m)
panic("invariant failed: found no empty slots (violates probe invariant)")
}
}
-func (t *table) Print(typ *abi.SwissMapType, m *Map) {
+func (t *table) Print(typ *abi.MapType, m *Map) {
print(`table{
index: `, t.index, `
localDepth: `, t.localDepth, `
g := t.groups.group(typ, i)
ctrls := g.ctrls()
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
print("\t\t\tslot ", j, "\n")
c := ctrls.get(j)
if t.Kind() != Map {
panic("reflect: Key of non-map type " + t.String())
}
- tt := (*abi.SwissMapType)(unsafe.Pointer(t))
+ tt := (*abi.MapType)(unsafe.Pointer(t))
return toType(tt.Key)
}
// Look in known types.
s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
for _, tt := range typesByString(s) {
- mt := (*abi.SwissMapType)(unsafe.Pointer(tt))
+ mt := (*abi.MapType)(unsafe.Pointer(tt))
if mt.Key == ktyp && mt.Elem == etyp {
ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
- mt := **(**abi.SwissMapType)(unsafe.Pointer(&imap))
+ mt := **(**abi.MapType)(unsafe.Pointer(&imap))
mt.Str = resolveReflectName(newName(s, "", false, false))
mt.TFlag = abi.TFlagDirectIface
mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
mt.ElemOff = slot.Field(1).Offset
mt.Flags = 0
if needKeyUpdate(ktyp) {
- mt.Flags |= abi.SwissMapNeedKeyUpdate
+ mt.Flags |= abi.MapNeedKeyUpdate
}
if hashMightPanic(ktyp) {
- mt.Flags |= abi.SwissMapHashMightPanic
+ mt.Flags |= abi.MapHashMightPanic
}
- if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
- mt.Flags |= abi.SwissMapIndirectKey
+ if ktyp.Size_ > abi.MapMaxKeyBytes {
+ mt.Flags |= abi.MapIndirectKey
}
- if etyp.Size_ > abi.SwissMapMaxKeyBytes {
- mt.Flags |= abi.SwissMapIndirectElem
+ if etyp.Size_ > abi.MapMaxKeyBytes {
+ mt.Flags |= abi.MapIndirectElem
}
mt.PtrToThis = 0
func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
// type group struct {
// ctrl uint64
- // slots [abi.SwissMapGroupSlots]struct {
+ // slots [abi.MapGroupSlots]struct {
// key keyType
// elem elemType
// }
// }
- if ktyp.Size() > abi.SwissMapMaxKeyBytes {
+ if ktyp.Size() > abi.MapMaxKeyBytes {
ktyp = PointerTo(ktyp)
}
- if etyp.Size() > abi.SwissMapMaxElemBytes {
+ if etyp.Size() > abi.MapMaxElemBytes {
etyp = PointerTo(etyp)
}
},
{
Name: "Slots",
- Type: ArrayOf(abi.SwissMapGroupSlots, slot),
+ Type: ArrayOf(abi.MapGroupSlots, slot),
},
}
group := StructOf(fields)
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
v.mustBe(Map)
- tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
+ tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
// Do not require key to be exported, so that DeepEqual
// and other programs can use all the keys returned by
// of unexported fields.
var e unsafe.Pointer
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
// Equivalent to runtime.mapIterStart.
//
//go:noinline
-func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
race.ReadPC(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
v.mustBe(Map)
- tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
+ tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
keyType := tt.Key
fl := v.flag.ro() | flag(keyType.Kind())
panic("MapIter.Key called on exhausted iterator")
}
- t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
target = v.ptr
}
- t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
iter.m.mustBeExported() // do not let unexported m leak
panic("MapIter.Value called on exhausted iterator")
}
- t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
target = v.ptr
}
- t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
iter.m.mustBeExported() // do not let unexported m leak
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
if !iter.hiter.Initialized() {
- t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
m := (*maps.Map)(iter.m.pointer())
mapIterStart(t, m, &iter.hiter)
} else {
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
- tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
+ tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)
// Fields from hiter.
key unsafe.Pointer
elem unsafe.Pointer
- typ *abi.SwissMapType
+ typ *abi.MapType
// The real iterator.
it *maps.Iter
// See go.dev/issue/67401.
//
//go:linkname mapiterinit
-func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
+func mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit))
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
+func reflect_mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
mapiterinit(t, m, it)
}
//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign
var maps_errNilAssign error = plainError("assignment to entry in nil map")
-func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
+func makemap64(t *abi.MapType, hint int64, m *maps.Map) *maps.Map {
if int64(int(hint)) != hint {
hint = 0
}
}
// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots
+// make(map[k]v, hint) when hint is known to be at most abi.MapGroupSlots
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
// See go.dev/issue/67401.
//
//go:linkname makemap
-func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
+func makemap(t *abi.MapType, hint int, m *maps.Map) *maps.Map {
if hint < 0 {
hint = 0
}
// we want to avoid one layer of call.
//
//go:linkname mapaccess1
-func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapaccess1(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapaccess2 should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2
-func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
+func mapaccess2(t *abi.MapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
-func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
+func mapaccess1_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero
return e
}
-func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+func mapaccess2_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero, false
// See go.dev/issue/67401.
//
//go:linkname mapassign
-func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapdelete should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapdelete
-func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+func mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
// performs the first step of iteration. The Iter struct pointed to by 'it' is
// allocated on the stack by the compiler's order pass or on the heap by
// reflect. Both need to have zeroed it since the struct contains pointers.
-func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
}
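mapIterStart is the entry point for map iteration. A minimal sketch of the range loop that reaches it; the mapIterNext companion step is assumed here and is not part of this hunk:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// Roughly: the compiler arranges a zeroed maps.Iter (on the stack via the
	// order pass), calls mapIterStart once, then advances the iterator
	// (mapIterNext) until it reports no more entries. reflect's MapIter drives
	// the same machinery with a heap-allocated, zeroed Iter.
	for k, v := range m {
		fmt.Println(k, v)
	}
}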
// mapclear deletes all keys from a map.
-func mapclear(t *abi.SwissMapType, m *maps.Map) {
+func mapclear(t *abi.MapType, m *maps.Map) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)
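mapclear is what the clear builtin on maps resolves to (reflect's Value.Clear goes through reflect_mapclear below). A short illustration:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	clear(m)            // deletes all keys; backed by mapclear
	fmt.Println(len(m)) // 0
}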
// See go.dev/issue/67401.
//
//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
+func reflect_makemap(t *abi.MapType, cap int) *maps.Map {
	// Check invariants and reflect's math.
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
// See go.dev/issue/67401.
//
//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+func reflect_mapaccess(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
elem, ok := mapaccess2(t, m, key)
if !ok {
// reflect wants nil for a missing element
}
//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer {
+func reflect_mapaccess_faststr(t *abi.MapType, m *maps.Map, key string) unsafe.Pointer {
elem, ok := mapaccess2_faststr(t, m, key)
if !ok {
// reflect wants nil for a missing element
// Do not remove or change the type signature.
//
//go:linkname reflect_mapassign reflect.mapassign0
-func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
+func reflect_mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
p := mapassign(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
-func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) {
+func reflect_mapassign_faststr(t *abi.MapType, m *maps.Map, key string, elem unsafe.Pointer) {
p := mapassign_faststr(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+func reflect_mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
mapdelete(t, m, key)
}
//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) {
+func reflect_mapdelete_faststr(t *abi.MapType, m *maps.Map, key string) {
mapdelete_faststr(t, m, key)
}
}
//go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) {
+func reflect_mapclear(t *abi.MapType, m *maps.Map) {
mapclear(t, m)
}
//go:linkname mapclone maps.clone
func mapclone(m any) any {
e := efaceOf(&m)
- typ := (*abi.SwissMapType)(unsafe.Pointer(e._type))
+ typ := (*abi.MapType)(unsafe.Pointer(e._type))
map_ := (*maps.Map)(e.data)
map_ = map_.Clone(typ)
e.data = (unsafe.Pointer)(map_)
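mapclone is the runtime half of the standard library's maps.Clone, reached through the maps.clone linkname above. A short usage example:

package main

import (
	"fmt"
	"maps"
)

func main() {
	m1 := map[string]int{"a": 1}
	m2 := maps.Clone(m1) // ends up in runtime.mapclone via the maps.clone linkname
	m2["a"] = 2
	fmt.Println(m1["a"], m2["a"]) // 1 2
}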
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_fast32
-func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
+func mapaccess1_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer
// mapaccess2_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast32
-func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
+func mapaccess2_fast32(t *abi.MapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
// mapassign_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32
-func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
+func mapassign_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer
// mapassign_fast32ptr should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32ptr
-func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapassign_fast32ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
//go:linkname mapdelete_fast32
-func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32)
+func mapdelete_fast32(t *abi.MapType, m *maps.Map, key uint32)
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_fast64
-func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
+func mapaccess1_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer
// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
-func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
+func mapaccess2_fast64(t *abi.MapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
-func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
+func mapassign_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer
// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
-func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapassign_fast64ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
//go:linkname mapdelete_fast64
-func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64)
+func mapdelete_fast64(t *abi.MapType, m *maps.Map, key uint64)
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_faststr
-func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer
+func mapaccess1_faststr(t *abi.MapType, m *maps.Map, ky string) unsafe.Pointer
// mapaccess2_faststr should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_faststr
-func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
+func mapaccess2_faststr(t *abi.MapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
// mapassign_faststr should be an internal detail,
// but widely used packages access it using linkname.
// See go.dev/issue/67401.
//
//go:linkname mapassign_faststr
-func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer
+func mapassign_faststr(t *abi.MapType, m *maps.Map, s string) unsafe.Pointer
//go:linkname mapdelete_faststr
-func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string)
+func mapdelete_faststr(t *abi.MapType, m *maps.Map, ky string)
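The _fast32/_fast64/_faststr families above are key-specialized entry points. A rough sketch of which one ordinary map operations are expected to hit; the exact selection happens in the compiler's walk pass and is assumed here, not shown in this change:

package main

var sink any

func main() {
	m32 := map[uint32]int{} // 4-byte keys: mapassign_fast32 / mapaccess*_fast32
	m64 := map[uint64]int{} // 8-byte keys: mapassign_fast64 / mapaccess*_fast64
	ms := map[string]int{}  // string keys: mapassign_faststr / mapaccess*_faststr
	mp := map[*int]int{}    // pointer keys: mapassign_fast32ptr or _fast64ptr, by pointer size

	m32[1], m64[1], ms["k"], mp[nil] = 1, 2, 3, 4
	sink = []any{m32, m64, ms, mp}
}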
func TestGroupSizeZero(t *testing.T) {
var m map[struct{}]struct{}
mTyp := abi.TypeOf(m)
- mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+ mt := (*abi.MapType)(unsafe.Pointer(mTyp))
	// internal/runtime/maps may create pointers to slots, even if slots
// are size 0. The compiler should have reserved an extra word to
return str(self.val.type)
def children(self):
- SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots
+ MapGroupSlots = 8 # see internal/abi:MapGroupSlots
cnt = 0
# Yield keys and elements in group.
def group_slots(group):
ctrl = group['ctrl']
- for i in xrange(SwissMapGroupSlots):
+ for i in xrange(MapGroupSlots):
c = (ctrl >> (8*i)) & 0xff
if (c & 0x80) != 0:
# Empty or deleted
yield str(cnt+1), group['slots'][i]['elem']
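The pretty-printer above decodes the per-group control word: MapGroupSlots (8) control bytes packed into a uint64, where a byte with the high bit set marks an empty or deleted slot. A minimal Go sketch of the same decoding, written for illustration only (decodeCtrl is a hypothetical helper, not part of the runtime):

package main

import "fmt"

// decodeCtrl mirrors the group_slots logic above: it reports which of the 8
// slots in a group are in use, skipping bytes whose high bit is set.
func decodeCtrl(ctrl uint64) (used []int) {
	const mapGroupSlots = 8 // see internal/abi:MapGroupSlots
	for i := 0; i < mapGroupSlots; i++ {
		c := (ctrl >> (8 * uint(i))) & 0xff
		if c&0x80 != 0 {
			continue // empty or deleted slot
		}
		used = append(used, i)
	}
	return used
}

func main() {
	fmt.Println(decodeCtrl(0x80_80_80_80_80_80_80_00)) // only slot 0 in use: [0]
}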
# The linker DWARF generation
- # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records
+ # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records
# dirPtr as a **table[K,V], but it may actually be two different types:
#
# For "full size" maps (dirLen > 0), dirPtr is actually a pointer to
length = table['groups']['lengthMask'] + 1
# The linker DWARF generation
- # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records
+ # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records
# groups.data as a *group[K,V], but it is actually a pointer to
# variable length array *[length]group[K,V].
#
}
return true
case abi.Map:
- mt := (*abi.SwissMapType)(unsafe.Pointer(t))
- mv := (*abi.SwissMapType)(unsafe.Pointer(v))
+ mt := (*abi.MapType)(unsafe.Pointer(t))
+ mv := (*abi.MapType)(unsafe.Pointer(v))
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
case abi.Pointer:
pt := (*ptrtype)(unsafe.Pointer(t))