We use the same heuristics as existing maps.
For #54766.
Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap
Change-Id: I44bb51483cae2c1714717f1b501850fb9e55a39a
Reviewed-on: https://go-review.googlesource.com/c/go/+/616461
Auto-Submit: Michael Pratt <mpratt@google.com>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
// elem elemType
// }
// }
+
+ keytype := t.Key()
+ elemtype := t.Elem()
+ types.CalcSize(keytype)
+ types.CalcSize(elemtype)
+ if keytype.Size() > abi.SwissMapMaxKeyBytes {
+ keytype = types.NewPtr(keytype)
+ }
+ if elemtype.Size() > abi.SwissMapMaxElemBytes {
+ elemtype = types.NewPtr(elemtype)
+ }
+
slotFields := []*types.Field{
- makefield("key", t.Key()),
- makefield("elem", t.Elem()),
+ makefield("key", keytype),
+ makefield("elem", elemtype),
}
slot := types.NewStruct(slotFields)
slot.SetNoalg(true)
// the end to ensure pointers are valid.
base.Fatalf("bad group size for %v", t)
}
+ if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
+ base.Fatalf("key indirect incorrect for %v", t)
+ }
+ if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
+ base.Fatalf("elem indirect incorrect for %v", t)
+ }
t.MapType().SwissGroup = group
group.StructType().Map = t
if hashMightPanic(t.Key()) {
flags |= abi.SwissMapHashMightPanic
}
+	// Oversized keys/elems are stored indirectly (pointer in the slot,
+	// object allocated separately). The elem check must use the elem
+	// constant, not the key constant, or the flag can disagree with the
+	// group layout computed above.
+	if t.Key().Size() > abi.SwissMapMaxKeyBytes {
+		flags |= abi.SwissMapIndirectKey
+	}
+	if t.Elem().Size() > abi.SwissMapMaxElemBytes {
+		flags |= abi.SwissMapIndirectElem
+	}
c.Field("Flags").WriteUint32(flags)
if u := t.Underlying(); u != t {
// Number of slots in a group.
SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8
+
+ // Maximum key or elem size to keep inline (instead of mallocing per element).
+ // Must fit in a uint8.
+ SwissMapMaxKeyBytes = 128
+ SwissMapMaxElemBytes = 128
)
type SwissMapType struct {
const (
SwissMapNeedKeyUpdate = 1 << iota
SwissMapHashMightPanic
+ SwissMapIndirectKey
+ SwissMapIndirectElem
)
func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&SwissMapHashMightPanic != 0
}
+func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
+ return mt.Flags&SwissMapIndirectKey != 0
+}
+func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
+ return mt.Flags&SwissMapIndirectElem != 0
+}
if g.ctrls().get(j) == ctrlDeleted {
continue
}
- return g.key(typ, j)
+ slotKey := g.key(typ, j)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
+ return slotKey
}
}
}
}
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
- return slotKey, g.elem(typ, i), true
+ slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+ return slotKey, slotElem, true
}
}
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
if typ.NeedKeyUpdate() {
typedmemmove(typ.Key, slotKey, key)
}
slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
return slotElem
}
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ kmem := newobject(typ.Key)
+ *(*unsafe.Pointer)(slotKey) = kmem
+ slotKey = kmem
+ }
typedmemmove(typ.Key, slotKey, key)
+
slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ emem := newobject(typ.Elem)
+ *(*unsafe.Pointer)(slotElem) = emem
+ slotElem = emem
+ }
g.ctrls().set(i, ctrl(h2(hash)))
m.used++
// Empty
continue
}
+
key := g.key(typ, i)
+ if typ.IndirectKey() {
+ key = *((*unsafe.Pointer)(key))
+ }
+
elem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
+
hash := typ.Hasher(key, m.seed)
+
+ // TODO(prattmic): For indirect key/elem, this is
+ // allocating new objects for key/elem. That is
+ // unnecessary; the new table could simply point to the
+ // existing object.
slotElem := tab.uncheckedPutSlot(typ, hash, key)
typedmemmove(typ.Elem, slotElem, elem)
tab.used++
for match != 0 {
i := match.first()
slotKey := g.key(typ, i)
+ origSlotKey := slotKey
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
m.used--
- typedmemclr(typ.Key, slotKey)
- typedmemclr(typ.Elem, g.elem(typ, i))
+ if typ.IndirectKey() {
+ // Clearing the pointer is sufficient.
+ *(*unsafe.Pointer)(origSlotKey) = nil
+ } else if typ.Key.Pointers() {
+ // Only bother clearing if there are pointers.
+ typedmemclr(typ.Key, slotKey)
+ }
+
+ slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ // Clearing the pointer is sufficient.
+ *(*unsafe.Pointer)(slotElem) = nil
+ } else {
+ // Unlike keys, always clear the elem (even if
+ // it contains no pointers), as compound
+ // assignment operations depend on cleared
+ // deleted values. See
+ // https://go.dev/issue/25936.
+ typedmemclr(typ.Elem, slotElem)
+ }
// We only have 1 group, so it is OK to immediately
// reuse deleted slots.
t.Errorf("elem address outside groups allocation; got %p want [%p, %p]", got, start, end)
}
}
+
+func TestMapIndirect(t *testing.T) {
+ type big [abi.SwissMapMaxKeyBytes + abi.SwissMapMaxElemBytes]byte
+
+ m, typ := maps.NewTestMap[big, big](8)
+
+ key := big{}
+ elem := big{}
+ elem[0] = 128
+
+ for i := 0; i < 31; i++ {
+ key[0] += 1
+ elem[0] += 1
+ m.Put(typ, unsafe.Pointer(&key), unsafe.Pointer(&elem))
+
+ if maps.DebugLog {
+ fmt.Printf("After put %v: %v\n", key, m)
+ }
+ }
+
+ if m.Used() != 31 {
+ t.Errorf("Used() used got %d want 31", m.Used())
+ }
+
+ key = big{}
+ elem = big{}
+ elem[0] = 128
+
+ for i := 0; i < 31; i++ {
+ key[0] += 1
+ elem[0] += 1
+ got, ok := m.Get(typ, unsafe.Pointer(&key))
+ if !ok {
+ t.Errorf("Get(%v) got ok false want true", key)
+ }
+ gotElem := *(*big)(got)
+ if gotElem != elem {
+ t.Errorf("Get(%v) got elem %v want %v", key, gotElem, elem)
+ }
+ }
+}
+
+// Delete should clear element. See https://go.dev/issue/25936.
+func TestMapDeleteClear(t *testing.T) {
+ m, typ := maps.NewTestMap[int64, int64](8)
+
+ key := int64(0)
+ elem := int64(128)
+
+ m.Put(typ, unsafe.Pointer(&key), unsafe.Pointer(&elem))
+
+ if maps.DebugLog {
+ fmt.Printf("After put %d: %v\n", key, m)
+ }
+
+ got, ok := m.Get(typ, unsafe.Pointer(&key))
+ if !ok {
+ t.Errorf("Get(%d) got ok false want true", key)
+ }
+ gotElem := *(*int64)(got)
+ if gotElem != elem {
+ t.Errorf("Get(%d) got elem %d want %d", key, gotElem, elem)
+ }
+
+ m.Delete(typ, unsafe.Pointer(&key))
+
+ gotElem = *(*int64)(got)
+ if gotElem != 0 {
+ t.Errorf("Delete(%d) failed to clear element. got %d want 0", key, gotElem)
+ }
+}
//go:linkname newarray
func newarray(typ *abi.Type, n int) unsafe.Pointer
+
+//go:linkname newobject
+func newobject(typ *abi.Type) unsafe.Pointer
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
- return g.elem(typ, i)
+ slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+ return slotElem
}
match = match.removeFirst()
}
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
if typ.NeedKeyUpdate() {
typedmemmove(typ.Key, slotKey, key)
}
slotElem = g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
t.checkInvariants(typ)
break outer
// If there is room left to grow, just insert the new entry.
if t.growthLeft > 0 {
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ kmem := newobject(typ.Key)
+ *(*unsafe.Pointer)(slotKey) = kmem
+ slotKey = kmem
+ }
typedmemmove(typ.Key, slotKey, key)
+
slotElem = g.elem(typ, i)
+ if typ.IndirectElem() {
+ emem := newobject(typ.Elem)
+ *(*unsafe.Pointer)(slotElem) = emem
+ slotElem = emem
+ }
g.ctrls().set(i, ctrl(h2(hash)))
t.growthLeft--
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
- return slotKey, g.elem(typ, i), true
+ slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+ return slotKey, slotElem, true
}
match = match.removeFirst()
}
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
- return g.elem(typ, i), true
+ slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+ return slotElem, true
}
match = match.removeFirst()
}
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
if typ.Key.Equal(key, slotKey) {
if typ.NeedKeyUpdate() {
typedmemmove(typ.Key, slotKey, key)
}
slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
t.checkInvariants(typ)
return slotElem, true
// If there is room left to grow, just insert the new entry.
if t.growthLeft > 0 {
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ kmem := newobject(typ.Key)
+ *(*unsafe.Pointer)(slotKey) = kmem
+ slotKey = kmem
+ }
typedmemmove(typ.Key, slotKey, key)
+
slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ emem := newobject(typ.Elem)
+ *(*unsafe.Pointer)(slotElem) = emem
+ slotElem = emem
+ }
g.ctrls().set(i, ctrl(h2(hash)))
t.growthLeft--
i := match.first()
slotKey := g.key(typ, i)
+ if typ.IndirectKey() {
+ kmem := newobject(typ.Key)
+ *(*unsafe.Pointer)(slotKey) = kmem
+ slotKey = kmem
+ }
typedmemmove(typ.Key, slotKey, key)
+
slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ emem := newobject(typ.Elem)
+ *(*unsafe.Pointer)(slotElem) = emem
+ slotElem = emem
+ }
if g.ctrls().get(i) == ctrlEmpty {
t.growthLeft--
for match != 0 {
i := match.first()
+
slotKey := g.key(typ, i)
+ origSlotKey := slotKey
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
+
if typ.Key.Equal(key, slotKey) {
t.used--
m.used--
- typedmemclr(typ.Key, slotKey)
- typedmemclr(typ.Elem, g.elem(typ, i))
+ if typ.IndirectKey() {
+ // Clearing the pointer is sufficient.
+ *(*unsafe.Pointer)(origSlotKey) = nil
+ } else if typ.Key.Pointers() {
+ // Only bother clearing the key if there
+ // are pointers in it.
+ typedmemclr(typ.Key, slotKey)
+ }
+
+ slotElem := g.elem(typ, i)
+ if typ.IndirectElem() {
+ // Clearing the pointer is sufficient.
+ *(*unsafe.Pointer)(slotElem) = nil
+ } else {
+ // Unlike keys, always clear the elem (even if
+ // it contains no pointers), as compound
+ // assignment operations depend on cleared
+ // deleted values. See
+ // https://go.dev/issue/25936.
+ typedmemclr(typ.Elem, slotElem)
+ }
// Only a full group can appear in the middle
// of a probe sequence (a group with at least
}
key := g.key(it.typ, k)
+ if it.typ.IndirectKey() {
+ key = *((*unsafe.Pointer)(key))
+ }
// As below, if we have grown to a full map since Init,
// we continue to use the old group to decide the keys
// See comment below.
if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
elem = g.elem(it.typ, k)
+ if it.typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
} else {
continue
}
}
} else {
elem = g.elem(it.typ, k)
+ if it.typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
}
it.entryIdx++
}
key := g.key(it.typ, slotIdx)
+ if it.typ.IndirectKey() {
+ key = *((*unsafe.Pointer)(key))
+ }
// If the table has changed since the last
// call, then it has grown or split. In this
// clear.
if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
elem = g.elem(it.typ, slotIdx)
+ if it.typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
} else {
continue
}
}
} else {
elem = g.elem(it.typ, slotIdx)
+ if it.typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
}
it.entryIdx++
// Empty or deleted
continue
}
+
key := g.key(typ, j)
+ if typ.IndirectKey() {
+ key = *((*unsafe.Pointer)(key))
+ }
+
elem := g.elem(typ, j)
+ if typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
+
hash := typ.Hasher(key, t.seed)
var newTable *table
if hash&mask == 0 {
} else {
newTable = right
}
+ // TODO(prattmic): For indirect key/elem, this is
+ // allocating new objects for key/elem. That is
+ // unnecessary; the new table could simply point to the
+ // existing object.
slotElem := newTable.uncheckedPutSlot(typ, hash, key)
typedmemmove(typ.Elem, slotElem, elem)
newTable.used++
// Empty or deleted
continue
}
+
key := g.key(typ, j)
+ if typ.IndirectKey() {
+ key = *((*unsafe.Pointer)(key))
+ }
+
elem := g.elem(typ, j)
+ if typ.IndirectElem() {
+ elem = *((*unsafe.Pointer)(elem))
+ }
+
hash := typ.Hasher(key, t.seed)
+
+ // TODO(prattmic): For indirect key/elem, this is
+ // allocating new objects for key/elem. That is
+ // unnecessary; the new table could simply point to the
+ // existing object.
slotElem := newTable.uncheckedPutSlot(typ, hash, key)
typedmemmove(typ.Elem, slotElem, elem)
newTable.used++
used++
key := g.key(typ, j)
+ if typ.IndirectKey() {
+ key = *((*unsafe.Pointer)(key))
+ }
// Can't lookup keys that don't compare equal
// to themselves (e.g., NaN).
mt.SlotSize = slot.Size()
mt.ElemOff = slot.Field(1).Offset
mt.Flags = 0
- // TODO(prattmic): indirect key/elem flags
if needKeyUpdate(ktyp) {
mt.Flags |= abi.SwissMapNeedKeyUpdate
}
if hashMightPanic(ktyp) {
mt.Flags |= abi.SwissMapHashMightPanic
}
+	// Match groupAndSlotOf: a key/elem larger than its inline maximum is
+	// stored indirectly. The elem comparison must use SwissMapMaxElemBytes
+	// (not the key constant) so the flag stays consistent with the slot
+	// layout.
+	if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
+		mt.Flags |= abi.SwissMapIndirectKey
+	}
+	if etyp.Size_ > abi.SwissMapMaxElemBytes {
+		mt.Flags |= abi.SwissMapIndirectElem
+	}
mt.PtrToThis = 0
ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
}
func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
- // TODO(prattmic): indirect key/elem flags
-
// type group struct {
// ctrl uint64
// slots [abi.SwissMapGroupSlots]struct {
// }
// }
+ if ktyp.Size() > abi.SwissMapMaxKeyBytes {
+ ktyp = PointerTo(ktyp)
+ }
+ if etyp.Size() > abi.SwissMapMaxElemBytes {
+ etyp = PointerTo(etyp)
+ }
+
fields := []StructField{
{
Name: "Key",
return mallocgc(typ.Size_, typ, true)
}
+//go:linkname maps_newobject internal/runtime/maps.newobject
+func maps_newobject(typ *_type) unsafe.Pointer {
+ return newobject(typ)
+}
+
// reflect_unsafe_New is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include: