return h & 0x7f
}
+// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType.
type Map struct {
// The number of filled slots (i.e. the number of elements in all
// tables). Excludes deleted slots.
// that both sides will detect the race.
writing uint8
+ // tombstonePossible is false if we know that no table in this map
+ // contains a tombstone.
+ // It is one-way conservative: Delete sets it on the first tombstone
+ // and only Clear resets it, so it may remain true after the last
+ // tombstone is actually gone.
+ tombstonePossible bool
+
// clearSeq is a sequence counter of calls to Clear. It is used to
// detect map clears during iteration.
clearSeq uint64
m.deleteSmall(typ, hash, key)
} else {
idx := m.directoryIndex(hash)
- m.directoryAt(idx).Delete(typ, m, hash, key)
+ // table.Delete now reports whether it left a tombstone behind;
+ // remember that so Clear knows it has work to do even when the
+ // map has become empty.
+ if m.directoryAt(idx).Delete(typ, m, hash, key) {
+ m.tombstonePossible = true
+ }
}
if m.used == 0 {
// Clear deletes all entries from the map resulting in an empty map.
func (m *Map) Clear(typ *abi.SwissMapType) {
- if m == nil || m.Used() == 0 {
+ // Even when the map holds no entries, proceed if tombstones may
+ // exist: Clear also purges tombstones (restoring growthLeft in
+ // each table).
+ if m == nil || m.Used() == 0 && !m.tombstonePossible {
return
}
lastTab = t
}
m.used = 0
- m.clearSeq++
+ m.tombstonePossible = false
// TODO: shrink directory?
}
+ // clearSeq is bumped once here instead of per branch, so every
+ // path through Clear invalidates in-flight iterators.
+ m.clearSeq++
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See https://go.dev/issue/25237.
g.ctrls().setEmpty()
m.used = 0
- m.clearSeq++
}
func (m *Map) Clone(typ *abi.SwissMapType) *Map {
groupCount := uint64(capacity) / abi.SwissMapGroupSlots
t.groups = newGroups(typ, groupCount)
t.capacity = capacity
- t.resetGrowthLeft()
+ t.growthLeft = t.maxGrowthLeft()
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
}
}
-// Preconditions: table must be empty.
-func (t *table) resetGrowthLeft() {
- var growthLeft uint16
+// maxGrowthLeft is the number of inserts we can do before
+// resizing, starting from an empty table.
+//
+// Unlike the old resetGrowthLeft, it has no side effects; callers
+// assign t.growthLeft themselves (and Clear compares against it to
+// detect a tombstone-free table).
+func (t *table) maxGrowthLeft() uint16 {
if t.capacity == 0 {
// No real reason to support zero capacity table, since an
// empty Map simply won't have a table.
//
// TODO(go.dev/issue/54766): With a special case in probing for
// single-group tables, we could fill all slots.
- growthLeft = t.capacity - 1
+ return t.capacity - 1
} else {
if t.capacity*maxAvgGroupLoad < t.capacity {
// TODO(prattmic): Do something cleaner.
panic("overflow")
}
- growthLeft = (t.capacity * maxAvgGroupLoad) / abi.SwissMapGroupSlots
+ return (t.capacity * maxAvgGroupLoad) / abi.SwissMapGroupSlots
}
- t.growthLeft = growthLeft
+
}
func (t *table) Used() uint64 {
}
}
-func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) {
+// Delete returns true if it put a tombstone in t.
+func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
// full now, we can simply remove the element.
// Otherwise, we create a tombstone to mark the
// slot as deleted.
+ // tombstone records whether we marked the slot ctrlDeleted
+ // (tombstone) rather than restoring it to ctrlEmpty.
+ var tombstone bool
if g.ctrls().matchEmpty() != 0 {
g.ctrls().set(i, ctrlEmpty)
t.growthLeft++
} else {
g.ctrls().set(i, ctrlDeleted)
+ tombstone = true
}
t.checkInvariants(typ, m)
- return
+ return tombstone
}
match = match.removeFirst()
}
if match != 0 {
// Finding an empty slot means we've reached the end of
// the probe sequence.
- return
+ // Key not present, so no tombstone was created.
+ return false
}
}
}
// Clear deletes all entries from the map resulting in an empty map.
func (t *table) Clear(typ *abi.SwissMapType) {
+ // growthLeft == maxGrowthLeft() together with used == 0 implies
+ // the table has no tombstones: inserts consume growthLeft and
+ // only empty-restoring deletes give it back, so a tombstone
+ // leaves growthLeft below the maximum.
+ mgl := t.maxGrowthLeft()
+ if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
+ return
+ }
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- typedmemclr(typ.Group, g.data)
+ // Skip zeroing groups with no full slots — they hold no live
+ // entries. NOTE(review): this assumes deleted slots' key/elem
+ // data was already cleared when the entry was deleted — confirm.
+ if g.ctrls().matchFull() != 0 {
+ typedmemclr(typ.Group, g.data)
+ }
g.ctrls().setEmpty()
}
-
t.used = 0
- t.resetGrowthLeft()
+ t.growthLeft = mgl
}
type Iter struct {