This is the same design as existing maps.
For #54766.
Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap
Change-Id: I5f6ef5fea1e0f0616bcd90eaae7faee4cdac58c6
Reviewed-on: https://go-review.googlesource.com/c/go/+/616460
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
Reviewed-by: Keith Randall <khr@google.com>
//
// globalDepth uint8
// globalShift uint8
+ //
+ // writing uint8
// // N.B. Padding
//
// clearSeq uint64
makefield("dirLen", types.Types[types.TINT]),
makefield("globalDepth", types.Types[types.TUINT8]),
makefield("globalShift", types.Types[types.TUINT8]),
+ makefield("writing", types.Types[types.TUINT8]),
makefield("clearSeq", types.Types[types.TUINT64]),
}
// The size of Map should be 48 bytes on 64 bit
// and 32 bytes on 32 bit platforms.
- if size := int64(2*8 + 4*types.PtrSize /* one extra for globalDepth + padding */); m.Size() != size {
+ if size := int64(2*8 + 4*types.PtrSize /* one extra for globalDepth/globalShift/writing + padding */); m.Size() != size {
base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", m.Size(), size)
}
// On 64-bit systems, this is 64 - globalDepth.
globalShift uint8
+ // writing is a flag that is toggled (XOR 1) while the map is being
+ // written. Normally it is set to 1 when writing, but if there are
+ // multiple concurrent writers, then toggling increases the probability
+ // that both sides will detect the race.
+ writing uint8
+
// clearSeq is a sequence counter of calls to Clear. It is used to
// detect map clears during iteration.
clearSeq uint64
return nil, nil, false
}
+ if m.writing != 0 {
+ fatal("concurrent map read and map write")
+ }
+
hash := typ.Hasher(key, m.seed)
if m.dirLen == 0 {
return nil, false
}
+ if m.writing != 0 {
+ fatal("concurrent map read and map write")
+ }
+
hash := typ.Hasher(key, m.seed)
if m.dirLen == 0 {
//
// PutSlot never returns nil.
func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer {
+ if m.writing != 0 {
+ fatal("concurrent map writes")
+ }
+
hash := typ.Hasher(key, m.seed)
+ // Set writing after calling Hasher, since Hasher may panic, in which
+ // case we have not actually done a write.
+ m.writing ^= 1 // toggle, see comment on writing
+
if m.dirPtr == nil {
m.growToSmall(typ)
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
- return m.putSlotSmall(typ, hash, key)
+ elem := m.putSlotSmall(typ, hash, key)
+
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
+
+ return elem
}
// Can't fit another entry, grow to full size map.
if !ok {
continue
}
+
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
+
return elem
}
}
return
}
+ if m.writing != 0 {
+ fatal("concurrent map writes")
+ }
+
hash := typ.Hasher(key, m.seed)
+ // Set writing after calling Hasher, since Hasher may panic, in which
+ // case we have not actually done a write.
+ m.writing ^= 1 // toggle, see comment on writing
+
if m.dirLen == 0 {
m.deleteSmall(typ, hash, key)
- return
+ } else {
+ idx := m.directoryIndex(hash)
+ m.directoryAt(idx).Delete(typ, m, key)
}
- idx := m.directoryIndex(hash)
- m.directoryAt(idx).Delete(typ, m, key)
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
}
func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) {
return
}
+ if m.writing != 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1 // toggle, see comment on writing
+
if m.dirLen == 0 {
m.clearSmall(typ)
- return
+ } else {
+ var lastTab *table
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
+ if t == lastTab {
+ continue
+ }
+ t.Clear(typ)
+ lastTab = t
+ }
+ m.used = 0
+ m.clearSeq++
+ // TODO: shrink directory?
}
- var lastTab *table
- for i := range m.dirLen {
- t := m.directoryAt(uintptr(i))
- if t == lastTab {
- continue
- }
- t.Clear(typ)
- lastTab = t
+ if m.writing == 0 {
+ fatal("concurrent map writes")
}
- m.used = 0
- m.clearSeq++
- // TODO: shrink directory?
+ m.writing ^= 1
}
func (m *Map) clearSmall(typ *abi.SwissMapType) {
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
- // TODO: concurrent checks.
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1)
return unsafe.Pointer(&zeroVal[0])
}
+ if m.writing != 0 {
+ fatal("concurrent map read and map write")
+ }
+
hash := typ.Hasher(key, m.seed)
if m.dirLen <= 0 {
//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
- // TODO: concurrent checks.
if m == nil {
panic(errNilAssign)
}
if asan.Enabled {
asan.Read(key, typ.Key.Size_)
}
+ if m.writing != 0 {
+ fatal("concurrent map writes")
+ }
hash := typ.Hasher(key, m.seed)
+ // Set writing after calling Hasher, since Hasher may panic, in which
+ // case we have not actually done a write.
+ m.writing ^= 1 // toggle, see comment on writing
+
if m.dirPtr == nil {
m.growToSmall(typ)
}
if m.dirLen == 0 {
if m.used < abi.SwissMapGroupSlots {
- return m.putSlotSmall(typ, hash, key)
+ elem := m.putSlotSmall(typ, hash, key)
+
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
+
+ return elem
}
// Can't fit another entry, grow to full size map.
m.growToTable(typ)
}
+ var slotElem unsafe.Pointer
outer:
for {
// Select table.
typedmemmove(typ.Key, slotKey, key)
}
- slotElem := g.elem(typ, i)
+ slotElem = g.elem(typ, i)
t.checkInvariants(typ)
- return slotElem
+ break outer
}
match = match.removeFirst()
}
if t.growthLeft > 0 {
slotKey := g.key(typ, i)
typedmemmove(typ.Key, slotKey, key)
- slotElem := g.elem(typ, i)
+ slotElem = g.elem(typ, i)
g.ctrls().set(i, ctrl(h2(hash)))
t.growthLeft--
m.used++
t.checkInvariants(typ)
- return slotElem
+ break outer
}
t.rehash(typ, m)
}
}
}
+
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
+
+ return slotElem
}
return
}
+ if it.m.writing != 0 {
+ fatal("concurrent map iteration and map write")
+ }
+
if it.dirIdx < 0 {
// Map was small at Init.
g := it.groupSmall
func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
- // TODO: concurrent checks.
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess2)
func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
- // TODO: concurrent checks.
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
}
func mapiternext(it *maps.Iter) {
- // TODO: concurrent checks.
if raceenabled {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapiternext))
// mapclear deletes all keys from a map.
func mapclear(t *abi.SwissMapType, m *maps.Map) {
- // TODO: concurrent checks.
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)