typ.Group.Size involves two loads.
Instead, cache GroupSize as a separate field of the map type
so we can get to it in just one load.
Change-Id: I10ffdce1c7f75dcf448da14040fda78f0d75fd1d
Reviewed-on: https://go-review.googlesource.com/c/go/+/627716
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
c.Field("Elem").WritePtr(s2)
c.Field("Group").WritePtr(s3)
c.Field("Hasher").WritePtr(hasher)
+ c.Field("GroupSize").WriteUintptr(uint64(gtyp.Size()))
c.Field("SlotSize").WriteUintptr(uint64(slotTyp.Size()))
c.Field("ElemOff").WriteUintptr(uint64(elemOff))
var flags uint32
off += 2 * arch.PtrSize
case abi.Map:
if buildcfg.Experiment.SwissMap {
- off += 6*arch.PtrSize + 4 // internal/abi.SwissMapType
+ off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
if arch.PtrSize == 8 {
off += 4 // padding for final uint32 field (Flags).
}
SwissMapMaxKeyBytes = 128
SwissMapMaxElemBytes = 128
- ctrlEmpty = 0b10000000
- bitsetLSB = 0x0101010101010101
+ ctrlEmpty = 0b10000000
+ bitsetLSB = 0x0101010101010101
// Value of control word with all empty slots.
SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
Elem *Type
Group *Type // internal type representing a slot group
// function for hashing keys (ptr to key, seed) -> hash
- Hasher func(unsafe.Pointer, uintptr) uintptr
- SlotSize uintptr // size of key/elem slot
- ElemOff uintptr // offset of elem in key/elem slot
- Flags uint32
+ Hasher func(unsafe.Pointer, uintptr) uintptr
+ GroupSize uintptr // == Group.Size_
+ SlotSize uintptr // size of key/elem slot
+ ElemOff uintptr // offset of elem in key/elem slot
+ Flags uint32
}
// Flag values
// removeBelow removes all set bits below slot i (non-inclusive).
func (b bitset) removeBelow(i uintptr) bitset {
// Clear all bits below slot i's byte.
- mask := (uint64(1) << (8*uint64(i))) - 1
+ mask := (uint64(1) << (8 * uint64(i))) - 1
return b &^ bitset(mask)
}
func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference {
// TODO(prattmic): Do something here about truncation on cast to
// uintptr on 32-bit systems?
- offset := uintptr(i) * typ.Group.Size_
+ offset := uintptr(i) * typ.GroupSize
return groupReference{
data: unsafe.Pointer(uintptr(g.data) + offset),
if overflow {
return m // return an empty map.
} else {
- mem, overflow := math.MulUintptr(groups, mt.Group.Size_)
+ mem, overflow := math.MulUintptr(groups, mt.GroupSize)
if overflow || mem > maxAlloc {
return m // return an empty map.
}
key := *(*uint32)(keyPtr)
elem := *(*uint64)(elemPtr)
- if elem != 256 + uint64(key) {
- t.Errorf("iteration got key %d elem %d want elem %d", key, elem, 256 + uint64(key))
+ if elem != 256+uint64(key) {
+ t.Errorf("iteration got key %d elem %d want elem %d", key, elem, 256+uint64(key))
}
if _, ok := got[key]; ok {
t.Errorf("iteration got key %d more than once", key)
tab := m.TableFor(typ, unsafe.Pointer(&key))
start := tab.GroupsStart()
length := tab.GroupsLength()
- end := unsafe.Pointer(uintptr(start) + length*typ.Group.Size() - 1) // inclusive to ensure we have a valid pointer
+ end := unsafe.Pointer(uintptr(start) + length*typ.GroupSize - 1) // inclusive to ensure we have a valid pointer
if uintptr(got) < uintptr(start) || uintptr(got) > uintptr(end) {
t.Errorf("elem address outside groups allocation; got %p want [%p, %p]", got, start, end)
}