const (
mapslow = iota
mapfast32
+ mapfast32ptr
mapfast64
+ mapfast64ptr
mapfaststr
nmapfast
)
type mapnames [nmapfast]string
-func mkmapnames(base string) mapnames {
- return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
+func mkmapnames(base string, ptr string) mapnames {
+ return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
}
-var mapaccess1 = mkmapnames("mapaccess1")
-var mapaccess2 = mkmapnames("mapaccess2")
-var mapassign = mkmapnames("mapassign")
-var mapdelete = mkmapnames("mapdelete")
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
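// Illustration (not part of the diff): the ptr argument only affects the two
// new slots. mkmapnames("mapassign", "ptr") yields
//   {"mapassign", "mapassign_fast32", "mapassign_fast32ptr",
//    "mapassign_fast64", "mapassign_fast64ptr", "mapassign_faststr"},
// while mkmapnames("mapdelete", "") leaves those slots as the plain
// "mapdelete_fast32"/"mapdelete_fast64" names, since only assignment gets
// pointer-specialized runtime entry points in this change.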
func mapfast(t *types.Type) int {
// Check ../../runtime/hashmap.go:maxValueSize before changing.
if t.Val().Width > 128 {
return mapslow
}
switch algtype(t.Key()) {
case AMEM32:
- return mapfast32
+ if !t.Key().HasHeapPointer() {
+ return mapfast32
+ }
+ if Widthptr == 4 {
+ return mapfast32ptr
+ }
+ Fatalf("small pointer %v", t.Key())
case AMEM64:
- return mapfast64
+ if !t.Key().HasHeapPointer() {
+ return mapfast64
+ }
+ if Widthptr == 8 {
+ return mapfast64ptr
+ }
+ // Two-word object, at least one of which is a pointer.
+ // Use the slow path.
case ASTRING:
return mapfaststr
}
return mapslow
}
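// Illustration (not part of the diff, assuming a 64-bit target): with the
// dispatch above, an assignment to a pointer-keyed map such as
//
//	m := make(map[*node]int) // node is a hypothetical type
//	m[k] = 1                 // 8-byte key containing a pointer
//
// now selects mapassign_fast64ptr, whose key store is a pointer-typed write
// (see *(*unsafe.Pointer)(insertk) = key below), so the compiler emits the
// write barrier and the explicit writebarrierptr branches can be dropped from
// the non-pointer fast paths. map[int32]T, map[int64]T and map[string]T
// assignments keep using the existing _fast32/_fast64/_faststr entry points.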
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
- if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
- writebarrierptr((*uintptr)(insertk), uintptr(key))
- } else {
- *(*uint32)(insertk) = key
- }
+ *(*uint32)(insertk) = key
+
+ h.count++
+
+done:
+ val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return val
+}
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapassign.
+ h.flags |= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast32(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] == empty {
+ if insertb == nil {
+ inserti = i
+ insertb = b
+ }
+ continue
+ }
+ k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+ if k != key {
+ continue
+ }
+ inserti = i
+ insertb = b
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if insertb == nil {
+ // all current buckets are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+ // store new key at insert position
+ *(*unsafe.Pointer)(insertk) = key
+
+ h.count++
+
+done:
+ val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return val
+}
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
- if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
- if sys.PtrSize == 8 {
- writebarrierptr((*uintptr)(insertk), uintptr(key))
- } else {
- // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
- // Give up and call typedmemmove.
- typedmemmove(t.key, insertk, unsafe.Pointer(&key))
- }
- } else {
- *(*uint64)(insertk) = key
- }
+ *(*uint64)(insertk) = key
+
+ h.count++
+
+done:
+ val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return val
+}
+
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc()
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapassign.
+ h.flags |= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & bucketMask(h.B)
+ if h.growing() {
+ growWork_fast64(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+ var insertb *bmap
+ var inserti uintptr
+ var insertk unsafe.Pointer
+
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] == empty {
+ if insertb == nil {
+ insertb = b
+ inserti = i
+ }
+ continue
+ }
+ k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+ if k != key {
+ continue
+ }
+ insertb = b
+ inserti = i
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+ if insertb == nil {
+ // all current buckets are full, allocate a new one.
+ insertb = h.newoverflow(t, b)
+ inserti = 0 // not necessary, but avoids needlessly spilling inserti
+ }
+ insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+ // store new key at insert position
+ *(*unsafe.Pointer)(insertk) = key
+
h.count++
+done: