cmd/compile: fix mapassign_fast* routines for pointer keys
author    Keith Randall <keithr@alum.mit.edu>
          Tue, 21 Nov 2017 15:14:11 +0000 (07:14 -0800)
committer Keith Randall <khr@golang.org>
          Wed, 22 Nov 2017 04:30:27 +0000 (04:30 +0000)
The signatures of the mapassign_fast* routines need to distinguish
the pointerness of their key argument. If one of the affected routines
suspends partway through, the object pointed to by the key might
get garbage collected because the key is typed as a uint{32,64}.

This is not a problem for mapaccess or mapdelete because the key
in those situations does not live beyond the call involved. If the
object referenced by the key is garbage collected prematurely, the
code still works fine.  Even if that object is subsequently reallocated,
it can't be written to the map in time to affect the lookup/delete.
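
In concrete terms, only the static type of the key argument changes
between the old and new entry points; that is what lets the stack
scanner keep the referenced object alive for the duration of the call.
For the 64-bit case (the uint64 typing of the existing routine is
stated above rather than shown in the hunks below):

	// Existing fast path: the GC sees the key as an integer.
	func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer

	// New variant for pointer keys: the GC traces the key argument.
	func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer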

Fixes #22781

Change-Id: I0bbbc5e9883d5ce702faf4e655348be1191ee439
Reviewed-on: https://go-review.googlesource.com/79018
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
src/cmd/compile/internal/gc/builtin.go
src/cmd/compile/internal/gc/builtin/runtime.go
src/cmd/compile/internal/gc/walk.go
src/runtime/hashmap_fast.go
test/fixedbugs/issue22781.go [new file with mode: 0644]

diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go
index b865d2f3baa28758039ec578754675dff072c34f..0733a460d5b6d8c884f747059ac1fed82e7ca6de 100644
--- a/src/cmd/compile/internal/gc/builtin.go
+++ b/src/cmd/compile/internal/gc/builtin.go
@@ -88,7 +88,9 @@ var runtimeDecls = [...]struct {
        {"mapaccess2_fat", funcTag, 70},
        {"mapassign", funcTag, 65},
        {"mapassign_fast32", funcTag, 66},
+       {"mapassign_fast32ptr", funcTag, 66},
        {"mapassign_fast64", funcTag, 66},
+       {"mapassign_fast64ptr", funcTag, 66},
        {"mapassign_faststr", funcTag, 66},
        {"mapiterinit", funcTag, 71},
        {"mapdelete", funcTag, 71},
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index a27abcafa2fe35f8c8c1228165627d7098676545..de17d51d8ab20d216a6b67cdbb641ac7a8f1fefe 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -109,7 +109,9 @@ func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pre
 func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
 func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
 func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
 func mapdelete(mapType *byte, hmap map[any]any, key *any)
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 7e47c286e28156acc0cccd6f5156d8f4a5d36f78..cbbd635f6d52ecc0edd26c3ae283d67bb186157f 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -2826,21 +2826,23 @@ func mapfndel(name string, t *types.Type) *Node {
 const (
        mapslow = iota
        mapfast32
+       mapfast32ptr
        mapfast64
+       mapfast64ptr
        mapfaststr
        nmapfast
 )
 
 type mapnames [nmapfast]string
 
-func mkmapnames(base string) mapnames {
-       return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
+func mkmapnames(base string, ptr string) mapnames {
+       return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
 }
 
-var mapaccess1 = mkmapnames("mapaccess1")
-var mapaccess2 = mkmapnames("mapaccess2")
-var mapassign = mkmapnames("mapassign")
-var mapdelete = mkmapnames("mapdelete")
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
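
(With ptr == "", the _fast32/_fast64 names are simply duplicated into
the ptr slots, so mapaccess and mapdelete keep resolving to the
non-pointer routines; per the commit message, that is safe because
their keys do not outlive the call.)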
 
 func mapfast(t *types.Type) int {
        // Check ../../runtime/hashmap.go:maxValueSize before changing.
@@ -2849,9 +2851,22 @@ func mapfast(t *types.Type) int {
        }
        switch algtype(t.Key()) {
        case AMEM32:
-               return mapfast32
+               if !t.Key().HasHeapPointer() {
+                       return mapfast32
+               }
+               if Widthptr == 4 {
+                       return mapfast32ptr
+               }
+               Fatalf("small pointer %v", t.Key())
        case AMEM64:
-               return mapfast64
+               if !t.Key().HasHeapPointer() {
+                       return mapfast64
+               }
+               if Widthptr == 8 {
+                       return mapfast64ptr
+               }
+               // Two-word object, at least one of which is a pointer.
+               // Use the slow path.
        case ASTRING:
                return mapfaststr
        }
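
As a rough sketch (inferred from the switch above, not compiler
output), the selection on a 64-bit platform (Widthptr == 8) works
out as follows:

	package main

	// Illustrative only: which case of the mapfast switch each key
	// type would hit on a 64-bit platform.
	func main() {
		_ = map[uint32]int{}    // AMEM32, no pointer  -> mapfast32
		_ = map[uint64]int{}    // AMEM64, no pointer  -> mapfast64
		_ = map[*int]int{}      // AMEM64, pointer key -> mapfast64ptr
		_ = map[string]int{}    // ASTRING             -> mapfaststr
		_ = map[[2]uint64]int{} // 16-byte key, no fast case -> mapslow
		// On a 32-bit platform, map[*int]int would be AMEM32 with a
		// pointer key and select mapfast32ptr instead.
	}
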
diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go
index 4dc876fb1d7681600c0033936d157321ee108523..2de381412bec4bc8d9b27c13150828bcf05d4264 100644
--- a/src/runtime/hashmap_fast.go
+++ b/src/runtime/hashmap_fast.go
@@ -420,11 +420,93 @@ again:
 
        insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
        // store new key at insert position
-       if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-               writebarrierptr((*uintptr)(insertk), uintptr(key))
-       } else {
-               *(*uint32)(insertk) = key
+       *(*uint32)(insertk) = key
+
+       h.count++
+
+done:
+       val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
+       if h.flags&hashWriting == 0 {
+               throw("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return val
+}
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map writes")
        }
+       hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling alg.hash for consistency with mapassign.
+       h.flags |= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast32(t, h, bucket)
+       }
+       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] == empty {
+                               if insertb == nil {
+                                       inserti = i
+                                       insertb = b
+                               }
+                               continue
+                       }
+                       k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+                       if k != key {
+                               continue
+                       }
+                       inserti = i
+                       insertb = b
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if insertb == nil {
+               // all current buckets are full, allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+       // store new key at insert position
+       *(*unsafe.Pointer)(insertk) = key
 
        h.count++
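
Note the key store in the new routine: because key is statically an
unsafe.Pointer, the compiler emits the write barrier for the store
itself, which is presumably why the hand-written writebarrierptr
branch deleted from mapassign_fast32 above is no longer needed:

	// mapassign_fast32: the key is a uint32; a plain store suffices.
	*(*uint32)(insertk) = key

	// mapassign_fast32ptr: the key is an unsafe.Pointer; the compiler
	// inserts the write barrier for this store automatically.
	*(*unsafe.Pointer)(insertk) = key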
 
@@ -510,18 +592,94 @@ again:
 
        insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
        // store new key at insert position
-       if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-               if sys.PtrSize == 8 {
-                       writebarrierptr((*uintptr)(insertk), uintptr(key))
-               } else {
-                       // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
-                       // Give up and call typedmemmove.
-                       typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+       *(*uint64)(insertk) = key
+
+       h.count++
+
+done:
+       val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
+       if h.flags&hashWriting == 0 {
+               throw("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+       return val
+}
+
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc()
+               racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map writes")
+       }
+       hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+       // Set hashWriting after calling alg.hash for consistency with mapassign.
+       h.flags |= hashWriting
+
+       if h.buckets == nil {
+               h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+       }
+
+again:
+       bucket := hash & bucketMask(h.B)
+       if h.growing() {
+               growWork_fast64(t, h, bucket)
+       }
+       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+       var insertb *bmap
+       var inserti uintptr
+       var insertk unsafe.Pointer
+
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] == empty {
+                               if insertb == nil {
+                                       insertb = b
+                                       inserti = i
+                               }
+                               continue
+                       }
+                       k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+                       if k != key {
+                               continue
+                       }
+                       insertb = b
+                       inserti = i
+                       goto done
                }
-       } else {
-               *(*uint64)(insertk) = key
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // Did not find mapping for key. Allocate new cell & add entry.
+
+       // If we hit the max load factor or we have too many overflow buckets,
+       // and we're not already in the middle of growing, start growing.
+       if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
        }
 
+       if insertb == nil {
+               // all current buckets are full, allocate a new one.
+               insertb = h.newoverflow(t, b)
+               inserti = 0 // not necessary, but avoids needlessly spilling inserti
+       }
+       insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+       insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+       // store new key at insert position
+       *(*unsafe.Pointer)(insertk) = key
+
        h.count++
 
 done:
diff --git a/test/fixedbugs/issue22781.go b/test/fixedbugs/issue22781.go
new file mode 100644
index 0000000..5ad8239
--- /dev/null
+++ b/test/fixedbugs/issue22781.go
@@ -0,0 +1,29 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime/debug"
+
+type T struct {
+       // >= 16 bytes to avoid tiny alloc.
+       a, b int
+}
+
+func main() {
+       debug.SetGCPercent(1)
+       for i := 0; i < 100000; i++ {
+               m := make(map[*T]struct{}, 0)
+               for j := 0; j < 20; j++ {
+                       // During the call to mapassign_fast64, the key argument
+                       // was incorrectly treated as a uint64. If the stack was
+                       // scanned during that call, the only pointer to k was
+                       // missed, leading to *k being collected prematurely.
+                       k := new(T)
+                       m[k] = struct{}{}
+               }
+       }
+}
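
With debug.SetGCPercent(1) forcing very frequent collections, a stack
scan is likely to land inside one of the many mapassign_fast64 calls,
so before this fix the test could observe *k collected (and possibly
reallocated) while still in use; with the fix it runs to completion
silently.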