Cypherpunks repositories - gostls13.git/commitdiff
runtime: only check for pointers up to ptrdata, not size
author: Ian Lance Taylor <iant@golang.org>
Fri, 3 Apr 2020 00:14:25 +0000 (17:14 -0700)
committer: Ian Lance Taylor <iant@golang.org>
Wed, 8 Apr 2020 04:18:04 +0000 (04:18 +0000)
Change-Id: I166cf253b7f2483d652c98d2fba36c380e2f3347
Reviewed-on: https://go-review.googlesource.com/c/go/+/227177
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
src/runtime/mbarrier.go
src/runtime/slice.go

index df3ab6fc3c6f2418ff1b93e3c072c7ed6de7640a..941324782c66aaa01fb868ec061da8ea07f74ab5 100644 (file)
@@ -158,7 +158,7 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
                return
        }
        if typ.ptrdata != 0 {
-               bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
+               bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
        }
        // There's a race here: if some other goroutine can write to
        // src, it may change some pointer in src after we've
@@ -198,12 +198,27 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
        if writeBarrier.needed && typ.ptrdata != 0 && size >= sys.PtrSize {
                // Pointer-align start address for bulk barrier.
                adst, asrc, asize := dst, src, size
+               ptrdata := typ.ptrdata
+               if ptrdata > off {
+                       ptrdata -= off
+               } else {
+                       ptrdata = 0
+               }
                if frag := -off & (sys.PtrSize - 1); frag != 0 {
                        adst = add(dst, frag)
                        asrc = add(src, frag)
                        asize -= frag
+                       if ptrdata > frag {
+                               ptrdata -= frag
+                       } else {
+                               ptrdata = 0
+                       }
+               }
+               pwsize := asize &^ (sys.PtrSize - 1)
+               if pwsize > ptrdata {
+                       pwsize = ptrdata
                }
-               bulkBarrierPreWrite(uintptr(adst), uintptr(asrc), asize&^(sys.PtrSize-1))
+               bulkBarrierPreWrite(uintptr(adst), uintptr(asrc), pwsize)
        }
 
        memmove(dst, src, size)
@@ -270,7 +285,8 @@ func typedslicecopy(typ *_type, dst, src slice) int {
        // before calling typedslicecopy.
        size := uintptr(n) * typ.size
        if writeBarrier.needed {
-               bulkBarrierPreWrite(uintptr(dstp), uintptr(srcp), size)
+               pwsize := size - typ.size + typ.ptrdata
+               bulkBarrierPreWrite(uintptr(dstp), uintptr(srcp), pwsize)
        }
        // See typedmemmove for a discussion of the race between the
        // barrier and memmove.
@@ -318,7 +334,7 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 //go:nosplit
 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
        if typ.ptrdata != 0 {
-               bulkBarrierPreWrite(uintptr(ptr), 0, typ.size)
+               bulkBarrierPreWrite(uintptr(ptr), 0, typ.ptrdata)
        }
        memclrNoHeapPointers(ptr, typ.size)
 }
index 9ad814a5557da2e244bdfd334cef4544e19b7bb3..52353ea151c5d1c2f04c5bf467d2a204829869c4 100644 (file)
@@ -182,7 +182,7 @@ func growslice(et *_type, old slice, cap int) slice {
                if lenmem > 0 && writeBarrier.enabled {
                        // Only shade the pointers in old.array since we know the destination slice p
                        // only contains nil pointers because it has been cleared during alloc.
-                       bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem)
+                       bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
                }
        }
        memmove(p, old.array, lenmem)