Cypherpunks repositories - gostls13.git/commitdiff
runtime: eliminate all writebarrierptr* calls
author Austin Clements <austin@google.com>
Mon, 15 Jan 2018 05:00:02 +0000 (00:00 -0500)
committer Austin Clements <austin@google.com>
Tue, 13 Feb 2018 16:34:45 +0000 (16:34 +0000)
Calls to writebarrierptr can simply be actual pointer writes. Calls to
writebarrierptr_prewrite need to go through the write barrier buffer.

Updates #22460.

Change-Id: I92cee4da98c5baa499f1977563757c76f95bf0ca
Reviewed-on: https://go-review.googlesource.com/92704
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
src/runtime/atomic_pointer.go
src/runtime/chan.go
src/runtime/hashmap_fast.go
src/runtime/mbitmap.go
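
Throughout this change the replacement pattern is the same: where writebarrierptr sufficed, the call becomes an ordinary pointer write; where writebarrierptr_prewrite was needed, the pointer pair goes through the per-P write barrier buffer (wbBuf.putFast, falling back to wbBufFlush when the buffer fills). The stand-alone sketch below models that buffer; wbBuf, putFast and wbBufFlush are the real runtime names, but the buffer layout, the flush behavior and the trailing store here are simplified assumptions, not the runtime implementation.

// Minimal model of the buffered write barrier this change switches to.
package main

import "fmt"

type wbBuf struct {
	buf  []uintptr // interleaved (old, new) pointer values
	next int
}

// putFast records the old and new pointer values and reports whether
// the buffer still has room; when it returns false the caller flushes.
func (b *wbBuf) putFast(old, new uintptr) bool {
	b.buf[b.next] = old
	b.buf[b.next+1] = new
	b.next += 2
	return b.next < len(b.buf)
}

// flush hands the buffered pointers to the collector (here: prints them)
// and resets the buffer.
func (b *wbBuf) flush() {
	fmt.Println("greying", b.buf[:b.next])
	b.next = 0
}

// prewrite mirrors the pattern installed in atomicwb and
// typeBitsBulkBarrier: buffer the pointers, flush if full, then write.
func prewrite(b *wbBuf, slot *uintptr, new uintptr) {
	if !b.putFast(*slot, new) {
		b.flush()
	}
	*slot = new
}

func main() {
	b := &wbBuf{buf: make([]uintptr, 4)}
	var x, y uintptr
	prewrite(b, &x, 0x10)
	prewrite(b, &y, 0x20) // the buffer is full after this put, so it flushes
	fmt.Println(x, y)
}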

index 292b3517ad592d4e9449eaa1178392c5ff744917..09cfbda9b1cf2e7a6652225625f3b20c7c8b8182 100644 (file)
@@ -16,11 +16,24 @@ import (
 // Instead, these are wrappers around the actual atomics (casp1 and so on)
 // that use noescape to convey which arguments do not escape.
 
+// atomicwb performs a write barrier before an atomic pointer write.
+// The caller should guard the call with "if writeBarrier.enabled".
+//
+//go:nosplit
+func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
+       slot := (*uintptr)(unsafe.Pointer(ptr))
+       if !getg().m.p.ptr().wbBuf.putFast(*slot, uintptr(new)) {
+               wbBufFlush(slot, uintptr(new))
+       }
+}
+
 // atomicstorep performs *ptr = new atomically and invokes a write barrier.
 //
 //go:nosplit
 func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
-       writebarrierptr_prewrite((*uintptr)(ptr), uintptr(new))
+       if writeBarrier.enabled {
+               atomicwb((*unsafe.Pointer)(ptr), new)
+       }
        atomic.StorepNoWB(noescape(ptr), new)
 }
 
@@ -29,7 +42,9 @@ func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
        // The write barrier is only necessary if the CAS succeeds,
        // but since it needs to happen before the write becomes
        // public, we have to do it conservatively all the time.
-       writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+       if writeBarrier.enabled {
+               atomicwb(ptr, new)
+       }
        return atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
 }
 
@@ -43,7 +58,9 @@ func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
 //go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
 //go:nosplit
 func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
-       writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+       if writeBarrier.enabled {
+               atomicwb(ptr, new)
+       }
        sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
 }
 
@@ -53,7 +70,9 @@ func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
 //go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
 //go:nosplit
 func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
-       writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+       if writeBarrier.enabled {
+               atomicwb(ptr, new)
+       }
        old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new)))
        return old
 }
@@ -64,6 +83,8 @@ func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
 //go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
 //go:nosplit
 func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
-       writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+       if writeBarrier.enabled {
+               atomicwb(ptr, new)
+       }
        return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new))
 }
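
For context, these entry points back the exported sync/atomic pointer operations via the go:linkname directives above, so an ordinary user-level store like the one below is what reaches atomicwb while the collector is marking. Illustrative only; nothing here is new API.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type node struct{ v int }

// head is a GC-visible pointer slot updated atomically.
var head unsafe.Pointer // *node

func publish(n *node) {
	// atomic.StorePointer is implemented by runtime.sync_atomic_StorePointer,
	// which (when writeBarrier.enabled) queues the old and new pointers via
	// atomicwb before the store becomes visible.
	atomic.StorePointer(&head, unsafe.Pointer(n))
}

func main() {
	publish(&node{v: 1})
	fmt.Println((*node)(atomic.LoadPointer(&head)).v) // 1
}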
index 41ae803574b5920727e2257a09fa6aee609a9142..678128b4ceb54e3af953e0d8651c5909ae6a4c00 100644 (file)
@@ -310,6 +310,8 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
        // So make sure that no preemption points can happen between read & use.
        dst := sg.elem
        typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
+       // No need for cgo write barrier checks because dst is always
+       // Go memory.
        memmove(dst, src, t.size)
 }
 
index 2de381412bec4bc8d9b27c13150828bcf05d4264..f978d1be7b8c1e9b72512e4ad031d785ccad2df9 100644 (file)
@@ -1002,7 +1002,8 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 
                                // Copy key.
                                if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-                                       writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
+                                       // Write with a write barrier.
+                                       *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
                                } else {
                                        *(*uint32)(dst.k) = *(*uint32)(k)
                                }
@@ -1103,7 +1104,8 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
                                // Copy key.
                                if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
                                        if sys.PtrSize == 8 {
-                                               writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
+                                               // Write with a write barrier.
+                                               *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
                                        } else {
                                                // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
                                                // Give up and call typedmemmove.
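
The point of the fast32/fast64 change is that an assignment through a *unsafe.Pointer is something the compiler instruments with a write barrier even inside the runtime, while the raw uint32/uint64 copies are not instrumented, so the explicit writebarrierptr call becomes unnecessary. A contrived stand-alone sketch of the two copy forms (not runtime code; outside the runtime the distinction is invisible to the programmer):

package main

import (
	"fmt"
	"unsafe"
)

// copyWord copies one machine word either as a typed pointer (the form
// that receives a compiler-emitted write barrier on GC-managed slots)
// or as raw bits (no barrier, used when the key holds no pointers).
func copyWord(dst, src unsafe.Pointer, hasPointer bool) {
	if hasPointer {
		*(*unsafe.Pointer)(dst) = *(*unsafe.Pointer)(src)
	} else {
		*(*uint64)(dst) = *(*uint64)(src)
	}
}

func main() {
	var a, b uint64 = 42, 0
	copyWord(unsafe.Pointer(&b), unsafe.Pointer(&a), false)
	fmt.Println(b) // 42
}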
index 3a88f177885b24d3ef80c664592115971a92c616..8e035051243b70bb0ea84cc7b0ed94fd277297b2 100644 (file)
@@ -550,6 +550,8 @@ func (h heapBits) setCheckmarked(size uintptr) {
 // make sure the underlying allocation contains pointers, usually
 // by checking typ.kind&kindNoPointers.
 //
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
 //go:nosplit
 func bulkBarrierPreWrite(dst, src, size uintptr) {
        if (dst|src|size)&(sys.PtrSize-1) != 0 {
@@ -649,7 +651,7 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
        }
 }
 
-// typeBitsBulkBarrier executes writebarrierptr_prewrite for every
+// typeBitsBulkBarrier executes a write barrier for every
 // pointer that would be copied from [src, src+size) to [dst,
 // dst+size) by a memmove using the type bitmap to locate those
 // pointer slots.
@@ -663,6 +665,8 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
 // Must not be preempted because it typically runs right before memmove,
 // and the GC must observe them as an atomic action.
 //
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
 //go:nosplit
 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
        if typ == nil {
@@ -680,6 +684,7 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
                return
        }
        ptrmask := typ.gcdata
+       buf := &getg().m.p.ptr().wbBuf
        var bits uint32
        for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
                if i&(sys.PtrSize*8-1) == 0 {
@@ -691,7 +696,9 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
                if bits&1 != 0 {
                        dstx := (*uintptr)(unsafe.Pointer(dst + i))
                        srcx := (*uintptr)(unsafe.Pointer(src + i))
-                       writebarrierptr_prewrite(dstx, *srcx)
+                       if !buf.putFast(*dstx, *srcx) {
+                               wbBufFlush(nil, 0)
+                       }
                }
        }
 }
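
This loop replaces one runtime call per pointer slot with the buffered fast path. For intuition, here is a stand-alone sketch of the bitmap walk it performs (simplified: the mask is read bit by bit rather than cached a byte at a time in bits, and the nosplit/preemption constraints are ignored):

package main

import "fmt"

const ptrSize = 8

// bulkBarrier visits every word of [0, size) whose bit is set in the
// 1-bit-per-word pointer mask, i.e. every slot a real bulk barrier
// would push into the write barrier buffer before the memmove.
func bulkBarrier(mask []byte, size uintptr, visit func(off uintptr)) {
	for i := uintptr(0); i < size; i += ptrSize {
		word := i / ptrSize
		if mask[word/8]>>(word%8)&1 != 0 {
			visit(i)
		}
	}
}

func main() {
	mask := []byte{0x5} // words 0 and 2 hold pointers
	bulkBarrier(mask, 3*ptrSize, func(off uintptr) {
		fmt.Println("would buffer the pointer slot at offset", off)
	})
}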