if msanenabled && new != nil {
                msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
        }
-
+       if asanenabled && new != nil {
+               asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
+       }
        if _cgo_sigaction == nil || inForkedChild {
                sysSigaction(sig, new, old)
        } else {
        if msanenabled && old != nil {
                msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
        }
+       if asanenabled && old != nil {
+               asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
+       }
 }
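
A note on the pattern repeated throughout this change: each existing msan
hook gains a parallel asan hook under the same guard shape. Reads map to
asanread and writes to asanwrite; and where msan tracks allocation state
with msanmalloc/msanfree, asan tracks addressability instead, so the
analogous calls are asanunpoison when memory comes into use and asanpoison
when it goes out of use.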
 
 // callCgoSigaction calls the sigaction function in the runtime/cgo package
 
        if msanenabled {
                msanread(v, t.size)
        }
+       if asanenabled {
+               asanread(v, t.size)
+       }
        x := mallocgc(t.size, t, true)
        typedmemmove(t, x, v)
        return x
        if msanenabled {
                msanread(v, t.size)
        }
+       if asanenabled {
+               asanread(v, t.size)
+       }
+
        x := mallocgc(t.size, t, false)
        memmove(x, v, t.size)
        return x
 
        if size == 0 {
                return unsafe.Pointer(&zerobase)
        }
+       userSize := size
+       if asanenabled {
+               // As in the ASAN runtime library, where malloc() allocates extra
+               // memory, the redzone, around the user-requested region and marks
+               // the redzone as unaddressable, we perform the same operation in Go
+               // so that buffer overflows and underflows can be detected.
+               size += computeRZlog(size)
+       }
 
        if debug.malloc {
                if debug.sbrk != 0 {
        mp.mallocing = 1
 
        shouldhelpgc := false
-       dataSize := size
+       dataSize := userSize
        c := getMCache(mp)
        if c == nil {
                throw("mallocgc called without a P or outside bootstrapping")
                msanmalloc(x, size)
        }
 
+       if asanenabled {
+               // The allocated memory is larger than the requested userSize: it also
+               // contains the redzone and possibly other padding. Only the first
+               // userSize bytes may be read or written by the user; the remainder is
+               // poisoned so that any access to it is reported as an error.
+               rzBeg := unsafe.Add(x, userSize)
+               asanpoison(rzBeg, size-userSize)
+               asanunpoison(x, userSize)
+       }
+
        if rate := MemProfileRate; rate > 0 {
                // Note cache c only valid while m acquired; see #47302
                if rate != 1 && size < c.nextSample {
 func (p *notInHeap) add(bytes uintptr) *notInHeap {
        return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
 }
+
+// computeRZlog computes the size of the redzone for a given allocation size.
+// Refer to the implementation in compiler-rt.
+func computeRZlog(userSize uintptr) uintptr {
+       switch {
+       case userSize <= (64 - 16):
+               return 16 << 0
+       case userSize <= (128 - 32):
+               return 16 << 1
+       case userSize <= (512 - 64):
+               return 16 << 2
+       case userSize <= (4096 - 128):
+               return 16 << 3
+       case userSize <= (1<<14)-256:
+               return 16 << 4
+       case userSize <= (1<<15)-512:
+               return 16 << 5
+       case userSize <= (1<<16)-1024:
+               return 16 << 6
+       default:
+               return 16 << 7
+       }
+}
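
To make the redzone arithmetic concrete, here is a standalone, runnable
sketch (not part of the change) that reuses the computeRZlog sizing above
and prints the extra memory an asan build would request for a few
allocation sizes. Per the mallocgc hunk above, the first userSize bytes
are then unpoisoned and the remainder is poisoned:

package main

import "fmt"

// computeRZlog mirrors the function added above: pick a power-of-two
// redzone between 16 and 2048 bytes, growing with the request size.
func computeRZlog(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}

func main() {
	for _, userSize := range []uintptr{1, 48, 100, 4000, 1 << 20} {
		rz := computeRZlog(userSize)
		// The size class chosen by mallocgc may round up further,
		// hence "allocated >=" rather than "=".
		fmt.Printf("userSize=%7d redzone=%4d allocated>=%7d\n", userSize, rz, userSize+rz)
	}
}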
 
        if msanenabled && h != nil {
                msanread(key, t.key.size)
        }
+       if asanenabled && h != nil {
+               asanread(key, t.key.size)
+       }
        if h == nil || h.count == 0 {
                if t.hashMightPanic() {
                        t.hasher(key, 0) // see issue 23734
        if msanenabled && h != nil {
                msanread(key, t.key.size)
        }
+       if asanenabled && h != nil {
+               asanread(key, t.key.size)
+       }
        if h == nil || h.count == 0 {
                if t.hashMightPanic() {
                        t.hasher(key, 0) // see issue 23734
        if msanenabled {
                msanread(key, t.key.size)
        }
+       if asanenabled {
+               asanread(key, t.key.size)
+       }
        if h.flags&hashWriting != 0 {
                throw("concurrent map writes")
        }
        if msanenabled && h != nil {
                msanread(key, t.key.size)
        }
+       if asanenabled && h != nil {
+               asanread(key, t.key.size)
+       }
        if h == nil || h.count == 0 {
                if t.hashMightPanic() {
                        t.hasher(key, 0) // see issue 23734
 
                msanwrite(dst, typ.size)
                msanread(src, typ.size)
        }
+       if asanenabled {
+               asanwrite(dst, typ.size)
+               asanread(src, typ.size)
+       }
        typedmemmove(typ, dst, src)
 }
 
                msanwrite(dstPtr, uintptr(n)*typ.size)
                msanread(srcPtr, uintptr(n)*typ.size)
        }
+       if asanenabled {
+               asanwrite(dstPtr, uintptr(n)*typ.size)
+               asanread(srcPtr, uintptr(n)*typ.size)
+       }
 
        if writeBarrier.cgo {
                cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
 
                spanHasNoSpecials(s)
        }
 
-       if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
+       if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
                // Find all newly freed objects. This doesn't have to
                // be efficient; allocfreetrace has massive overhead.
                mbits := s.markBitsForBase()
                                if msanenabled {
                                        msanfree(unsafe.Pointer(x), size)
                                }
+                               if asanenabled {
+                                       asanpoison(unsafe.Pointer(x), size)
+                               }
                        }
                        mbits.advance()
                        abits.advance()
 
                        bytes := s.npages << _PageShift
                        msanfree(base, bytes)
                }
+               if asanenabled {
+                       // Tell asan that this entire span is no longer in use.
+                       base := unsafe.Pointer(s.base())
+                       bytes := s.npages << _PageShift
+                       asanpoison(base, bytes)
+               }
                h.freeSpanLocked(s, spanAllocHeap)
                unlock(&h.lock)
        })
 
        if msanenabled {
                msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
        }
+       if asanenabled {
+               asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+       }
        copy(r.Stack0[:], b.stk())
        for i := int(b.nstk); i < len(r.Stack0); i++ {
                r.Stack0[i] = 0
                        if msanenabled {
                                msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
                        }
+                       if asanenabled {
+                               asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
+                       }
                        i := copy(r.Stack0[:], b.stk())
                        for ; i < len(r.Stack0); i++ {
                                r.Stack0[i] = 0
 
                if msanenabled {
                        msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
                }
+               if asanenabled {
+                       asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
+               }
                execLock.rlock() // Prevent process clone.
                asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
                execLock.runlock()
                if msanenabled {
                        msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
                }
+               if asanenabled {
+                       asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+               }
        }
        return gp
 }
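
Note how msanmalloc pairs with asanunpoison here: msan records that the
stack memory is newly allocated, while asan only needs the region marked
addressable again. The stackalloc and stackfree hunks below follow the
same correspondence, pairing msanfree with asanpoison when stack memory
goes out of use.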
 
                        msanwrite(cas.elem, c.elemtype.size)
                }
        }
+       if asanenabled {
+               if casi < nsends {
+                       asanread(cas.elem, c.elemtype.size)
+               } else if cas.elem != nil {
+                       asanwrite(cas.elem, c.elemtype.size)
+               }
+       }
 
        selunlock(scases, lockorder)
        goto retc
        if msanenabled && cas.elem != nil {
                msanwrite(cas.elem, c.elemtype.size)
        }
+       if asanenabled && cas.elem != nil {
+               asanwrite(cas.elem, c.elemtype.size)
+       }
        recvOK = true
        qp = chanbuf(c, c.recvx)
        if cas.elem != nil {
        if msanenabled {
                msanread(cas.elem, c.elemtype.size)
        }
+       if asanenabled {
+               asanread(cas.elem, c.elemtype.size)
+       }
        typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
        c.sendx++
        if c.sendx == c.dataqsiz {
        if msanenabled {
                msanread(cas.elem, c.elemtype.size)
        }
+       if asanenabled {
+               asanread(cas.elem, c.elemtype.size)
+       }
        send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
        if debugSelect {
                print("syncsend: cas0=", cas0, " c=", c, "\n")
 
        if msanenabled {
                msanread(from, copymem)
        }
+       if asanenabled {
+               asanread(from, copymem)
+       }
 
        memmove(to, from, copymem)
 
        if msanenabled {
                msanread(old.array, uintptr(old.len*int(et.size)))
        }
+       if asanenabled {
+               asanread(old.array, uintptr(old.len*int(et.size)))
+       }
 
        if cap < old.cap {
                panic(errorString("growslice: cap out of range"))
                msanread(fromPtr, size)
                msanwrite(toPtr, size)
        }
+       if asanenabled {
+               asanread(fromPtr, size)
+               asanwrite(toPtr, size)
+       }
 
        if size == 1 { // common case worth about 2x to do here
                // TODO: is this still worth it with new memmove impl?
 
        if msanenabled {
                msanmalloc(v, uintptr(n))
        }
+       if asanenabled {
+               asanunpoison(v, uintptr(n))
+       }
        if stackDebug >= 1 {
                print("  allocated ", v, "\n")
        }
        if msanenabled {
                msanfree(v, n)
        }
+       if asanenabled {
+               asanpoison(v, n)
+       }
        if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
                order := uint8(0)
                n2 := n
 
        if msanenabled {
                msanread(unsafe.Pointer(ptr), uintptr(n))
        }
+       if asanenabled {
+               asanread(unsafe.Pointer(ptr), uintptr(n))
+       }
        if n == 1 {
                p := unsafe.Pointer(&staticuint64s[*ptr])
                if goarch.BigEndian {
        if msanenabled && n > 0 {
                msanread(unsafe.Pointer(ptr), uintptr(n))
        }
+       if asanenabled && n > 0 {
+               asanread(unsafe.Pointer(ptr), uintptr(n))
+       }
        stringStructOf(&str).str = unsafe.Pointer(ptr)
        stringStructOf(&str).len = n
        return
        if msanenabled && len(a) > 0 {
                msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
        }
+       if asanenabled && len(a) > 0 {
+               asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
+       }
        var dum [4]byte
        size1 := 0
        for _, r := range a {
 
        if msanenabled {
                msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
        }
+       if asanenabled {
+               asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
+       }
        call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
 }
 
        if msanenabled {
                msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
        }
+       if asanenabled {
+               asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
+       }
        call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
 }
 
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build asan
+// +build asan
+
+package syscall
+
+import (
+       "runtime"
+       "unsafe"
+)
+
+const asanenabled = true
+
+func asanRead(addr unsafe.Pointer, len int) {
+       runtime.ASanRead(addr, len)
+}
+
+func asanWrite(addr unsafe.Pointer, len int) {
+       runtime.ASanWrite(addr, len)
+}
 
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !asan
+// +build !asan
+
+package syscall
+
+import (
+       "unsafe"
+)
+
+const asanenabled = false
+
+func asanRead(addr unsafe.Pointer, len int) {
+}
+
+func asanWrite(addr unsafe.Pointer, len int) {
+}
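
Since asanenabled is a constant in each flavor of the file, guards like
"if asanenabled && n > 0" in the wrappers below are dead-code-eliminated
in ordinary builds, and the empty asanRead/asanWrite bodies keep the call
sites identical either way. This mirrors the existing msanenabled,
msanRead, and msanWrite pattern already used by package syscall.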
 
        if msanenabled && n > 0 {
                msanWrite(unsafe.Pointer(&p[0]), n)
        }
+       if asanenabled && n > 0 {
+               asanWrite(unsafe.Pointer(&p[0]), n)
+       }
        return
 }
 
        if msanenabled && n > 0 {
                msanRead(unsafe.Pointer(&p[0]), n)
        }
+       if asanenabled && n > 0 {
+               asanRead(unsafe.Pointer(&p[0]), n)
+       }
        return
 }
 
 
        if msanenabled && done > 0 {
                msanWrite(unsafe.Pointer(&p[0]), int(done))
        }
+       if asanenabled && done > 0 {
+               asanWrite(unsafe.Pointer(&p[0]), int(done))
+       }
        return int(done), nil
 }
 
        if msanenabled && done > 0 {
                msanRead(unsafe.Pointer(&p[0]), int(done))
        }
+       if asanenabled && done > 0 {
+               asanRead(unsafe.Pointer(&p[0]), int(done))
+       }
        return int(done), nil
 }