First, skip all the allocation count tests. In some cases this aligns
with existing skips for -race, but in others we have new issues. These
are debug modes, so some performance loss is expected, and skipping is
clearly no worse than today, where the tests simply fail.
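
The skip guard follows the same pattern throughout. The internal/asan
and internal/msan packages export an Enabled constant tied to the
corresponding build tag, so the guard compiles away entirely in normal
builds. A minimal sketch (hypothetical test name; the exact skip
message varies per test):

    import "internal/asan"

    func TestSomethingAllocs(t *testing.T) {
        if asan.Enabled {
            t.Skip("test allocates more with -asan; see #70079")
        }
        // ... assert on testing.AllocsPerRun ...
    }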
Next, skip internal linking and static linking tests for msan and asan.
With asan, we get an explicit failure that neither is supported by the C
and/or Go compilers. With msan, we only get the Go compiler telling us
that internal linking is unavailable; with static linking, we segfault
instead. Filed #70080 to track that.
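
In cmd/dist this amounts to a pair of guards (quoted from the hunks
below):

    if t.msan || t.asan {
        // linkmode=internal isn't supported by msan or asan.
        return false
    }

plus matching !t.msan && !t.asan conditions on the static-linking cgo
tests.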
Next, skip some malloc tests with asan that don't quite work because of
the redzone: it breaks some sizeclass assumptions, and it effectively
disables the tiny allocator.
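
To make the mechanism concrete, here is an illustrative sketch; the
redZoneSize stand-in below is an assumption for illustration, not the
runtime's real table:

    package main

    import "fmt"

    // redZoneSize mimics the shape of asan redzone sizing: a small
    // minimum redzone that grows with the allocation. The real values
    // live in the runtime and differ in detail.
    func redZoneSize(userSize uintptr) uintptr {
        if userSize <= 64 {
            return 16
        }
        return userSize / 4
    }

    func main() {
        // A 16-byte object fits the 16-byte sizeclass exactly; with a
        // redzone it needs 32 bytes. Sub-16-byte objects can no longer
        // be packed together by the tiny allocator, either.
        for _, size := range []uintptr{8, 16, 48, 1024} {
            fmt.Printf("object %4d bytes -> %4d bytes with redzone\n",
                size, size+redZoneSize(size))
        }
    }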
Next, skip some runtime/pprof tests with asan because of extra
allocations.
Next, skip some malloc tests with asan that also fail because of extra
allocations.
Next, fix up memstats accounting for arenas when asan is enabled. There
is a bug where more is added to the stats than subtracted. This also
simplifies the accounting a little.
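
Concretely, the adjustment now happens exactly once, at chunk
initialization (quoted from the arena.go hunk below), so every later
reader of elemsize, the stats included, sees the same redzone-adjusted
size:

    // Adjust size to include redzone.
    if asanenabled {
        s.elemsize -= redZoneSize(s.elemsize)
    }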
Next, skip race tests with msan or asan enabled; they're mutually
incompatible.
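
For whole-file skips, the guard is a build constraint rather than a
runtime check, as in the cgo test below:

    //go:build !race && !asan && !msan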
Fixes #70054.
Fixes #64256.
Fixes #64257.
For #70079.
For #70080.
Change-Id: I99c02a0b9d621e44f1f918b307aa4a4944c3ec60
Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-asan-clang15,gotip-linux-amd64-msan-clang15
Reviewed-on: https://go-review.googlesource.com/c/go/+/622855
Reviewed-by: Cherry Mui <cherryyz@google.com>
TryBot-Bypass: Michael Knyszek <mknyszek@google.com>
"bytes"
"errors"
"fmt"
+ "internal/asan"
"io"
"math/rand"
"strconv"
}
func TestReadStringAllocs(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
r := strings.NewReader(" foo foo 42 42 42 42 42 42 42 42 4.2 4.2 4.2 4.2\n")
buf := NewReader(r)
allocs := testing.AllocsPerRun(100, func() {
import (
"context"
"fmt"
+ "internal/asan"
"math"
"math/rand"
"os"
// issue 10303
func test10303(t *testing.T, n int) {
+ if asan.Enabled {
+ t.Skip("variable z is heap-allocated due to extra allocations with -asan; see #70079")
+ }
if runtime.Compiler == "gccgo" {
t.Skip("gccgo permits C pointers on the stack")
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !race
+//go:build !race && !asan && !msan
package test
}
}
- if t.raceDetectorSupported() {
+ if t.raceDetectorSupported() && !t.msan && !t.asan {
+ // N.B. -race is incompatible with -msan and -asan.
t.registerRaceTests()
}
// linkmode=internal isn't supported.
return false
}
+ if t.msan || t.asan {
+ // linkmode=internal isn't supported by msan or asan.
+ return false
+ }
return true
}
func (t *tester) internalLinkPIE() bool {
+ if t.msan || t.asan {
+ // linkmode=internal isn't supported by msan or asan.
+ return false
+ }
switch goos + "-" + goarch {
case "darwin-amd64", "darwin-arm64",
"linux-amd64", "linux-arm64", "linux-ppc64le",
}
// Static linking tests
- if goos != "android" && p != "netbsd/arm" {
+ if goos != "android" && p != "netbsd/arm" && !t.msan && !t.asan {
// TODO(#56629): Why does this fail on netbsd-arm?
+ // TODO(#70080): Why does this fail with msan?
+ // asan doesn't support static linking (this is an explicit build error on the C side).
cgoTest("static", "testtls", "external", "static", staticCheck)
}
cgoTest("external", "testnocgo", "external", "", staticCheck)
- if goos != "android" {
+ if goos != "android" && !t.msan && !t.asan {
+ // TODO(#70080): Why does this fail with msan?
+ // asan doesn't support static linking (this is an explicit build error on the C side).
cgoTest("static", "testnocgo", "external", "static", staticCheck)
cgoTest("static", "test", "external", "static", staticCheck)
// -static in CGO_LDFLAGS triggers a different code path
// than -static in -extldflags, so test both.
// See issue #16651.
- if goarch != "loong64" {
+ if goarch != "loong64" && !t.msan && !t.asan {
// TODO(#56623): Why does this fail on loong64?
cgoTest("auto-static", "test", "auto", "static", staticCheck)
}
"compress/flate"
"crypto/internal/boring"
"errors"
+ "internal/asan"
+ "internal/msan"
"internal/race"
"internal/testenv"
"io"
// Might be fixable with https://go.dev/issue/56378.
t.Skip("boringcrypto allocates")
}
- if race.Enabled {
- t.Skip("urandomRead allocates under -race")
+ if race.Enabled || msan.Enabled || asan.Enabled {
+ t.Skip("urandomRead allocates under -race, -asan, and -msan")
}
testenv.SkipIfOptimizationOff(t)
import (
"database/sql/driver"
"fmt"
+ "internal/asan"
"reflect"
"runtime"
"strings"
{"bool", false, "false"},
{"time", time.Unix(2, 5).UTC(), "1970-01-01T00:00:02.000000005Z"},
}
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
var buf RawBytes
rows := &Rows{}
import (
"bytes"
"fmt"
+ "internal/asan"
"io"
"math"
"reflect"
}
func TestAppendAllocs(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
buf := make([]byte, 0, Size(&s))
var err error
allocs := testing.AllocsPerRun(1, func() {
}
func TestSizeAllocs(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
for _, data := range sizableTypes {
t.Run(fmt.Sprintf("%T", data), func(t *testing.T) {
// Size uses a sync.Map behind the scenes. The slow lookup path of
package slog
import (
+ "internal/asan"
"internal/testenv"
"testing"
"time"
)
func TestAttrNoAlloc(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates with -asan")
+ }
testenv.SkipIfOptimizationOff(t)
// Assign values just to make sure the compiler doesn't optimize away the statements.
var (
import (
"bytes"
"context"
+ "internal/asan"
+ "internal/msan"
"internal/race"
"internal/testenv"
"io"
}
func wantAllocs(t *testing.T, want int, f func()) {
- if race.Enabled {
- t.Skip("skipping test in race mode")
+ if race.Enabled || asan.Enabled || msan.Enabled {
+ t.Skip("skipping test in race, asan, and msan modes")
}
testenv.SkipIfOptimizationOff(t)
t.Helper()
import (
"fmt"
+ "internal/asan"
"reflect"
"strings"
"testing"
}
func TestValueNoAlloc(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
+
// Assign values just to make sure the compiler doesn't optimize away the statements.
var (
i int64
"encoding/json"
"flag"
"fmt"
+ "internal/asan"
"internal/testenv"
"net"
. "net/netip"
)
func TestNoAllocs(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
+
// Wrappers that panic on error, to prove that our alloc-free
// methods are returning successfully.
panicIP := func(ip Addr, err error) Addr {
import (
"errors"
"fmt"
+ "internal/asan"
"internal/testenv"
"net/netip"
"os"
if !testableNetwork("udp4") {
t.Skipf("skipping: udp4 not available")
}
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
// Optimizations are required to remove the allocs.
testenv.SkipIfOptimizationOff(t)
"flag"
"fmt"
"go/token"
+ "internal/asan"
"internal/goarch"
"internal/goexperiment"
"internal/testenv"
if goexperiment.SwissMap {
t.Skipf("Maps on stack not yet implemented")
}
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
for _, tt := range deepEqualPerfTests {
t.Run(ValueOf(tt.x).Type().String(), func(t *testing.T) {
}
func TestMapAlloc(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
m := ValueOf(make(map[int]int, 10))
k := ValueOf(5)
v := ValueOf(7)
}
func TestChanAlloc(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
// Note: for a chan int, the return Value must be allocated, so we
// use a chan *int instead.
c := ValueOf(make(chan *int, 1))
}
// Reset should not allocate.
+ //
+ // Except with -asan, where there are additional allocations.
+ // See #70079.
n := int(testing.AllocsPerRun(10, func() {
iter.Reset(ValueOf(m2))
iter.Reset(Value{})
}))
- if n > 0 {
+ if !asan.Enabled && n > 0 {
t.Errorf("MapIter.Reset allocated %d times", n)
}
}
if asanenabled {
// TODO(mknyszek): Track individual objects.
- rzSize := redZoneSize(span.elemsize)
- span.elemsize -= rzSize
- span.largeType.Size_ = span.elemsize
+ // N.B. span.elemsize includes a redzone already.
rzStart := span.base() + span.elemsize
- span.userArenaChunkFree = makeAddrRange(span.base(), rzStart)
asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
}
s.freeindex = 1
s.allocCount = 1
+ // Adjust size to include redzone.
+ if asanenabled {
+ s.elemsize -= redZoneSize(s.elemsize)
+ }
+
// Account for this new arena chunk memory.
gcController.heapInUse.add(int64(userArenaChunkBytes))
gcController.heapReleased.add(-int64(userArenaChunkBytes))
import (
"fmt"
"internal/abi"
+ "internal/asan"
+ "internal/msan"
"math"
"os"
"regexp"
// a debugger.
skipUnderDebugger(t)
+ // asan/msan instrumentation interferes with tests since we might
+ // inject debugCallV2 while in the asan/msan runtime. This is a
+ // problem for doing things like running the GC or taking stack
+ // traces. Not sure why this is happening yet, but skip for now.
+ if msan.Enabled || asan.Enabled {
+ t.Skip("debugCallV2 is injected erroneously during asan/msan runtime calls; skipping")
+ }
+
// This can deadlock if there aren't enough threads or if a GC
// tries to interrupt an atomic loop (see issue #10958). Execute
// an extra GC to ensure even the sweep phase is done (out of
import (
"fmt"
+ "internal/asan"
"math/bits"
"math/rand"
"os"
}
func TestGCTestMoveStackOnNextCall(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("extra allocations with -asan causes this to fail; see #70079")
+ }
t.Parallel()
var onStack int
// GCTestMoveStackOnNextCall can fail in rare cases if there's
var pointerClassData = 42
func TestGCTestPointerClass(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("extra allocations cause this test to fail; see #70079")
+ }
t.Parallel()
check := func(p unsafe.Pointer, want string) {
t.Helper()
import (
"flag"
"fmt"
+ "internal/asan"
"internal/race"
"internal/testenv"
"os"
if runtime.Raceenabled {
t.Skip("tinyalloc suppressed when running in race mode")
}
+ if asan.Enabled {
+ t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
+ }
const N = 16
var v [N]unsafe.Pointer
for i := range v {
if runtime.Raceenabled {
t.Skip("tinyalloc suppressed when running in race mode")
}
+ if asan.Enabled {
+ t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
+ }
// Try to cause an alignment access fault
// by atomically accessing the first 64-bit
// value of a tiny-allocated object.
package runtime_test
import (
+ "internal/asan"
"runtime"
"testing"
"time"
// Make sure an empty slice on the stack doesn't pin the next object in memory.
func TestEmptySlice(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("skipping with -asan: test assumes exact size class alignment, but asan redzone breaks that assumption")
+ }
x, y := adjChunks()
// the pointer inside xs points to y.
// Make sure an empty string on the stack doesn't pin the next object in memory.
func TestEmptyString(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("skipping with -asan: test assumes exact size class alignment, but asan redzone breaks that assumption")
+ }
x, y := adjStringChunk()
ss := x[objsize:] // change objsize to objsize-1 and the test passes
import (
"bytes"
"fmt"
+ "internal/asan"
"internal/profile"
"reflect"
"regexp"
var memoryProfilerRun = 0
func TestMemoryProfiler(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("extra allocations with -asan throw off the test; see #70079")
+ }
+
// Disable sampling, otherwise it's difficult to assert anything.
oldRate := runtime.MemProfileRate
runtime.MemProfileRate = 1
}{{
stk: []string{"runtime/pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"},
legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:47
-# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:82
+# 0x[0-9,a-f]+ runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:48
+# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:87
`, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun),
}, {
stk: []string{"runtime/pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"},
legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:24
-# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:79
+# 0x[0-9,a-f]+ runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:25
+# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:84
`, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun),
}, {
stk: []string{"runtime/pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"},
legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:30
-# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:80
+# 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:31
+# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:85
`, memoryProfilerRun, (2<<20)*memoryProfilerRun),
}, {
stk: []string{"runtime/pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"},
legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:34
-# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:81
+# 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:35
+# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:86
`, memoryProfilerRun, (2<<20)*memoryProfilerRun),
}, {
stk: []string{"runtime/pprof.allocateReflectTransient"},
legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @( 0x[0-9,a-f]+)+
-# 0x[0-9,a-f]+ runtime/pprof\.allocateReflectTransient\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:55
+# 0x[0-9,a-f]+ runtime/pprof\.allocateReflectTransient\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:56
`, memoryProfilerRun, (2<<20)*memoryProfilerRun),
}}
import (
"bytes"
"fmt"
+ "internal/asan"
"internal/profile"
"internal/profilerecord"
"internal/testenv"
// This is a regression test for https://go.dev/issue/64528 .
func TestGenericsHashKeyInPprofBuilder(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("extra allocations with -asan throw off the test; see #70079")
+ }
previousRate := runtime.MemProfileRate
runtime.MemProfileRate = 1
defer func() {
}
func TestGenericsInlineLocations(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("extra allocations with -asan throw off the test; see #70079")
+ }
if testenv.OptimizationOff() {
t.Skip("skipping test with optimizations disabled")
}
import (
"cmp"
+ "internal/asan"
+ "internal/msan"
"internal/race"
"internal/testenv"
"math"
}
}
- if !testenv.OptimizationOff() && !race.Enabled {
+ if !testenv.OptimizationOff() && !race.Enabled && !asan.Enabled && !msan.Enabled {
// Allocations should be amortized.
const count = 50
n := testing.AllocsPerRun(10, func() {
}
if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)+1) }); n != 1 {
errorf := t.Errorf
- if race.Enabled || testenv.OptimizationOff() {
+ if race.Enabled || msan.Enabled || asan.Enabled || testenv.OptimizationOff() {
errorf = t.Logf // this allocates multiple times in race detector mode
}
errorf("Grow should allocate once when given insufficient capacity; allocated %v times", n)
_ = sink
if allocs > 1 {
errorf := t.Errorf
- if testenv.OptimizationOff() || race.Enabled {
+ if testenv.OptimizationOff() || race.Enabled || asan.Enabled || msan.Enabled {
errorf = t.Logf
}
errorf("Concat(%v) allocated %v times; want 1", tc.s, allocs)
import (
"bytes"
+ "internal/asan"
. "strings"
"testing"
"unicode/utf8"
func TestBuilderGrow(t *testing.T) {
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
+ if asan.Enabled {
+ t.Logf("skipping allocs check for growLen %d: extra allocs with -asan; see #70079", growLen)
+ continue
+ }
p := bytes.Repeat([]byte{'a'}, growLen)
allocs := testing.AllocsPerRun(100, func() {
var b Builder
}
func TestBuilderAllocs(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
// Issue 23382; verify that copyCheck doesn't force the
// Builder to escape and be heap allocated.
n := testing.AllocsPerRun(10000, func() {
}
func TestBuilderGrowSizeclasses(t *testing.T) {
+ if asan.Enabled {
+ t.Skip("test allocates more with -asan; see #70079")
+ }
s := Repeat("a", 19)
allocs := testing.AllocsPerRun(100, func() {
var b Builder