import (
"flag"
"fmt"
- "internal/race"
"internal/sysinfo"
"io"
"math"
// Try to get a comparable environment for each run
// by clearing garbage from previous runs.
runtime.GC()
- b.raceErrors = -race.Errors()
b.N = n
b.parallelism = 1
b.ResetTimer()
b.StopTimer()
b.previousN = n
b.previousDuration = b.duration
- b.raceErrors += race.Errors()
- if b.raceErrors > 0 {
+ if fetchRaceErrors() > 0 {
b.Errorf("race detected during execution of benchmark")
}
}
numFailed atomic.Uint32 // number of test failures
+ numRaceErrors atomic.Uint32 // number of race detector errors
+
running sync.Map // map[string]time.Time of running, unpaused tests
)
cleanupPc []uintptr // The stack trace at the point where Cleanup was called.
finished bool // Test function has completed.
inFuzzFn bool // Whether the fuzz target, if this is one, is running.
+ raceErrors int // Number of races detected during test.
chatty *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set.
bench bool // Whether the current test is a benchmark.
hasSub atomic.Bool // whether there are sub-benchmarks.
cleanupStarted atomic.Bool // Registered cleanup callbacks have started to execute
- raceErrors int // Number of races detected during test.
runner string // Function name of tRunner running the test.
isParallel bool // Whether the test is parallel.
return chatty.on
}
+// fetchRaceErrors returns the number of race detector errors that have
+// not yet been reported. Calling it marks those errors as reported,
+// so the caller is responsible for reporting them.
+func fetchRaceErrors() int {
+ if !race.Enabled {
+ return 0
+ }
+
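+ // race.Errors only ever increases, so the unreported count is the
+ // difference between its current value and the total already claimed
+ // in numRaceErrors. The CAS loop lets concurrent callers claim
+ // errors without double-counting any of them.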
+ for {
+ tot := uint32(race.Errors())
+ seen := numRaceErrors.Load()
+ if tot == seen {
+ return 0
+ }
+ if numRaceErrors.CompareAndSwap(seen, tot) {
+ return int(tot - seen)
+ }
+ }
+}
+
func (c *common) checkFuzzFn(name string) {
if c.inFuzzFn {
panic(fmt.Sprintf("testing: f.%s was called inside the fuzz target, use t.%s instead", name, name))
// Failed reports whether the function has failed.
func (c *common) Failed() bool {
- c.mu.RLock()
- failed := c.failed
- c.mu.RUnlock()
- return failed || c.raceErrors+race.Errors() > 0
+ var (
+ failed bool
+ raceErrors int
+
+ fetch = func() {
+ failed = c.failed
+ raceErrors = c.raceErrors
+ }
+ )
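+ // Fold newly detected races into c.raceErrors under the write lock;
+ // otherwise a read lock is enough to take a snapshot.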
+ if newRaceErrors := fetchRaceErrors(); newRaceErrors > 0 {
+ c.mu.Lock()
+ c.raceErrors += newRaceErrors
+ fetch()
+ c.mu.Unlock()
+ } else {
+ c.mu.RLock()
+ fetch()
+ c.mu.RUnlock()
+ }
+
+ return failed || raceErrors > 0
}
// FailNow marks the function as having failed and stops its execution
return
}
+ // Collect any race errors seen so far, so that they are attributed
+ // to this test rather than to whichever test runs while this one
+ // is paused.
+ if newRaceErrors := fetchRaceErrors(); newRaceErrors > 0 {
+ t.mu.Lock()
+ t.raceErrors += newRaceErrors
+ t.mu.Unlock()
+ }
+
// We don't want to include the time we spend waiting for serial tests
// in the test duration. Record the elapsed time thus far and reset the
// timer afterwards.
// Add to the list of tests to be released by the parent.
t.parent.sub = append(t.parent.sub, t)
- t.raceErrors += race.Errors()
if t.chatty != nil {
t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
running.Store(t.name, time.Now())
t.start = time.Now()
- t.raceErrors += -race.Errors()
}
// Setenv calls os.Setenv(key, value) and uses Cleanup to
numFailed.Add(1)
}
- if t.raceErrors+race.Errors() > 0 {
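+ // Check both the races already attributed to this test and any the
+ // race detector has seen that no test has claimed yet.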
+ t.mu.RLock()
+ raceErrors := t.raceErrors
+ t.mu.RUnlock()
+ if raceErrors+fetchRaceErrors() > 0 {
t.Errorf("race detected during execution of test")
}
}()
t.start = time.Now()
- t.raceErrors = -race.Errors()
fn(t)
// code beyond here will not be executed when FailNow is invoked
import (
"bytes"
+ "internal/race"
"internal/testenv"
"os"
"path/filepath"
t.Errorf("in non-test testing.Test() returned %q, want %q", s, "false")
}
}
+
+// runTest re-runs the test binary to execute the named test with
+// -test.v and -test.parallel=2. It returns the combined output and
+// the exit status.
+func runTest(t *testing.T, test string) ([]byte, error) {
+ t.Helper()
+
+ testenv.MustHaveExec(t)
+
+ exe, err := os.Executable()
+ if err != nil {
+ t.Skipf("can't find test executable: %v", err)
+ }
+
+ cmd := testenv.Command(t, exe, "-test.run="+test, "-test.v", "-test.parallel=2")
+ cmd = testenv.CleanCmdEnv(cmd)
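+ // GO_WANT_HELPER_PROCESS tells the re-executed test to run its
+ // helper body instead of spawning another process.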
+ cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+
+ return out, err
+}
+
+// generateRaceReport triggers a data race, producing a race detector
+// report when the test binary is built with -race.
+func generateRaceReport() {
+ var x int
+ c1 := make(chan bool)
+ c2 := make(chan int, 1)
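+ // Nothing orders the goroutine's write of x with the read of x
+ // below: c2 is buffered, and c1 is only received after the racy
+ // read, so there is no happens-before edge between the two accesses.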
+ go func() {
+ x = 1 // racy write
+ c1 <- true
+ }()
+ c2 <- x // racy read
+ <-c1
+}
+
+func TestRaceReports(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ // Generate a race detector report in a subtest.
+ t.Run("Sub", func(t *testing.T) {
+ generateRaceReport()
+ })
+ return
+ }
+
+ out, _ := runTest(t, "TestRaceReports")
+
+ // We should see exactly one race detector report when running
+ // under the race detector, and none otherwise.
+ c := bytes.Count(out, []byte("race detected"))
+ want := 0
+ if race.Enabled {
+ want = 1
+ }
+ if c != want {
+ t.Errorf("got %d race reports, want %d", c, want)
+ }
+}
+
+// Issue #60083. This used to fail on the race builder.
+func TestRaceName(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ generateRaceReport()
+ return
+ }
+
+ out, _ := runTest(t, "TestRaceName")
+
+ if bytes.Contains(out, []byte("=== NAME \n")) {
+ t.Errorf("incorrectly reported test with no name")
+ }
+}
+
+func TestRaceSubReports(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+ t.Parallel()
+ c1 := make(chan bool, 1)
+ c2 := make(chan bool, 1)
+ t.Run("sub", func(t *testing.T) {
+ t.Run("subsub1", func(t *testing.T) {
+ t.Parallel()
+ generateRaceReport()
+ c1 <- true
+ })
+ t.Run("subsub2", func(t *testing.T) {
+ t.Parallel()
+ <-c1
+ generateRaceReport()
+ c2 <- true
+ })
+ })
+ <-c2
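+ // Trigger one more race in the main test body, after both
+ // subtests have finished.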
+ generateRaceReport()
+ return
+ }
+
+ out, _ := runTest(t, "TestRaceSubReports")
+
+ // There should be three race reports: one for each parallel
+ // subtest, and one for the race in the main test body after the
+ // subtests complete.
+ c := bytes.Count(out, []byte("race detected during execution of test"))
+ want := 0
+ if race.Enabled {
+ want = 3
+ }
+ if c != want {
+ t.Errorf("got %d race reports, want %d", c, want)
+ }
+}