Cypherpunks repositories - gostls13.git/commitdiff
internal/trace: only test for sync preemption if async preemption is off
authorMichael Anthony Knyszek <mknyszek@google.com>
Mon, 7 Jul 2025 17:19:17 +0000 (17:19 +0000)
committerMichael Knyszek <mknyszek@google.com>
Mon, 7 Jul 2025 20:42:35 +0000 (13:42 -0700)
Currently, the test change made for the fix to #68090 is flaky. This is
because the sync-point-only goroutine that we expect to be sync
preempted might only ever get async preempted in some circumstances.

This change adds a variant of every trace test that runs with
asyncpreemptoff=1, and the stacks test (the flaky one) now checks
for the sync preemption point in the trace only when async
preemption is disabled.

Fixes #74417.

Change-Id: Ib6341bbc26921574b8f0fff6dd521ce83f85499c
Reviewed-on: https://go-review.googlesource.com/c/go/+/686055
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
src/internal/trace/trace_test.go

index 44b70553440242ba2844969191e3b6a3a0675614..bd3f078f05b40035ec6d5526a4a1bbdbf8bdc658 100644 (file)
@@ -23,7 +23,7 @@ import (
 )
 
 func TestTraceAnnotations(t *testing.T) {
-       testTraceProg(t, "annotations.go", func(t *testing.T, tb, _ []byte, _ bool) {
+       testTraceProg(t, "annotations.go", func(t *testing.T, tb, _ []byte, _ string) {
                type evDesc struct {
                        kind trace.EventKind
                        task trace.TaskID
@@ -98,7 +98,7 @@ func TestTraceCgoCallback(t *testing.T) {
 }
 
 func TestTraceCPUProfile(t *testing.T) {
-       testTraceProg(t, "cpu-profile.go", func(t *testing.T, tb, stderr []byte, _ bool) {
+       testTraceProg(t, "cpu-profile.go", func(t *testing.T, tb, stderr []byte, _ string) {
                // Parse stderr which has a CPU profile summary, if everything went well.
                // (If it didn't, we shouldn't even make it here.)
                scanner := bufio.NewScanner(bytes.NewReader(stderr))
@@ -211,7 +211,7 @@ func TestTraceCPUProfile(t *testing.T) {
 }
 
 func TestTraceFutileWakeup(t *testing.T) {
-       testTraceProg(t, "futile-wakeup.go", func(t *testing.T, tb, _ []byte, _ bool) {
+       testTraceProg(t, "futile-wakeup.go", func(t *testing.T, tb, _ []byte, _ string) {
                // Check to make sure that no goroutine in the "special" trace region
                // ends up blocking, unblocking, then immediately blocking again.
                //
@@ -312,7 +312,7 @@ func TestTraceGOMAXPROCS(t *testing.T) {
 }
 
 func TestTraceStacks(t *testing.T) {
-       testTraceProg(t, "stacks.go", func(t *testing.T, tb, _ []byte, stress bool) {
+       testTraceProg(t, "stacks.go", func(t *testing.T, tb, _ []byte, godebug string) {
                type frame struct {
                        fn   string
                        line int
@@ -403,11 +403,19 @@ func TestTraceStacks(t *testing.T) {
                                {"runtime.GOMAXPROCS", 0},
                                {"main.main", 0},
                        }},
-                       {trace.EventStateTransition, "Goroutine Running->Runnable", []frame{
+               }
+               asyncPreemptOff := strings.Contains(godebug, "asyncpreemptoff=1")
+               if asyncPreemptOff {
+                       // Only check for syncPreemptPoint if asynchronous preemption is disabled.
+                       // Otherwise, the syncPreemptPoint goroutine might be exclusively async
+                       // preempted, causing this test to flake.
+                       syncPreemptEv := evDesc{trace.EventStateTransition, "Goroutine Running->Runnable", []frame{
                                {"main.syncPreemptPoint", 0},
                                {"main.main.func12", 0},
-                       }},
+                       }}
+                       want = append(want, syncPreemptEv)
                }
+               stress := strings.Contains(godebug, "traceadvanceperiod=0")
                if !stress {
                        // Only check for this stack if !stress because traceAdvance alone could
                        // allocate enough memory to trigger a GC if called frequently enough.
@@ -535,7 +543,7 @@ func TestTraceIterPull(t *testing.T) {
        testTraceProg(t, "iter-pull.go", nil)
 }
 
-func checkReaderDeterminism(t *testing.T, tb, _ []byte, _ bool) {
+func checkReaderDeterminism(t *testing.T, tb, _ []byte, _ string) {
        events := func() []trace.Event {
                var evs []trace.Event
 
@@ -572,7 +580,7 @@ func checkReaderDeterminism(t *testing.T, tb, _ []byte, _ bool) {
        }
 }
 
-func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace, stderr []byte, stress bool)) {
+func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace, stderr []byte, godebug string)) {
        testenv.MustHaveGoRun(t)
 
        // Check if we're on a builder.
@@ -657,7 +665,7 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace
 
                // Run some extra validation.
                if !t.Failed() && extra != nil {
-                       extra(t, tb, errBuf.Bytes(), stress)
+                       extra(t, tb, errBuf.Bytes(), godebug)
                }
 
                // Dump some more information on failure.
@@ -686,6 +694,9 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace
        t.Run("Default", func(t *testing.T) {
                runTest(t, false, "")
        })
+       t.Run("AsyncPreemptOff", func(t *testing.T) {
+               runTest(t, false, "asyncpreemptoff=1")
+       })
        t.Run("Stress", func(t *testing.T) {
                if testing.Short() {
                        t.Skip("skipping trace stress tests in short mode")