From: Michael Anthony Knyszek
Date: Wed, 21 May 2025 17:50:15 +0000 (+0000)
Subject: internal/trace: skip clock snapshot checks on Windows in stress mode
X-Git-Tag: go1.25rc1~139
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=77345f41ee36c8db3ba7e0f687a8834fa7b83c48;p=gostls13.git

internal/trace: skip clock snapshot checks on Windows in stress mode

Windows' monotonic and wall clock granularity is just too coarse to get
reasonable values out of stress mode, which is creating new trace
generations constantly.

Fixes #73813.

Change-Id: Id9cb2fed9775ce8d78a736d0164daa7bf45075e0
Reviewed-on: https://go-review.googlesource.com/c/go/+/675096
Reviewed-by: Felix Geisendörfer
Reviewed-by: Roland Shoemaker
Auto-Submit: Michael Knyszek
LUCI-TryBot-Result: Go LUCI
---

diff --git a/src/internal/trace/reader_test.go b/src/internal/trace/reader_test.go
index 222d2dfa82..691cda6688 100644
--- a/src/internal/trace/reader_test.go
+++ b/src/internal/trace/reader_test.go
@@ -42,7 +42,9 @@ func TestReaderGolden(t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed to parse test file at %s: %v", testPath, err)
 			}
-			testReader(t, tr, ver, exp)
+			v := testtrace.NewValidator()
+			v.GoVersion = ver
+			testReader(t, tr, v, exp)
 		})
 	}
 }
@@ -94,7 +96,7 @@ func FuzzReader(f *testing.F) {
 	})
 }
 
-func testReader(t *testing.T, tr io.Reader, ver version.Version, exp *testtrace.Expectation) {
+func testReader(t *testing.T, tr io.Reader, v *testtrace.Validator, exp *testtrace.Expectation) {
 	r, err := trace.NewReader(tr)
 	if err != nil {
 		if err := exp.Check(err); err != nil {
@@ -102,8 +104,6 @@ func testReader(t *testing.T, tr io.Reader, ver version.Version, exp *testtrace.
 		}
 		return
 	}
-	v := testtrace.NewValidator()
-	v.GoVersion = ver
 	for {
 		ev, err := r.ReadEvent()
 		if err == io.EOF {
diff --git a/src/internal/trace/testtrace/validation.go b/src/internal/trace/testtrace/validation.go
index da0e871287..2060d0d44c 100644
--- a/src/internal/trace/testtrace/validation.go
+++ b/src/internal/trace/testtrace/validation.go
@@ -23,6 +23,9 @@ type Validator struct {
 	tasks     map[trace.TaskID]string
 	lastSync  trace.Sync
 	GoVersion version.Version
+
+	// Flags to modify validation behavior.
+	skipClockSnapshotChecks bool // Some platforms can't guarantee a monotonically increasing clock reading.
 }
 
 type schedContext struct {
@@ -53,6 +56,14 @@ func NewValidator() *Validator {
 	}
 }
 
+// SkipClockSnapshotChecks causes the validator to skip checks on the clock snapshots.
+//
+// Some platforms like Windows, with a small enough trace period, are unable to produce
+// monotonically increasing timestamps due to very coarse clock granularity.
+func (v *Validator) SkipClockSnapshotChecks() {
+	v.skipClockSnapshotChecks = true
+}
+
 // Event validates ev as the next event in a stream of trace.Events.
 //
 // Returns an error if validation fails.
@@ -97,14 +108,16 @@ func (v *Validator) Event(ev trace.Event) error {
 			if s.ClockSnapshot.Trace == 0 {
 				e.Errorf("sync %d has zero trace time", s.N)
 			}
-			if s.N >= 2 && !s.ClockSnapshot.Wall.After(v.lastSync.ClockSnapshot.Wall) {
-				e.Errorf("sync %d has non-increasing wall time: %v vs %v", s.N, s.ClockSnapshot.Wall, v.lastSync.ClockSnapshot.Wall)
-			}
-			if s.N >= 2 && !(s.ClockSnapshot.Mono > v.lastSync.ClockSnapshot.Mono) {
-				e.Errorf("sync %d has non-increasing mono time: %v vs %v", s.N, s.ClockSnapshot.Mono, v.lastSync.ClockSnapshot.Mono)
-			}
-			if s.N >= 2 && !(s.ClockSnapshot.Trace > v.lastSync.ClockSnapshot.Trace) {
-				e.Errorf("sync %d has non-increasing trace time: %v vs %v", s.N, s.ClockSnapshot.Trace, v.lastSync.ClockSnapshot.Trace)
+			if !v.skipClockSnapshotChecks {
+				if s.N >= 2 && !s.ClockSnapshot.Wall.After(v.lastSync.ClockSnapshot.Wall) {
+					e.Errorf("sync %d has non-increasing wall time: %v vs %v", s.N, s.ClockSnapshot.Wall, v.lastSync.ClockSnapshot.Wall)
+				}
+				if s.N >= 2 && !(s.ClockSnapshot.Mono > v.lastSync.ClockSnapshot.Mono) {
+					e.Errorf("sync %d has non-increasing mono time: %v vs %v", s.N, s.ClockSnapshot.Mono, v.lastSync.ClockSnapshot.Mono)
+				}
+				if s.N >= 2 && !(s.ClockSnapshot.Trace > v.lastSync.ClockSnapshot.Trace) {
+					e.Errorf("sync %d has non-increasing trace time: %v vs %v", s.N, s.ClockSnapshot.Trace, v.lastSync.ClockSnapshot.Trace)
+				}
 			}
 		}
 		v.lastSync = s
diff --git a/src/internal/trace/trace_test.go b/src/internal/trace/trace_test.go
index 8c40e84a81..0aa297d762 100644
--- a/src/internal/trace/trace_test.go
+++ b/src/internal/trace/trace_test.go
@@ -618,7 +618,16 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace
 	tb := traceBuf.Bytes()
 
 	// Test the trace and the parser.
-	testReader(t, bytes.NewReader(tb), version.Current, testtrace.ExpectSuccess())
+	v := testtrace.NewValidator()
+	v.GoVersion = version.Current
+	if runtime.GOOS == "windows" && stress {
+		// Under stress mode we're constantly advancing trace generations.
+		// Windows' clock granularity is too coarse to guarantee monotonic
+		// timestamps for monotonic and wall clock time in this case, so
+		// skip the checks.
+		v.SkipClockSnapshotChecks()
+	}
+	testReader(t, bytes.NewReader(tb), v, testtrace.ExpectSuccess())
 
 	// Run some extra validation.
 	if !t.Failed() && extra != nil {
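To illustrate the coarse-granularity problem the commit message describes, the following standalone sketch (not part of this CL, purely illustrative) samples time.Now() in a tight loop and counts how often consecutive readings fail to advance. On Windows, where the clock typically ticks in steps on the order of a millisecond or more, many consecutive samples observe the same reading; this is why back-to-back trace generations in stress mode cannot guarantee strictly increasing clock snapshots, while on most other platforms the count stays near zero.

// clockgranularity.go: a standalone sketch, not part of this CL.
// It counts how often consecutive time.Now() readings do not advance.
package main

import (
	"fmt"
	"time"
)

func main() {
	const samples = 1_000_000
	prev := time.Now()
	stuck := 0
	for i := 0; i < samples; i++ {
		now := time.Now()
		// On a coarse clock, now frequently equals prev, so After reports false.
		if !now.After(prev) {
			stuck++
		}
		prev = now
	}
	fmt.Printf("%d of %d consecutive samples did not observe the clock advance\n", stuck, samples)
}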