if err != nil {
t.Fatalf("failed to parse test file at %s: %v", testPath, err)
}
- testReader(t, tr, ver, exp)
+ v := testtrace.NewValidator()
+ v.GoVersion = ver
+ testReader(t, tr, v, exp)
})
}
}
})
}
-func testReader(t *testing.T, tr io.Reader, ver version.Version, exp *testtrace.Expectation) {
+func testReader(t *testing.T, tr io.Reader, v *testtrace.Validator, exp *testtrace.Expectation) {
r, err := trace.NewReader(tr)
if err != nil {
if err := exp.Check(err); err != nil {
}
return
}
- v := testtrace.NewValidator()
- v.GoVersion = ver
for {
ev, err := r.ReadEvent()
if err == io.EOF {
tasks map[trace.TaskID]string
lastSync trace.Sync
GoVersion version.Version
+
+ // Flags to modify validation behavior.
+ skipClockSnapshotChecks bool // Some platforms can't guarantee a monotonically increasing clock reading.
}
type schedContext struct {
}
}
+// SkipClockSnapshotChecks causes the validator to skip checks on the clock snapshots
+// attached to sync events — specifically, the checks that wall, mono, and trace time
+// each increase strictly between consecutive syncs.
+//
+// Some platforms like Windows, with a small enough trace period, are unable to produce
+// monotonically increasing timestamps due to very coarse clock granularity.
+func (v *Validator) SkipClockSnapshotChecks() {
+	v.skipClockSnapshotChecks = true
+}
+
// Event validates ev as the next event in a stream of trace.Events.
//
// Returns an error if validation fails.
if s.ClockSnapshot.Trace == 0 {
e.Errorf("sync %d has zero trace time", s.N)
}
- if s.N >= 2 && !s.ClockSnapshot.Wall.After(v.lastSync.ClockSnapshot.Wall) {
- e.Errorf("sync %d has non-increasing wall time: %v vs %v", s.N, s.ClockSnapshot.Wall, v.lastSync.ClockSnapshot.Wall)
- }
- if s.N >= 2 && !(s.ClockSnapshot.Mono > v.lastSync.ClockSnapshot.Mono) {
- e.Errorf("sync %d has non-increasing mono time: %v vs %v", s.N, s.ClockSnapshot.Mono, v.lastSync.ClockSnapshot.Mono)
- }
- if s.N >= 2 && !(s.ClockSnapshot.Trace > v.lastSync.ClockSnapshot.Trace) {
- e.Errorf("sync %d has non-increasing trace time: %v vs %v", s.N, s.ClockSnapshot.Trace, v.lastSync.ClockSnapshot.Trace)
+ if !v.skipClockSnapshotChecks {
+ if s.N >= 2 && !s.ClockSnapshot.Wall.After(v.lastSync.ClockSnapshot.Wall) {
+ e.Errorf("sync %d has non-increasing wall time: %v vs %v", s.N, s.ClockSnapshot.Wall, v.lastSync.ClockSnapshot.Wall)
+ }
+ if s.N >= 2 && !(s.ClockSnapshot.Mono > v.lastSync.ClockSnapshot.Mono) {
+ e.Errorf("sync %d has non-increasing mono time: %v vs %v", s.N, s.ClockSnapshot.Mono, v.lastSync.ClockSnapshot.Mono)
+ }
+ if s.N >= 2 && !(s.ClockSnapshot.Trace > v.lastSync.ClockSnapshot.Trace) {
+ e.Errorf("sync %d has non-increasing trace time: %v vs %v", s.N, s.ClockSnapshot.Trace, v.lastSync.ClockSnapshot.Trace)
+ }
}
}
v.lastSync = s
tb := traceBuf.Bytes()
// Test the trace and the parser.
- testReader(t, bytes.NewReader(tb), version.Current, testtrace.ExpectSuccess())
+ v := testtrace.NewValidator()
+ v.GoVersion = version.Current
+ if runtime.GOOS == "windows" && stress {
+ // Under stress mode we're constantly advancing trace generations.
+ // Windows' clock granularity is too coarse to guarantee monotonic
+ // timestamps for monotonic and wall clock time in this case, so
+ // skip the checks.
+ v.SkipClockSnapshotChecks()
+ }
+ testReader(t, bytes.NewReader(tb), v, testtrace.ExpectSuccess())
// Run some extra validation.
if !t.Failed() && extra != nil {