// benchmarks should be executed. The default is the current value
// of GOMAXPROCS.
//
+// -failfast
+// Do not start new tests after the first test failure.
+//
// -list regexp
// List tests, benchmarks, or examples matching the regular expression.
// No tests, benchmarks or examples will be run. This will only
}
t.Fatalf("did not see JSON output")
}
+
+// TestFailFast verifies the behavior of go test -failfast: each case runs
+// the tests in testdata/src/failfast_test.go matching the -run pattern,
+// with and without -failfast, and counts how many "FAIL - " lines the
+// failing tests printed to stdout.
+func TestFailFast(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+
+ tests := []struct {
+ run string // -run pattern passed to go test
+ failfast bool // value passed as -failfast
+ nfail int // expected count of "FAIL - " markers in the output
+ }{
+ {"TestFailingA", true, 1},
+ {"TestFailing[AB]", true, 1},
+ {"TestFailing[AB]", false, 2},
+ // mix with non-failing tests:
+ {"TestA|TestFailing[AB]", true, 1},
+ {"TestA|TestFailing[AB]", false, 2},
+ // mix with parallel tests:
+ // note the expected counts below show that parallel tests which have
+ // already been started still run to completion, so -failfast can
+ // report more than one failure.
+ {"TestFailingB|TestParallelFailingA", true, 2},
+ {"TestFailingB|TestParallelFailingA", false, 2},
+ {"TestFailingB|TestParallelFailing[AB]", true, 3},
+ {"TestFailingB|TestParallelFailing[AB]", false, 3},
+ // mix with parallel sub-tests
+ {"TestFailingB|TestParallelFailing[AB]|TestParallelFailingSubtestsA", true, 3},
+ {"TestFailingB|TestParallelFailing[AB]|TestParallelFailingSubtestsA", false, 5},
+ {"TestParallelFailingSubtestsA", true, 1},
+ // only parallels:
+ {"TestParallelFailing[AB]", false, 2},
+ // non-parallel subtests:
+ {"TestFailingSubtestsA", true, 1},
+ {"TestFailingSubtestsA", false, 2},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.run, func(t *testing.T) {
+ // Every case matches at least one failing test, so go test is
+ // expected to exit with a non-zero status (runFail asserts this).
+ tg.runFail("test", "./testdata/src/failfast_test.go", "-run="+tt.run, "-failfast="+strconv.FormatBool(tt.failfast))
+
+ nfail := strings.Count(tg.getStdout(), "FAIL - ")
+
+ if nfail != tt.nfail {
+ t.Errorf("go test -run=%s -failfast=%t printed %d FAILs, want %d", tt.run, tt.failfast, nfail, tt.nfail)
+ }
+ })
+ }
+}
const testFlag2 = `
-bench regexp
Run only those benchmarks matching a regular expression.
- By default, no benchmarks are run.
+ By default, no benchmarks are run.
To run all benchmarks, use '-bench .' or '-bench=.'.
The regular expression is split by unbracketed slash (/)
characters into a sequence of regular expressions, and each
benchmarks should be executed. The default is the current value
of GOMAXPROCS.
+ -failfast
+ Do not start new tests after the first test failure.
+
-list regexp
List tests, benchmarks, or examples matching the regular expression.
No tests, benchmarks or examples will be run. This will only
{Name: "bench", PassToTest: true},
{Name: "benchmem", BoolVar: new(bool), PassToTest: true},
{Name: "benchtime", PassToTest: true},
+ {Name: "blockprofile", PassToTest: true},
+ {Name: "blockprofilerate", PassToTest: true},
{Name: "count", PassToTest: true},
{Name: "coverprofile", PassToTest: true},
{Name: "cpu", PassToTest: true},
{Name: "cpuprofile", PassToTest: true},
+ {Name: "failfast", BoolVar: new(bool), PassToTest: true},
{Name: "list", PassToTest: true},
{Name: "memprofile", PassToTest: true},
{Name: "memprofilerate", PassToTest: true},
- {Name: "blockprofile", PassToTest: true},
- {Name: "blockprofilerate", PassToTest: true},
{Name: "mutexprofile", PassToTest: true},
{Name: "mutexprofilefraction", PassToTest: true},
{Name: "outputdir", PassToTest: true},
--- /dev/null
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package failfast supplies a mix of passing, failing, parallel, and
+// subtest-bearing tests for cmd/go's TestFailFast to exercise the
+// -failfast flag. Each failing test prints "FAIL - <name>" so the
+// caller can count reported failures in the output.
+package failfast
+
+import "testing"
+
+// TestA always passes; it mixes a non-parallel, non-failing test in.
+func TestA(t *testing.T) {
+ // Edge-case testing, mixing non-parallel passing tests too
+ t.Logf("LOG: %s", t.Name())
+}
+
+// TestFailingA is a non-parallel test that always fails.
+func TestFailingA(t *testing.T) {
+ t.Errorf("FAIL - %s", t.Name())
+}
+
+// TestB always passes; it mixes a non-parallel, non-failing test in.
+func TestB(t *testing.T) {
+ // Edge-case testing, mixing non-parallel passing tests too
+ t.Logf("LOG: %s", t.Name())
+}
+
+// TestParallelFailingA is a parallel test that always fails.
+func TestParallelFailingA(t *testing.T) {
+ t.Parallel()
+ t.Errorf("FAIL - %s", t.Name())
+}
+
+// TestParallelFailingB is a parallel test that always fails.
+func TestParallelFailingB(t *testing.T) {
+ t.Parallel()
+ t.Errorf("FAIL - %s", t.Name())
+}
+
+// TestParallelFailingSubtestsA is a parallel test whose two
+// (non-parallel) subtests both fail.
+func TestParallelFailingSubtestsA(t *testing.T) {
+ t.Parallel()
+ t.Run("TestFailingSubtestsA1", func(t *testing.T) {
+ t.Errorf("FAIL - %s", t.Name())
+ })
+ t.Run("TestFailingSubtestsA2", func(t *testing.T) {
+ t.Errorf("FAIL - %s", t.Name())
+ })
+}
+
+// TestFailingSubtestsA is a non-parallel test whose two subtests both fail.
+func TestFailingSubtestsA(t *testing.T) {
+ t.Run("TestFailingSubtestsA1", func(t *testing.T) {
+ t.Errorf("FAIL - %s", t.Name())
+ })
+ t.Run("TestFailingSubtestsA2", func(t *testing.T) {
+ t.Errorf("FAIL - %s", t.Name())
+ })
+}
+
+// TestFailingB is a non-parallel test that always fails.
+func TestFailingB(t *testing.T) {
+ t.Errorf("FAIL - %s", t.Name())
+}
// full test of the package.
short = flag.Bool("test.short", false, "run smaller test suite to save time")
+ // The failfast flag requests that test execution stop after the first test failure.
+ failFast = flag.Bool("test.failfast", false, "do not start new tests after the first test failure")
+
// The directory in which to create profile files and the like. When run from
// "go test", the binary always runs in the source directory for the package;
// this flag lets "go test" tell the binary to write the files in the directory where
haveExamples bool // are there examples?
cpuList []int
+
+ numFailed uint32 // number of test failures
)
// common holds the elements common between T and B and
t.start = time.Now()
t.raceErrors = -race.Errors()
fn(t)
+
+ if t.failed {
+ atomic.AddUint32(&numFailed, 1)
+ }
t.finished = true
}
func (t *T) Run(name string, f func(t *T)) bool {
atomic.StoreInt32(&t.hasSub, 1)
testName, ok, _ := t.context.match.fullName(&t.common, name)
- if !ok {
+ if !ok || shouldFailFast() {
return true
}
t = &T{
for _, procs := range cpuList {
runtime.GOMAXPROCS(procs)
for i := uint(0); i < *count; i++ {
+ if shouldFailFast() {
+ break
+ }
ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run"))
t := &T{
common: common{
cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
}
}
+
+// shouldFailFast reports whether test execution should stop starting new
+// tests: true when -test.failfast is set and at least one test has
+// already failed (numFailed is incremented atomically by tRunner).
+func shouldFailFast() bool {
+ return *failFast && atomic.LoadUint32(&numFailed) > 0
+}