"-cpu[values of GOMAXPROCS to use]:number list" \
"-run[run tests and examples matching regexp]:regexp" \
"-bench[run benchmarks matching regexp]:regexp" \
- "-benchtime[run each benchmark during n seconds]:duration" \
+ "-benchtime[run each benchmark until taking this long]:duration" \
"-timeout[kill test after that duration]:duration" \
"-cpuprofile[write CPU profile to file]:file:_files" \
"-memprofile[write heap profile to file]:file:_files" \
-test.timeout t
If a test runs longer than t, panic.
- -test.benchtime n
- Run enough iterations of each benchmark to take n seconds.
+ -test.benchtime t
+ Run enough iterations of each benchmark to take t.
The default is 1 second.
-test.cpu 1,2,4
-test.timeout t
If a test runs longer than t, panic.
- -test.benchtime n
- Run enough iterations of each benchmark to take n seconds.
+ -test.benchtime t
+ Run enough iterations of each benchmark to take t.
The default is 1 second.
-test.cpu 1,2,4
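Because the flag is now a time.Duration rather than a float number of seconds, its value needs a unit suffix. The short standalone snippet below (an illustration added here, not part of the change) shows how such values parse: flag.Duration accepts any input valid for time.ParseDuration, so forms like 2s, 100ms, or 1m30s work, while a bare number such as 2 is rejected.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Duration flags parse their values the way time.ParseDuration does.
	for _, v := range []string{"2s", "100ms", "1m30s", "2"} {
		d, err := time.ParseDuration(v)
		if err != nil {
			fmt.Printf("%q: %v\n", v, err) // a bare "2" has no unit and fails
			continue
		}
		fmt.Printf("%q: %v\n", v, d)
	}
}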
// These flags can be passed with or without a "test." prefix: -v or -test.v.
-bench="": passes -test.bench to test
-benchmem=false: print memory allocation statistics for benchmarks
- -benchtime=1: passes -test.benchtime to test
+ -benchtime=1s: passes -test.benchtime to test
-cpu="": passes -test.cpu to test
-cpuprofile="": passes -test.cpuprofile to test
-memprofile="": passes -test.memprofile to test
)
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
-var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
// An internal type but exported because it is cross-package; part of the implementation
// of go test.
b.runN(n)
// Run the benchmark for at least the specified amount of time.
- d := time.Duration(*benchTime * float64(time.Second))
+ d := *benchTime
for !b.failed && b.duration < d && n < 1e9 {
last := n
// Predict iterations/sec.
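For context, here is a minimal standalone sketch (not the testing package's actual code) of the pattern the hunk above relies on: register the target as a flag.Duration and keep growing the iteration count until the measured time reaches it. The names benchTarget and work are made up for the illustration.

package main

import (
	"flag"
	"fmt"
	"time"
)

// benchTarget mirrors the idea of -test.benchtime as a duration flag.
var benchTarget = flag.Duration("benchtime", time.Second, "approximate run time")

// work is a stand-in for the body of a benchmark iteration.
func work() { time.Sleep(50 * time.Microsecond) }

func main() {
	flag.Parse()
	n := 1
	var elapsed time.Duration
	for {
		start := time.Now()
		for i := 0; i < n; i++ {
			work()
		}
		elapsed = time.Since(start)
		if elapsed >= *benchTarget || n >= 1e9 {
			break
		}
		// Naive doubling; the real loop instead predicts how many
		// iterations fit in the target time and scales n accordingly.
		n *= 2
	}
	fmt.Printf("ran %d iterations in %v\n", n, elapsed)
}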
)
//
- var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+ var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
//
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
//
// For use like:
// $ go test -c
-// $ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15 -test.cpuprofile=http.prof
+// $ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof
// $ go tool pprof http.test http.prof
// (pprof) web
func BenchmarkServer(b *testing.B) {
)
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
-var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
// Global lock to ensure only one benchmark runs at a time.
b.runN(n)
// Run the benchmark for at least the specified amount of time.
- d := time.Duration(*benchTime * float64(time.Second))
+ d := *benchTime
for !b.failed && b.duration < d && n < 1e9 {
last := n
// Predict iterations/sec.
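The "Predict iterations/sec." comment refers to estimating the next iteration count from how long the previous run took. The sketch below shows one way such a prediction can look; it is an approximation written for this note, not the testing package's exact logic (which also bounds growth and rounds n up to a readable value).

package main

import (
	"fmt"
	"time"
)

// nextN estimates how many iterations should take roughly target,
// given that n iterations just took d.
func nextN(n int, d, target time.Duration) int {
	if n <= 0 || d <= 0 {
		return 1
	}
	nsPerOp := d.Nanoseconds() / int64(n)
	if nsPerOp == 0 {
		return 1e9 // effectively free per iteration; jump to the cap
	}
	next := int(target.Nanoseconds() / nsPerOp)
	if next <= n {
		next = n + 1 // always make forward progress
	}
	if next > 1e9 {
		next = 1e9 // same upper bound as the loop above
	}
	return next
}

func main() {
	// Example: 1000 iterations took 2ms; aim for the 1s default.
	fmt.Println(nextN(1000, 2*time.Millisecond, time.Second)) // 500000
}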
}
func main() {
- os.Args = []string{os.Args[0], "-test.benchtime=0.1"}
+ os.Args = []string{os.Args[0], "-test.benchtime=100ms"}
flag.Parse()
-
+
rslow := testing.Benchmark(BenchmarkSlowNonASCII)
rfast := testing.Benchmark(BenchmarkFastNonASCII)
tslow := rslow.NsPerOp()