about summary refs log tree commit diff
path: root/src/testing/benchmark.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/testing/benchmark.go')
-rw-r--r-- src/testing/benchmark.go | 47
1 files changed, 32 insertions, 15 deletions
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index 4d569b7971..ac9ca58397 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -251,27 +251,20 @@ func (b *B) run() {
b.context.processBench(b) // Must call doBench.
} else {
// Running func Benchmark.
- b.doBench()
+ b.doBench(0)
}
}
-func (b *B) doBench() BenchmarkResult {
- go b.launch()
+func (b *B) doBench(hint int) BenchmarkResult {
+ go b.launch(hint)
<-b.signal
return b.result
}
-// launch launches the benchmark function. It gradually increases the number
-// of benchmark iterations until the benchmark runs for the requested benchtime.
-// launch is run by the doBench function as a separate goroutine.
-// run1 must have been called on b.
-func (b *B) launch() {
- // Signal that we're done whether we return normally
- // or by FailNow's runtime.Goexit.
- defer func() {
- b.signal <- true
- }()
-
+// autodetectN runs the benchmark function, gradually increasing the
+// number of iterations until the benchmark runs for the requested
+// benchtime.
+func (b *B) autodetectN() {
// Run the benchmark for at least the specified amount of time.
d := b.benchTime
for n := 1; !b.failed && b.duration < d && n < 1e9; {
@@ -289,6 +282,26 @@ func (b *B) launch() {
n = roundUp(n)
b.runN(n)
}
+}
+
+// launch launches the benchmark function for hintN iterations. If
+// hintN == 0, it autodetects the number of benchmark iterations based
+// on the requested benchtime.
+// launch is run by the doBench function as a separate goroutine.
+// run1 must have been called on b.
+func (b *B) launch(hintN int) {
+ // Signal that we're done whether we return normally
+ // or by FailNow's runtime.Goexit.
+ defer func() {
+ b.signal <- true
+ }()
+
+ if hintN == 0 {
+ b.autodetectN()
+ } else {
+ b.runN(hintN)
+ }
+
b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
}
@@ -426,6 +439,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
// processBench runs bench b for the configured CPU counts and prints the results.
func (ctx *benchContext) processBench(b *B) {
for i, procs := range cpuList {
+ var nHint int
for j := uint(0); j < *count; j++ {
runtime.GOMAXPROCS(procs)
benchName := benchmarkName(b.name, procs)
@@ -444,7 +458,10 @@ func (ctx *benchContext) processBench(b *B) {
}
b.run1()
}
- r := b.doBench()
+ r := b.doBench(nHint)
+ if j == 0 {
+ nHint = b.N
+ }
if b.failed {
// The output could be very long here, but probably isn't.
// We print it all, regardless, because we don't want to trim the reason