about summary refs log tree commit diff
path: root/src/testing/benchmark.go
diff options
context:
space:
mode:
author	Marcel van Lohuizen <mpvl@golang.org>	2016-01-29 16:55:35 +0100
committer	Marcel van Lohuizen <mpvl@golang.org>	2016-03-18 11:35:16 +0000
commit	89cda2db007c8389ba39d292c6372ff0c6a7622f (patch)
tree	acc06bf69c07456fd6b8607cbf0222d6cc01cb6b /src/testing/benchmark.go
parent	5c83e651adfa78d73634557cfaf2fbc9bde599f0 (diff)
download	go-89cda2db007c8389ba39d292c6372ff0c6a7622f.tar.xz
testing: hoisted chunks of code to prepare for Run method
testing.go: - run method will evolve into the Run method. - added level field in common benchmark.go: - benchContext will be central to distinguish handling of benchmarks between normal Run methods and ones called from within Benchmark function. - expandCPU will evolve into the processing hook for Run methods called within normal processing. - runBench will evolve into the Run method. Change-Id: I1816f9985d5ba94deb0ad062302ea9aee0bb5338 Reviewed-on: https://go-review.googlesource.com/18894 Reviewed-by: Russ Cox <rsc@golang.org>
Diffstat (limited to 'src/testing/benchmark.go')
-rw-r--r--	src/testing/benchmark.go	99
1 files changed, 65 insertions, 34 deletions
diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go
index b092a9d9e2..4dac1e6d63 100644
--- a/src/testing/benchmark.go
+++ b/src/testing/benchmark.go
@@ -46,6 +46,7 @@ type InternalBenchmark struct {
// affecting benchmark results.
type B struct {
common
+ context *benchContext
N int
previousN int // number of iterations in the previous run
previousDuration time.Duration // total duration of the previous run
@@ -299,6 +300,10 @@ func benchmarkName(name string, n int) string {
return name
}
+type benchContext struct {
+ maxLen int // The largest recorded benchmark name.
+}
+
// An internal function but exported because it is cross-package; part of the implementation
// of the "go test" command.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
@@ -334,46 +339,72 @@ func runBenchmarksInternal(matchString func(pat, str string) (bool, error), benc
}
}
ok := true
+ main := &B{
+ common: common{name: "Main"},
+ context: &benchContext{
+ maxLen: maxlen,
+ },
+ }
for _, Benchmark := range bs {
- for _, procs := range cpuList {
- runtime.GOMAXPROCS(procs)
- b := &B{
- common: common{
- signal: make(chan bool),
- name: Benchmark.Name,
- },
- benchFunc: Benchmark.F,
- }
- benchName := benchmarkName(Benchmark.Name, procs)
- fmt.Printf("%-*s\t", maxlen, benchName)
- r := b.run()
- if b.failed {
- ok = false
- // The output could be very long here, but probably isn't.
- // We print it all, regardless, because we don't want to trim the reason
- // the benchmark failed.
- fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
- continue
- }
- results := r.String()
- if *benchmarkMemory || b.showAllocResult {
- results += "\t" + r.MemString()
- }
- fmt.Println(results)
- // Unlike with tests, we ignore the -chatty flag and always print output for
- // benchmarks since the output generation time will skew the results.
- if len(b.output) > 0 {
- b.trimOutput()
- fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
- }
- if p := runtime.GOMAXPROCS(-1); p != procs {
- fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
- }
+ ok = ok && expandCPU(main, Benchmark)
+ }
+ return ok
+}
+
+func expandCPU(parent *B, Benchmark InternalBenchmark) bool {
+ ok := true
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ benchName := benchmarkName(Benchmark.Name, procs)
+ fmt.Printf("%-*s\t", parent.context.maxLen, benchName)
+ b := parent.runBench(Benchmark.Name, Benchmark.F)
+ r := b.result
+ if b.failed {
+ ok = false
+ // The output could be very long here, but probably isn't.
+ // We print it all, regardless, because we don't want to trim the reason
+ // the benchmark failed.
+ fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
+ continue
+ }
+ results := r.String()
+ if *benchmarkMemory || b.showAllocResult {
+ results += "\t" + r.MemString()
+ }
+ fmt.Println(results)
+ // Unlike with tests, we ignore the -chatty flag and always print output for
+ // benchmarks since the output generation time will skew the results.
+ if len(b.output) > 0 {
+ b.trimOutput()
+ fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
+ }
+ if p := runtime.GOMAXPROCS(-1); p != procs {
+ fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
}
}
return ok
}
+// runBench benchmarks f as a subbenchmark with the given name. It reports
+// whether there were any failures.
+//
+// A subbenchmark is like any other benchmark. A benchmark that calls Run at
+// least once will not be measured itself and will only run for one iteration.
+func (b *B) runBench(name string, f func(b *B)) *B {
+ sub := &B{
+ common: common{
+ signal: make(chan bool),
+ name: name,
+ parent: &b.common,
+ level: b.level + 1,
+ },
+ benchFunc: f,
+ context: b.context,
+ }
+ sub.run()
+ return sub
+}
+
// trimOutput shortens the output from a benchmark, which can be very long.
func (b *B) trimOutput() {
// The output is likely to appear multiple times because the benchmark