Mirror of https://github.com/encounter/dawn-cmake.git (synced 2025-12-11 06:27:54 +00:00)
tools: Add perfmon
A tool to continually and automatically generate performance metrics for Tint CLs. perfmon monitors Gerrit changes, benchmarks them, and posts the results to the Gerrit change. Committed changes are also benchmarked, and results are automatically posted to: https://tint-perfmon-bot.github.io/tint-perf

Bug: tint:1383
Change-Id: I3470b170046e1d9af456f5e3a1d6ff76c305898a
Reviewed-on: https://dawn-review.googlesource.com/c/tint/+/77940
Kokoro: Kokoro <noreply+kokoro@google.com>
Reviewed-by: Ryan Harrison <rharrison@chromium.org>
Reviewed-by: Antonio Maiorano <amaiorano@google.com>
Commit-Queue: Ben Clayton <bclayton@google.com>
Committed by: Tint LUCI CQ
Parent: 3cdb8e3c3e
Commit: c126bc95df
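The bench package reworked by this change exposes Parse, Compare, and Diffs.Format, which a tool like perfmon can chain to turn two benchmark captures into a comparison table. Below is a minimal sketch of that flow; the file names, thresholds, and the main wrapper are illustrative assumptions, not part of this change.

// Hypothetical driver: parse two captured benchmark outputs and print a
// comparison table. File names and thresholds are illustrative only.
package main

import (
	"fmt"
	"os"
	"time"

	"dawn.googlesource.com/tint/tools/src/bench"
)

func main() {
	// Read the benchmark output captured before and after a change.
	before, err := os.ReadFile("before.json") // assumed capture file
	if err != nil {
		panic(err)
	}
	after, err := os.ReadFile("after.json") // assumed capture file
	if err != nil {
		panic(err)
	}

	// bench.Parse accepts either Google Benchmark's JSON or console output.
	a, err := bench.Parse(string(before))
	if err != nil {
		panic(err)
	}
	b, err := bench.Parse(string(after))
	if err != nil {
		panic(err)
	}

	// Keep only differences of at least 10µs and 5% relative change
	// (illustrative thresholds), then render them as a text table.
	diffs := bench.Compare(a.Benchmarks, b.Benchmarks, 10*time.Microsecond, 0.05)
	fmt.Println(diffs.Format(bench.DiffFormat{
		TestName:        true,
		Delta:           true,
		PercentChangeAB: true,
		TimeA:           true,
		TimeB:           true,
	}))
}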
tools/src/bench/bench.go
@@ -20,131 +20,398 @@ import (
	"errors"
	"fmt"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"
)

// Test holds the results of a single benchmark test.
type Test struct {
	Name       string
	NumTasks   uint
	NumThreads uint
	Duration   time.Duration
	Iterations uint
// Run holds all the benchmark results for a run, along with the context
// information for the run.
type Run struct {
	Benchmarks []Benchmark
	Context    *Context
}

var testVarRE = regexp.MustCompile(`([\w])+:([0-9]+)`)

func (t *Test) parseName() {
	for _, match := range testVarRE.FindAllStringSubmatch(t.Name, -1) {
		if len(match) != 3 {
			continue
		}
		n, err := strconv.Atoi(match[2])
		if err != nil {
			continue
		}
		switch match[1] {
		case "threads":
			t.NumThreads = uint(n)
		case "tasks":
			t.NumTasks = uint(n)
		}
	}
// Context provides information about the environment used to perform the
// benchmark.
type Context struct {
	Date              time.Time
	HostName          string
	Executable        string
	NumCPUs           int
	MhzPerCPU         int
	CPUScalingEnabled bool
	Caches            []ContextCache
	LoadAvg           []float32
	LibraryBuildType  string
}

// Benchmark holds a set of benchmark test results.
// ContextCache holds information about one of the system caches.
type ContextCache struct {
	Type       string
	Level      int
	Size       int
	NumSharing int
}

// Benchmark holds the results of a single benchmark test.
type Benchmark struct {
	Tests []Test
	Name          string
	Duration      time.Duration
	AggregateType AggregateType
}

// AggregateType is an enumerator of benchmark aggregate types.
type AggregateType string

// Enumerator values of AggregateType
const (
	NonAggregate AggregateType = "NonAggregate"
	Mean         AggregateType = "mean"
	Median       AggregateType = "median"
	Stddev       AggregateType = "stddev"
)

// Parse parses the benchmark results from the string s.
// Parse will handle the json and 'console' formats.
func Parse(s string) (Benchmark, error) {
	type Parser = func(s string) (Benchmark, error)
func Parse(s string) (Run, error) {
	type Parser = func(s string) (Run, error)
	for _, parser := range []Parser{parseConsole, parseJSON} {
		b, err := parser(s)
		r, err := parser(s)
		switch err {
		case nil:
			return b, nil
			return r, nil
		case errWrongFormat:
		default:
			return Benchmark{}, err
			return Run{}, err
		}
	}

	return Benchmark{}, errors.New("Unrecognised file format")
	return Run{}, errors.New("Unrecognised file format")
}

var errWrongFormat = errors.New("Wrong format")
var consoleLineRE = regexp.MustCompile(`([\w/:]+)\s+([0-9]+(?:.[0-9]+)?) ns\s+[0-9]+(?:.[0-9]+) ns\s+([0-9]+)`)

func parseConsole(s string) (Benchmark, error) {
func parseConsole(s string) (Run, error) {
	blocks := strings.Split(s, "------------------------------------------------------------------------------------------")
	if len(blocks) != 3 {
		return Benchmark{}, errWrongFormat
		return Run{}, errWrongFormat
	}

	lines := strings.Split(blocks[2], "\n")
	b := Benchmark{
		Tests: make([]Test, 0, len(lines)),
	}
	b := make([]Benchmark, 0, len(lines))

	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		matches := consoleLineRE.FindStringSubmatch(line)
		if len(matches) != 4 {
			return Benchmark{}, fmt.Errorf("Unable to parse the line:\n" + line)
			return Run{}, fmt.Errorf("Unable to parse the line:\n" + line)
		}
		ns, err := strconv.ParseFloat(matches[2], 64)
		if err != nil {
			return Benchmark{}, fmt.Errorf("Unable to parse the duration: " + matches[2])
		}
		iterations, err := strconv.Atoi(matches[3])
		if err != nil {
			return Benchmark{}, fmt.Errorf("Unable to parse the number of iterations: " + matches[3])
			return Run{}, fmt.Errorf("Unable to parse the duration: " + matches[2])
		}

		t := Test{
			Name:       matches[1],
			Duration:   time.Nanosecond * time.Duration(ns),
			Iterations: uint(iterations),
		}
		t.parseName()
		b.Tests = append(b.Tests, t)
		b = append(b, Benchmark{
			Name:     trimAggregateSuffix(matches[1]),
			Duration: time.Nanosecond * time.Duration(ns),
		})
	}
	return b, nil
	return Run{Benchmarks: b}, nil
}

func parseJSON(s string) (Benchmark, error) {
	type T struct {
		Name       string  `json:"name"`
		Iterations uint    `json:"iterations"`
		Time       float64 `json:"real_time"`
func parseJSON(s string) (Run, error) {
	type Data struct {
		Context struct {
			Date              time.Time `json:"date"`
			HostName          string    `json:"host_name"`
			Executable        string    `json:"executable"`
			NumCPUs           int       `json:"num_cpus"`
			MhzPerCPU         int       `json:"mhz_per_cpu"`
			CPUScalingEnabled bool      `json:"cpu_scaling_enabled"`
			LoadAvg           []float32 `json:"load_avg"`
			LibraryBuildType  string    `json:"library_build_type"`
			Caches            []struct {
				Type       string `json:"type"`
				Level      int    `json:"level"`
				Size       int    `json:"size"`
				NumSharing int    `json:"num_sharing"`
			} `json:"caches"`
		} `json:"context"`
		Benchmarks []struct {
			Name          string        `json:"name"`
			Time          float64       `json:"real_time"`
			AggregateType AggregateType `json:"aggregate_name"`
		} `json:"benchmarks"`
	}
	type B struct {
		Tests []T `json:"benchmarks"`
	}
	b := B{}
	data := Data{}
	d := json.NewDecoder(strings.NewReader(s))
	if err := d.Decode(&b); err != nil {
		return Benchmark{}, err
	if err := d.Decode(&data); err != nil {
		return Run{}, err
	}

	out := Benchmark{
		Tests: make([]Test, len(b.Tests)),
	out := Run{
		Benchmarks: make([]Benchmark, len(data.Benchmarks)),
		Context: &Context{
			Date:              data.Context.Date,
			HostName:          data.Context.HostName,
			Executable:        data.Context.Executable,
			NumCPUs:           data.Context.NumCPUs,
			MhzPerCPU:         data.Context.MhzPerCPU,
			CPUScalingEnabled: data.Context.CPUScalingEnabled,
			LoadAvg:           data.Context.LoadAvg,
			LibraryBuildType:  data.Context.LibraryBuildType,
			Caches:            make([]ContextCache, len(data.Context.Caches)),
		},
	}
	for i, test := range b.Tests {
		t := Test{
			Name:       test.Name,
			Duration:   time.Nanosecond * time.Duration(int64(test.Time)),
			Iterations: test.Iterations,
	for i, c := range data.Context.Caches {
		out.Context.Caches[i] = ContextCache{
			Type:       c.Type,
			Level:      c.Level,
			Size:       c.Size,
			NumSharing: c.NumSharing,
		}
	}
	for i, b := range data.Benchmarks {
		out.Benchmarks[i] = Benchmark{
			Name:          trimAggregateSuffix(b.Name),
			Duration:      time.Nanosecond * time.Duration(int64(b.Time)),
			AggregateType: b.AggregateType,
		}
		t.parseName()
		out.Tests[i] = t
	}

	return out, nil
}

// Diff describes the difference between two benchmarks
type Diff struct {
	TestName           string
	Delta              time.Duration // Δ (A → B)
	PercentChangeAB    float64       // % (A → B)
	PercentChangeBA    float64       // % (B → A)
	MultiplierChangeAB float64       // × (A → B)
	MultiplierChangeBA float64       // × (B → A)
	TimeA              time.Duration // A
	TimeB              time.Duration // B
}

// Diffs is a list of Diff
type Diffs []Diff

// DiffFormat describes how a list of diffs should be formatted
type DiffFormat struct {
	TestName           bool
	Delta              bool
	PercentChangeAB    bool
	PercentChangeBA    bool
	MultiplierChangeAB bool
	MultiplierChangeBA bool
	TimeA              bool
	TimeB              bool
}

func (diffs Diffs) Format(f DiffFormat) string {
	if len(diffs) == 0 {
		return "<no changes>"
	}

	type row []string

	header := row{}
	if f.TestName {
		header = append(header, "Test name")
	}
	if f.Delta {
		header = append(header, "Δ (A → B)")
	}
	if f.PercentChangeAB {
		header = append(header, "% (A → B)")
	}
	if f.PercentChangeBA {
		header = append(header, "% (B → A)")
	}
	if f.MultiplierChangeAB {
		header = append(header, "× (A → B)")
	}
	if f.MultiplierChangeBA {
		header = append(header, "× (B → A)")
	}
	if f.TimeA {
		header = append(header, "A")
	}
	if f.TimeB {
		header = append(header, "B")
	}
	if len(header) == 0 {
		return ""
	}

	columns := []row{}
	for _, d := range diffs {
		r := make(row, 0, len(header))
		if f.TestName {
			r = append(r, d.TestName)
		}
		if f.Delta {
			r = append(r, fmt.Sprintf("%v", d.Delta))
		}
		if f.PercentChangeAB {
			r = append(r, fmt.Sprintf("%+2.1f%%", d.PercentChangeAB))
		}
		if f.PercentChangeBA {
			r = append(r, fmt.Sprintf("%+2.1f%%", d.PercentChangeBA))
		}
		if f.MultiplierChangeAB {
			r = append(r, fmt.Sprintf("%+.4f", d.MultiplierChangeAB))
		}
		if f.MultiplierChangeBA {
			r = append(r, fmt.Sprintf("%+.4f", d.MultiplierChangeBA))
		}
		if f.TimeA {
			r = append(r, fmt.Sprintf("%v", d.TimeA))
		}
		if f.TimeB {
			r = append(r, fmt.Sprintf("%v", d.TimeB))
		}
		columns = append(columns, r)
	}

	// measure
	widths := make([]int, len(header))
	for i, h := range header {
		widths[i] = utf8.RuneCountInString(h)
	}
	for _, row := range columns {
		for i, cell := range row {
			l := utf8.RuneCountInString(cell)
			if widths[i] < l {
				widths[i] = l
			}
		}
	}

	pad := func(s string, i int) string {
		if n := i - utf8.RuneCountInString(s); n > 0 {
			return s + strings.Repeat(" ", n)
		}
		return s
	}

	// Draw table
	b := &strings.Builder{}

	horizontal_bar := func() {
		for i := range header {
			fmt.Fprintf(b, "+%v", strings.Repeat("-", 2+widths[i]))
		}
		fmt.Fprintln(b, "+")
	}

	horizontal_bar()

	for i, h := range header {
		fmt.Fprintf(b, "| %v ", pad(h, widths[i]))
	}
	fmt.Fprintln(b, "|")

	horizontal_bar()

	for _, row := range columns {
		for i, cell := range row {
			fmt.Fprintf(b, "| %v ", pad(cell, widths[i]))
		}
		fmt.Fprintln(b, "|")
	}

	horizontal_bar()

	return b.String()
}

// Compare returns the differences between the two sets of benchmarks a and b.
// Absolute benchmark differences less than minDiff are omitted.
// Absolute relative differences between [1, 1+minRelDiff] are omitted.
func Compare(a, b []Benchmark, minDiff time.Duration, minRelDiff float64) Diffs {
	type times struct {
		a time.Duration
		b time.Duration
	}
	byName := map[string]times{}
	for _, test := range a {
		byName[test.Name] = times{a: test.Duration}
	}
	for _, test := range b {
		t := byName[test.Name]
		t.b = test.Duration
		byName[test.Name] = t
	}

	type delta struct {
		name       string
		times      times
		relDiff    float64
		absRelDiff float64
	}
	deltas := []delta{}
	for name, times := range byName {
		if times.a == 0 || times.b == 0 {
			continue // Assuming test was missing from a or b
		}
		diff := times.b - times.a
		absDiff := diff
		if absDiff < 0 {
			absDiff = -absDiff
		}
		if absDiff < minDiff {
			continue
		}

		relDiff := float64(times.b) / float64(times.a)
		absRelDiff := relDiff
		if absRelDiff < 1 {
			absRelDiff = 1.0 / absRelDiff
		}
		if absRelDiff < (1.0 + minRelDiff) {
			continue
		}

		d := delta{
			name:       name,
			times:      times,
			relDiff:    relDiff,
			absRelDiff: absRelDiff,
		}
		deltas = append(deltas, d)
	}

	sort.Slice(deltas, func(i, j int) bool { return deltas[j].relDiff < deltas[i].relDiff })

	out := make(Diffs, len(deltas))

	for i, delta := range deltas {
		a2b := delta.times.b - delta.times.a
		out[i] = Diff{
			TestName:           delta.name,
			Delta:              a2b,
			PercentChangeAB:    100 * float64(a2b) / float64(delta.times.a),
			PercentChangeBA:    100 * float64(-a2b) / float64(delta.times.b),
			MultiplierChangeAB: float64(delta.times.b) / float64(delta.times.a),
			MultiplierChangeBA: float64(delta.times.a) / float64(delta.times.b),
			TimeA:              delta.times.a,
			TimeB:              delta.times.b,
		}
	}
	return out
}

func trimAggregateSuffix(name string) string {
	name = strings.TrimSuffix(name, "_stddev")
	name = strings.TrimSuffix(name, "_mean")
	name = strings.TrimSuffix(name, "_median")
	return name
}
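Because trimAggregateSuffix strips the _mean/_median/_stddev suffixes, a repeated run parsed from JSON yields several Benchmark entries sharing one Name, distinguished only by AggregateType (non-aggregate entries come back with an empty AggregateType, as the MyBenchmark entries in the test file below show). A caller comparing two runs would typically pick one aggregate per name first; the helper below is an assumed illustration of that step, not part of this change.

// pickAggregate returns one Benchmark per name from a parsed run, preferring
// the requested aggregate (e.g. bench.Mean) and falling back to the first
// entry seen for that name. Illustrative only; not part of the CL.
package main

import (
	"fmt"
	"time"

	"dawn.googlesource.com/tint/tools/src/bench"
)

func pickAggregate(run bench.Run, want bench.AggregateType) []bench.Benchmark {
	byName := map[string]bench.Benchmark{}
	order := []string{}
	for _, b := range run.Benchmarks {
		existing, seen := byName[b.Name]
		switch {
		case !seen:
			byName[b.Name] = b
			order = append(order, b.Name)
		case b.AggregateType == want && existing.AggregateType != want:
			byName[b.Name] = b
		}
	}
	out := make([]bench.Benchmark, 0, len(order))
	for _, name := range order {
		out = append(out, byName[name])
	}
	return out
}

func main() {
	// runA and runB would normally come from bench.Parse; empty runs keep
	// this sketch runnable on its own.
	runA, runB := bench.Run{}, bench.Run{}
	a := pickAggregate(runA, bench.Mean)
	b := pickAggregate(runB, bench.Mean)
	diffs := bench.Compare(a, b, time.Microsecond, 0.01)
	fmt.Println(diffs.Format(bench.DiffFormat{TestName: true, Delta: true}))
}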
tools/src/bench/bench_test.go (new file, 244 lines)
@@ -0,0 +1,244 @@
// Copyright 2022 The Tint Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bench_test

import (
	"reflect"
	"testing"
	"time"

	"dawn.googlesource.com/tint/tools/src/bench"
)

func TestParseJson(t *testing.T) {
	json := `
{
	"context": {
		"date": "2022-01-24T10:28:13+00:00",
		"host_name": "hostname",
		"executable": "./myexe",
		"num_cpus": 16,
		"mhz_per_cpu": 2400,
		"cpu_scaling_enabled": false,
		"caches": [
			{
				"type": "Data",
				"level": 1,
				"size": 32768,
				"num_sharing": 2
			},
			{
				"type": "Instruction",
				"level": 1,
				"size": 32768,
				"num_sharing": 2
			},
			{
				"type": "Unified",
				"level": 2,
				"size": 262144,
				"num_sharing": 2
			},
			{
				"type": "Unified",
				"level": 3,
				"size": 16777216,
				"num_sharing": 16
			}
		],
		"load_avg": [2.60938,2.59863,2.55566],
		"library_build_type": "release"
	},
	"benchmarks": [
		{
			"name": "MyBenchmark",
			"family_index": 0,
			"per_family_instance_index": 0,
			"run_name": "MyBenchmark",
			"run_type": "iteration",
			"repetitions": 2,
			"repetition_index": 0,
			"threads": 1,
			"iterations": 402,
			"real_time": 1.6392272438353568e+06,
			"cpu_time": 1.6387412935323382e+06,
			"time_unit": "ns"
		},
		{
			"name": "MyBenchmark",
			"family_index": 0,
			"per_family_instance_index": 0,
			"run_name": "MyBenchmark",
			"run_type": "iteration",
			"repetitions": 2,
			"repetition_index": 1,
			"threads": 1,
			"iterations": 402,
			"real_time": 1.7143936117703272e+06,
			"cpu_time": 1.7124004975124374e+06,
			"time_unit": "ns"
		},
		{
			"name": "MyBenchmark_mean",
			"family_index": 0,
			"per_family_instance_index": 0,
			"run_name": "MyBenchmark",
			"run_type": "aggregate",
			"repetitions": 2,
			"threads": 1,
			"aggregate_name": "mean",
			"iterations": 2,
			"real_time": 1.6768104278028419e+06,
			"cpu_time": 1.6755708955223879e+06,
			"time_unit": "ns"
		},
		{
			"name": "MyBenchmark_median",
			"family_index": 0,
			"per_family_instance_index": 0,
			"run_name": "MyBenchmark",
			"run_type": "aggregate",
			"repetitions": 2,
			"threads": 1,
			"aggregate_name": "median",
			"iterations": 2,
			"real_time": 1.6768104278028419e+06,
			"cpu_time": 1.6755708955223879e+06,
			"time_unit": "ns"
		},
		{
			"name": "MyBenchmark_stddev",
			"family_index": 0,
			"per_family_instance_index": 0,
			"run_name": "MyBenchmark",
			"run_type": "aggregate",
			"repetitions": 2,
			"threads": 1,
			"aggregate_name": "stddev",
			"iterations": 2,
			"real_time": 5.3150648483981553e+04,
			"cpu_time": 5.2084922631119407e+04,
			"time_unit": "ns"
		}
	]
}
`
	got, err := bench.Parse(json)
	if err != nil {
		t.Errorf("bench.Parse() returned %v", err)
		return
	}

	expectedDate, err := time.Parse(time.RFC1123, "Mon, 24 Jan 2022 10:28:13 GMT")
	if err != nil {
		t.Errorf("time.Parse() returned %v", err)
		return
	}

	expect := bench.Run{
		Benchmarks: []bench.Benchmark{
			{Name: "MyBenchmark", Duration: time.Nanosecond * 1639227, AggregateType: ""},
			{Name: "MyBenchmark", Duration: time.Nanosecond * 1714393, AggregateType: ""},
			{Name: "MyBenchmark", Duration: time.Nanosecond * 1676810, AggregateType: "mean"},
			{Name: "MyBenchmark", Duration: time.Nanosecond * 1676810, AggregateType: "median"},
			{Name: "MyBenchmark", Duration: time.Nanosecond * 53150, AggregateType: "stddev"},
		},
		Context: &bench.Context{
			Date:       expectedDate,
			HostName:   "hostname",
			Executable: "./myexe",
			NumCPUs:    16,
			MhzPerCPU:  2400, CPUScalingEnabled: false,
			Caches: []bench.ContextCache{
				{Type: "Data", Level: 1, Size: 32768, NumSharing: 2},
				{Type: "Instruction", Level: 1, Size: 32768, NumSharing: 2},
				{Type: "Unified", Level: 2, Size: 262144, NumSharing: 2},
				{Type: "Unified", Level: 3, Size: 16777216, NumSharing: 16},
			},
			LoadAvg: []float32{2.60938, 2.59863, 2.55566}, LibraryBuildType: "release"},
	}

	expectEqual(t, "bench.Parse().Benchmarks", got.Benchmarks, expect.Benchmarks)
	expectEqual(t, "bench.Parse().Context", got.Context, expect.Context)
}

func TestCompare(t *testing.T) {
	a := []bench.Benchmark{
		{Name: "MyBenchmark1", Duration: time.Nanosecond * 1714393},
		{Name: "MyBenchmark0", Duration: time.Nanosecond * 1639227},
		{Name: "MyBenchmark3", Duration: time.Nanosecond * 1676810},
		{Name: "MyBenchmark4", Duration: time.Nanosecond * 53150},
		{Name: "MyBenchmark2", Duration: time.Nanosecond * 1676810},
	}
	b := []bench.Benchmark{
		{Name: "MyBenchmark1", Duration: time.Nanosecond * 56747654},
		{Name: "MyBenchmark0", Duration: time.Nanosecond * 236246},
		{Name: "MyBenchmark2", Duration: time.Nanosecond * 675865},
		{Name: "MyBenchmark4", Duration: time.Nanosecond * 2352336},
		{Name: "MyBenchmark3", Duration: time.Nanosecond * 87657868},
	}

	minDiff := time.Millisecond * 2
	minRelDiff := 35.0

	cmp := bench.Compare(a, b, minDiff, minRelDiff)

	expectEqual(t, "bench.Compare().Format", cmp.Format(bench.DiffFormat{}), "")
	expectEqual(t, "bench.Compare().Format", "\n"+cmp.Format(bench.DiffFormat{TimeA: true}), `
| A |
|-----------|
| 1.67681ms |
| 53.15µs |
`)
	expectEqual(t, "bench.Compare().Format", "\n"+cmp.Format(bench.DiffFormat{TimeA: true, TimeB: true}), `
| A | B |
|-----------+-------------|
| 1.67681ms | 87.657868ms |
| 53.15µs | 2.352336ms |
`)
	expectEqual(t, "bench.Compare().Format", "\n"+cmp.Format(bench.DiffFormat{
		TestName:        true,
		Delta:           true,
		PercentChangeAB: true,
		TimeA:           true,
		TimeB:           true,
	}), `
| Test name | Δ (A → B) | % (A → B) | A | B |
|--------------+-------------+-----------+-----------+-------------|
| MyBenchmark3 | 85.981058ms | +5127.7% | 1.67681ms | 87.657868ms |
| MyBenchmark4 | 2.299186ms | +4325.8% | 53.15µs | 2.352336ms |
`)
	expectEqual(t, "bench.Compare().Format", "\n"+cmp.Format(bench.DiffFormat{
		TestName:           true,
		Delta:              true,
		PercentChangeAB:    true,
		PercentChangeBA:    true,
		MultiplierChangeAB: true,
		MultiplierChangeBA: true,
		TimeA:              true,
		TimeB:              true,
	}), `
| Test name | Δ (A → B) | % (A → B) | % (B → A) | × (A → B) | × (B → A) | A | B |
|--------------+-------------+-----------+-----------+-----------+-----------+-----------+-------------|
| MyBenchmark3 | 85.981058ms | +5127.7% | -98.1% | +52.2766 | +0.0191 | 1.67681ms | 87.657868ms |
| MyBenchmark4 | 2.299186ms | +4325.8% | -97.7% | +44.2584 | +0.0226 | 53.15µs | 2.352336ms |
`)
}

func expectEqual(t *testing.T, desc string, got, expect interface{}) {
	if !reflect.DeepEqual(got, expect) {
		t.Errorf("%v was not expected:\nGot:\n%+v\nExpected:\n%+v", desc, got, expect)
	}
}