Add Dawn perf test harness

This patch adds a perf test harness for Dawn and a simple test of
buffer upload performance. The test harness is based off of ANGLE's
perf tests.

Because perf tests are parameterized to support multiple test
variants, this patch also adds DawnTestWithParams and ParamGenerator
to support instantiating tests with additional parameters.

Bug: dawn:208
Change-Id: I60df730e9f9f21a4c29fc21ea1a8315e4fff1aa6
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/10340
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>
This commit is contained in:
Austin Eng 2019-08-28 23:18:10 +00:00 committed by Commit Bot service account
parent 650859b420
commit ca0eac314b
13 changed files with 1200 additions and 71 deletions

View File

@ -573,8 +573,17 @@ static_library("dawn_utils") {
"src/utils/SystemUtils.h",
"src/utils/TerribleCommandBuffer.cpp",
"src/utils/TerribleCommandBuffer.h",
"src/utils/Timer.h",
]
if (is_win) {
sources += [ "src/utils/WindowsTimer.cpp" ]
} else if (is_mac) {
sources += [ "src/utils/OSXTimer.cpp" ]
} else {
sources += [ "src/utils/PosixTimer.cpp" ]
}
public_deps = [
"${dawn_root}/src/dawn:dawn_headers",
]
@ -873,6 +882,46 @@ test("dawn_end2end_tests") {
}
}
# Standalone perf test binary. Reuses the shared DawnTest harness sources and
# adds the perf-test-specific harness (DawnPerfTest.*) and benchmarks.
test("dawn_perf_tests") {
configs += [ "${dawn_root}/src/common:dawn_internal" ]
deps = [
":dawn_utils",
":libdawn_native",
":libdawn_wire",
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:libdawn",
"third_party:gmock_and_gtest",
]
sources = [
"src/tests/DawnTest.cpp",
"src/tests/DawnTest.h",
"src/tests/ParamGenerator.h",
"src/tests/perf_tests/BufferUploadPerf.cpp",
"src/tests/perf_tests/DawnPerfTest.cpp",
"src/tests/perf_tests/DawnPerfTest.h",
]
libs = []
# When building inside Chromium, use their gtest main function because it is
# needed to run in swarming correctly.
if (build_with_chromium) {
sources += [ "//gpu/dawn_perf_tests_main.cc" ]
} else {
sources += [ "src/tests/PerfTestsMain.cpp" ]
}
# NOTE(review): presumably required by the Metal backend — confirm.
if (dawn_enable_metal) {
libs += [ "IOSurface.framework" ]
}
# GLFW is pulled in for the OpenGL backend's context/window handling.
if (dawn_enable_opengl) {
deps += [ "third_party:glfw" ]
}
}
# Temporary groups to make a 5-way patch to fix crbug.com/913171
group("dawn_unittests_temp_group") {
testonly = true
@ -888,6 +937,13 @@ group("dawn_end2end_tests_temp_group") {
]
}
group("dawn_perf_tests_temp_group") {
testonly = true
deps = [
":dawn_perf_tests",
]
}
###############################################################################
# Dawn samples, only in standalone builds
###############################################################################

149
scripts/perf_test_runner.py Executable file
View File

@ -0,0 +1,149 @@
#!/usr/bin/python2
#
# Copyright 2019 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on ANGLE's perf_test_runner.py
import glob
import subprocess
import sys
import os
import re
# Root of the Dawn checkout: this script lives in <root>/scripts/.
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))

# Look for a [Rr]elease build.
perftests_paths = glob.glob('out/*elease*')

# Metric extracted from the test output, and how many experiments to run.
metric = 'wall_time'
max_experiments = 10

# Platform-appropriate executable name.
binary_name = 'dawn_perf_tests' + ('.exe' if sys.platform == 'win32' else '')

# Every score seen across all experiments is accumulated here.
scores = []
def mean(data):
    """Return the sample arithmetic mean of data."""
    count = len(data)
    if count < 1:
        raise ValueError('mean requires at least one data point')
    total = sum(data)
    # Explicit float division so the result is exact under Python 2 as well.
    return float(total) / float(count)
def sum_of_square_deviations(data, c):
    """Return sum of square deviations of sequence data."""
    # Accumulate (x - c)^2 explicitly; starts at int 0 so an empty
    # sequence yields 0 exactly as the original generator-sum did.
    total = 0
    for x in data:
        deviation = float(x) - c
        total += deviation * deviation
    return total
def coefficient_of_variation(data):
    """Calculates the population coefficient of variation."""
    count = len(data)
    if count < 2:
        raise ValueError('variance requires at least two data points')
    avg = mean(data)
    # Population (biased) variance: squared deviations divided by N.
    population_variance = sum_of_square_deviations(data, avg) / count
    stddev = population_variance ** 0.5
    return stddev / avg
def truncated_list(data, n):
    """Compute a truncated list, n is truncation size"""
    if 2 * n > len(data):
        raise ValueError('list not large enough to truncate')
    # Keep the [n:-n] slice of the sorted data. Note: with n == 0 this
    # intentionally matches the original [0:-0] slice, which is empty.
    ordered = sorted(data)
    return ordered[n:-n]
def truncated_mean(data, n):
    """Compute a truncated mean, n is truncation size"""
    trimmed = truncated_list(data, n)
    return mean(trimmed)
def truncated_cov(data, n):
    """Compute a truncated coefficient of variation, n is truncation size"""
    trimmed = truncated_list(data, n)
    return coefficient_of_variation(trimmed)
# Find most recent binary among the Release output directories.
newest_binary = None
newest_mtime = None
for path in perftests_paths:
    binary_path = os.path.join(base_path, path, binary_name)
    if os.path.exists(binary_path):
        binary_mtime = os.path.getmtime(binary_path)
        if (newest_binary is None) or (binary_mtime > newest_mtime):
            newest_binary = binary_path
            newest_mtime = binary_mtime

perftests_path = newest_binary

# 'is None' instead of '== None': identity is the correct (and idiomatic)
# check for the no-binary-found sentinel.
if perftests_path is None or not os.path.exists(perftests_path):
    print('Cannot find Release %s!' % binary_name)
    sys.exit(1)

# The gtest filter must be supplied on the command line. Without this guard,
# the later uses of test_name raised a NameError when no argument was given.
if len(sys.argv) >= 2:
    test_name = sys.argv[1]
else:
    print('Usage: %s <gtest_filter>' % sys.argv[0])
    sys.exit(1)

print('Using test executable: ' + perftests_path)
print('Test name: ' + test_name)
def get_results(metric, extra_args=[]):
    """Run the perf test binary once and return all values printed for
    |metric|. Exits the script if zero or more than one test matched the
    filter."""
    process = subprocess.Popen(
        [perftests_path, '--gtest_filter=' + test_name] + extra_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    output, err = process.communicate()

    # The harness must run exactly one test, otherwise the metric values
    # would be ambiguous.
    m = re.search(r'Running (\d+) tests', output)
    if m and int(m.group(1)) > 1:
        print("Found more than one test result in output:")
        print(output)
        sys.exit(3)

    pattern = metric + r'= ([0-9.]+)'
    m = re.findall(pattern, output)
    # BUG FIX: re.findall never returns None — it returns an empty list when
    # nothing matches, so the original 'is None' check was dead and callers
    # crashed with IndexError instead of getting this diagnostic.
    if not m:
        print("Did not find the metric '%s' in the test output:" % metric)
        print(output)
        sys.exit(1)

    return [float(value) for value in m]
# Calibrate the number of steps. The binary reports how many steps fit in the
# calibration time; reuse that count for every measured experiment below.
# int() because step counts are integral: the raw value is a float, and
# str(float) would pass e.g. "50.0" to --override-steps.
steps = int(get_results("steps", ["--calibration"])[0])
print("running with %d steps." % steps)

# Loop 'max_experiments' times, running the tests.
for experiment in range(max_experiments):
    experiment_scores = get_results(metric, ["--override-steps", str(steps)])

    for score in experiment_scores:
        sys.stdout.write("%s: %.2f" % (metric, score))
        scores.append(score)

        if (len(scores) > 1):
            sys.stdout.write(", mean: %.2f" % mean(scores))
            sys.stdout.write(", variation: %.2f%%" %
                             (coefficient_of_variation(scores) * 100.0))

        if (len(scores) > 7):
            # Truncate the top and bottom eighth of the samples.
            truncation_n = len(scores) >> 3
            sys.stdout.write(", truncated mean: %.2f" %
                             truncated_mean(scores, truncation_n))
            sys.stdout.write(", variation: %.2f%%" %
                             (truncated_cov(scores, truncation_n) * 100.0))

        print("")

View File

@ -71,7 +71,7 @@ namespace {
}
struct MapReadUserdata {
DawnTest* test;
DawnTestBase* test;
size_t slot;
};
@ -154,6 +154,11 @@ DawnTestEnvironment::DawnTestEnvironment(int argc, char** argv) {
}
}
// static
void DawnTestEnvironment::SetEnvironment(DawnTestEnvironment* env) {
gTestEnv = env;
}
void DawnTestEnvironment::SetUp() {
mInstance = std::make_unique<dawn_native::Instance>();
mInstance->EnableBackendValidation(mEnableBackendValidation);
@ -252,9 +257,10 @@ void DawnTestEnvironment::DiscoverOpenGLAdapter() {
// Implementation of DawnTest
DawnTest::DawnTest() = default;
DawnTestBase::DawnTestBase(const DawnTestParam& param) : mParam(param) {
}
DawnTest::~DawnTest() {
DawnTestBase::~DawnTestBase() {
// We need to destroy child objects before the Device
mReadbackSlots.clear();
queue = dawn::Queue();
@ -269,47 +275,47 @@ DawnTest::~DawnTest() {
dawnSetProcs(nullptr);
}
bool DawnTest::IsD3D12() const {
return GetParam().backendType == dawn_native::BackendType::D3D12;
bool DawnTestBase::IsD3D12() const {
return mParam.backendType == dawn_native::BackendType::D3D12;
}
bool DawnTest::IsMetal() const {
return GetParam().backendType == dawn_native::BackendType::Metal;
bool DawnTestBase::IsMetal() const {
return mParam.backendType == dawn_native::BackendType::Metal;
}
bool DawnTest::IsOpenGL() const {
return GetParam().backendType == dawn_native::BackendType::OpenGL;
bool DawnTestBase::IsOpenGL() const {
return mParam.backendType == dawn_native::BackendType::OpenGL;
}
bool DawnTest::IsVulkan() const {
return GetParam().backendType == dawn_native::BackendType::Vulkan;
bool DawnTestBase::IsVulkan() const {
return mParam.backendType == dawn_native::BackendType::Vulkan;
}
bool DawnTest::IsAMD() const {
bool DawnTestBase::IsAMD() const {
return mPCIInfo.vendorId == kVendorID_AMD;
}
bool DawnTest::IsARM() const {
bool DawnTestBase::IsARM() const {
return mPCIInfo.vendorId == kVendorID_ARM;
}
bool DawnTest::IsImgTec() const {
bool DawnTestBase::IsImgTec() const {
return mPCIInfo.vendorId == kVendorID_ImgTec;
}
bool DawnTest::IsIntel() const {
bool DawnTestBase::IsIntel() const {
return mPCIInfo.vendorId == kVendorID_Intel;
}
bool DawnTest::IsNvidia() const {
bool DawnTestBase::IsNvidia() const {
return mPCIInfo.vendorId == kVendorID_Nvidia;
}
bool DawnTest::IsQualcomm() const {
bool DawnTestBase::IsQualcomm() const {
return mPCIInfo.vendorId == kVendorID_Qualcomm;
}
bool DawnTest::IsWindows() const {
bool DawnTestBase::IsWindows() const {
#ifdef DAWN_PLATFORM_WINDOWS
return true;
#else
@ -317,7 +323,7 @@ bool DawnTest::IsWindows() const {
#endif
}
bool DawnTest::IsLinux() const {
bool DawnTestBase::IsLinux() const {
#ifdef DAWN_PLATFORM_LINUX
return true;
#else
@ -325,7 +331,7 @@ bool DawnTest::IsLinux() const {
#endif
}
bool DawnTest::IsMacOS() const {
bool DawnTestBase::IsMacOS() const {
#ifdef DAWN_PLATFORM_APPLE
return true;
#else
@ -333,29 +339,29 @@ bool DawnTest::IsMacOS() const {
#endif
}
bool DawnTest::UsesWire() const {
bool DawnTestBase::UsesWire() const {
return gTestEnv->UsesWire();
}
bool DawnTest::IsBackendValidationEnabled() const {
bool DawnTestBase::IsBackendValidationEnabled() const {
return gTestEnv->IsBackendValidationEnabled();
}
bool DawnTest::HasVendorIdFilter() const {
bool DawnTestBase::HasVendorIdFilter() const {
return gTestEnv->HasVendorIdFilter();
}
uint32_t DawnTest::GetVendorIdFilter() const {
uint32_t DawnTestBase::GetVendorIdFilter() const {
return gTestEnv->GetVendorIdFilter();
}
std::vector<const char*> DawnTest::GetRequiredExtensions() {
std::vector<const char*> DawnTestBase::GetRequiredExtensions() {
return {};
}
// This function can only be called after SetUp() because it requires mBackendAdapter to be
// initialized.
bool DawnTest::SupportsExtensions(const std::vector<const char*>& extensions) {
bool DawnTestBase::SupportsExtensions(const std::vector<const char*>& extensions) {
ASSERT(mBackendAdapter);
std::set<std::string> supportedExtensionsSet;
@ -372,9 +378,9 @@ bool DawnTest::SupportsExtensions(const std::vector<const char*>& extensions) {
return true;
}
void DawnTest::SetUp() {
void DawnTestBase::SetUp() {
// Initialize mBackendAdapter, and create the device.
const dawn_native::BackendType backendType = GetParam().backendType;
const dawn_native::BackendType backendType = mParam.backendType;
{
dawn_native::Instance* instance = gTestEnv->GetInstance();
std::vector<dawn_native::Adapter> adapters = instance->GetAdapters();
@ -406,15 +412,15 @@ void DawnTest::SetUp() {
mPCIInfo = mBackendAdapter.GetPCIInfo();
for (const char* forceEnabledWorkaround : GetParam().forceEnabledWorkarounds) {
for (const char* forceEnabledWorkaround : mParam.forceEnabledWorkarounds) {
ASSERT(gTestEnv->GetInstance()->GetToggleInfo(forceEnabledWorkaround) != nullptr);
}
for (const char* forceDisabledWorkaround : GetParam().forceDisabledWorkarounds) {
for (const char* forceDisabledWorkaround : mParam.forceDisabledWorkarounds) {
ASSERT(gTestEnv->GetInstance()->GetToggleInfo(forceDisabledWorkaround) != nullptr);
}
dawn_native::DeviceDescriptor deviceDescriptor;
deviceDescriptor.forceEnabledToggles = GetParam().forceEnabledWorkarounds;
deviceDescriptor.forceDisabledToggles = GetParam().forceDisabledWorkarounds;
deviceDescriptor.forceEnabledToggles = mParam.forceEnabledWorkarounds;
deviceDescriptor.forceDisabledToggles = mParam.forceDisabledWorkarounds;
deviceDescriptor.requiredExtensions = GetRequiredExtensions();
backendDevice = mBackendAdapter.CreateDevice(&deviceDescriptor);
ASSERT_NE(nullptr, backendDevice);
@ -452,8 +458,8 @@ void DawnTest::SetUp() {
cDevice = backendDevice;
}
// Set up the device and queue because all tests need them, and DawnTest needs them too for the
// deferred expectations.
// Set up the device and queue because all tests need them, and DawnTestBase needs them too for
// the deferred expectations.
dawnSetProcs(&procs);
device = dawn::Device::Acquire(cDevice);
queue = device.CreateQueue();
@ -461,7 +467,7 @@ void DawnTest::SetUp() {
device.SetUncapturedErrorCallback(OnDeviceError, this);
}
void DawnTest::TearDown() {
void DawnTestBase::TearDown() {
FlushWire();
MapSlotsSynchronously();
@ -472,26 +478,26 @@ void DawnTest::TearDown() {
}
}
void DawnTest::StartExpectDeviceError() {
void DawnTestBase::StartExpectDeviceError() {
mExpectError = true;
mError = false;
}
bool DawnTest::EndExpectDeviceError() {
bool DawnTestBase::EndExpectDeviceError() {
mExpectError = false;
return mError;
}
// static
void DawnTest::OnDeviceError(DawnErrorType type, const char* message, void* userdata) {
void DawnTestBase::OnDeviceError(DawnErrorType type, const char* message, void* userdata) {
ASSERT(type != DAWN_ERROR_TYPE_NO_ERROR);
DawnTest* self = static_cast<DawnTest*>(userdata);
DawnTestBase* self = static_cast<DawnTestBase*>(userdata);
ASSERT_TRUE(self->mExpectError) << "Got unexpected device error: " << message;
ASSERT_FALSE(self->mError) << "Got two errors in expect block";
self->mError = true;
}
std::ostringstream& DawnTest::AddBufferExpectation(const char* file,
std::ostringstream& DawnTestBase::AddBufferExpectation(const char* file,
int line,
const dawn::Buffer& buffer,
uint64_t offset,
@ -522,7 +528,7 @@ std::ostringstream& DawnTest::AddBufferExpectation(const char* file,
return *(mDeferredExpectations.back().message.get());
}
std::ostringstream& DawnTest::AddTextureExpectation(const char* file,
std::ostringstream& DawnTestBase::AddTextureExpectation(const char* file,
int line,
const dawn::Texture& texture,
uint32_t x,
@ -567,14 +573,14 @@ std::ostringstream& DawnTest::AddTextureExpectation(const char* file,
return *(mDeferredExpectations.back().message.get());
}
void DawnTest::WaitABit() {
void DawnTestBase::WaitABit() {
device.Tick();
FlushWire();
utils::USleep(100);
}
void DawnTest::FlushWire() {
void DawnTestBase::FlushWire() {
if (gTestEnv->UsesWire()) {
bool C2SFlushed = mC2sBuf->Flush();
bool S2CFlushed = mS2cBuf->Flush();
@ -583,7 +589,7 @@ void DawnTest::FlushWire() {
}
}
DawnTest::ReadbackReservation DawnTest::ReserveReadback(uint64_t readbackSize) {
DawnTestBase::ReadbackReservation DawnTestBase::ReserveReadback(uint64_t readbackSize) {
// For now create a new MapRead buffer for each readback
// TODO(cwallez@chromium.org): eventually make bigger buffers and allocate linearly?
dawn::BufferDescriptor descriptor;
@ -603,7 +609,7 @@ DawnTest::ReadbackReservation DawnTest::ReserveReadback(uint64_t readbackSize) {
return reservation;
}
void DawnTest::MapSlotsSynchronously() {
void DawnTestBase::MapSlotsSynchronously() {
// Initialize numPendingMapOperations before mapping, just in case the callback is called
// immediately.
mNumPendingMapOperations = mReadbackSlots.size();
@ -623,7 +629,7 @@ void DawnTest::MapSlotsSynchronously() {
}
// static
void DawnTest::SlotMapReadCallback(DawnBufferMapAsyncStatus status,
void DawnTestBase::SlotMapReadCallback(DawnBufferMapAsyncStatus status,
const void* data,
uint64_t,
void* userdata_) {
@ -636,7 +642,7 @@ void DawnTest::SlotMapReadCallback(DawnBufferMapAsyncStatus status,
delete userdata;
}
void DawnTest::ResolveExpectations() {
void DawnTestBase::ResolveExpectations() {
for (const auto& expectation : mDeferredExpectations) {
DAWN_ASSERT(mReadbackSlots[expectation.readbackSlot].mappedData != nullptr);

View File

@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESTS_DAWNTEST_H_
#define TESTS_DAWNTEST_H_
#include "dawn/dawncpp.h"
#include "dawn_native/DawnNative.h"
@ -115,6 +118,8 @@ class DawnTestEnvironment : public testing::Environment {
DawnTestEnvironment(int argc, char** argv);
~DawnTestEnvironment() = default;
static void SetEnvironment(DawnTestEnvironment* env);
void SetUp() override;
bool UsesWire() const;
@ -134,13 +139,15 @@ class DawnTestEnvironment : public testing::Environment {
std::unique_ptr<dawn_native::Instance> mInstance;
};
class DawnTest : public ::testing::TestWithParam<DawnTestParam> {
public:
DawnTest();
~DawnTest();
class DawnTestBase {
friend class DawnPerfTestBase;
void SetUp() override;
void TearDown() override;
public:
DawnTestBase(const DawnTestParam& param);
virtual ~DawnTestBase();
void SetUp();
void TearDown();
bool IsD3D12() const;
bool IsMetal() const;
@ -205,6 +212,8 @@ class DawnTest : public ::testing::TestWithParam<DawnTestParam> {
virtual std::vector<const char*> GetRequiredExtensions();
private:
DawnTestParam mParam;
// Things used to set up testing through the Wire.
std::unique_ptr<dawn_wire::WireServer> mWireServer;
std::unique_ptr<dawn_wire::WireClient> mWireClient;
@ -263,6 +272,27 @@ class DawnTest : public ::testing::TestWithParam<DawnTestParam> {
dawn_native::Adapter mBackendAdapter;
};
// Bridges DawnTestBase (backend setup, deferred expectations) with gtest's
// value-parameterized fixture so tests can be instantiated with extra
// parameters beyond the backend. Params is expected to be DawnTestParam or a
// struct derived from it (it is passed to the DawnTestBase constructor).
template <typename Params = DawnTestParam>
class DawnTestWithParams : public DawnTestBase, public ::testing::TestWithParam<Params> {
protected:
DawnTestWithParams();
~DawnTestWithParams() override = default;
// Forward gtest's fixture hooks to the non-gtest base implementation.
void SetUp() override {
DawnTestBase::SetUp();
}
void TearDown() override {
DawnTestBase::TearDown();
}
};
// Fetches this instantiation's parameter via this->GetParam() and hands it to
// DawnTestBase, which stores it as mParam.
template <typename Params>
DawnTestWithParams<Params>::DawnTestWithParams() : DawnTestBase(this->GetParam()) {
}
// Existing tests keep using the DawnTest name: the default instantiation.
using DawnTest = DawnTestWithParams<>;
// Instantiate the test once for each backend provided after the first argument. Use it like this:
// DAWN_INSTANTIATE_TEST(MyTestFixture, MetalBackend, OpenGLBackend)
#define DAWN_INSTANTIATE_TEST(testName, firstParam, ...) \
@ -310,3 +340,5 @@ namespace detail {
extern template class ExpectEq<uint32_t>;
extern template class ExpectEq<RGBA8>;
} // namespace detail
#endif // TESTS_DAWNTEST_H_

129
src/tests/ParamGenerator.h Normal file
View File

@ -0,0 +1,129 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESTS_PARAMGENERATOR_H_
#define TESTS_PARAMGENERATOR_H_
#include <array>
#include <cstddef>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>
// ParamGenerator yields every combination (cartesian product) of its
// parameter vectors as a ParamStruct, with the last parameter varying
// fastest. The types Params... should be the same as the types passed to
// the constructor of ParamStruct.
template <typename ParamStruct, typename... Params>
class ParamGenerator {
    using ParamTuple = std::tuple<std::vector<Params>...>;
    using Index = std::array<size_t, sizeof...(Params)>;

    static constexpr auto s_indexSequence = std::make_index_sequence<sizeof...(Params)>{};

    // Default template that returns the same params.
    template <typename P>
    static std::vector<P> FilterBackends(std::vector<P> params) {
        return params;
    }

    // Overload for DawnTestParam that filters the backends by those
    // supported. A plain overload is used instead of the original in-class
    // `template <>` explicit specialization, which is ill-formed per the
    // standard (explicit specializations must appear at namespace scope);
    // overload resolution prefers this exact match over the template above.
    static std::vector<DawnTestParam> FilterBackends(std::vector<DawnTestParam> params) {
        return ::detail::FilterBackends(params.data(), params.size());
    }

    // Using an N-dimensional Index, extract params from ParamTuple and pass
    // them to the constructor of ParamStruct.
    template <size_t... Is>
    static ParamStruct GetParam(const ParamTuple& params,
                                const Index& index,
                                std::index_sequence<Is...>) {
        return ParamStruct(std::get<Is>(params)[std::get<Is>(index)]...);
    }

    // Get the last value index into a ParamTuple.
    template <size_t... Is>
    static Index GetLastIndex(const ParamTuple& params, std::index_sequence<Is...>) {
        return Index{std::get<Is>(params).size() - 1 ...};
    }

  public:
    using value_type = ParamStruct;

    ParamGenerator(std::vector<Params>... params) : mParams(FilterBackends(params)...) {
    }

    // Forward iterator over every combination; materializes a ParamStruct on
    // dereference.
    class Iterator : public std::iterator<std::forward_iterator_tag, ParamStruct, size_t> {
      public:
        Iterator& operator++() {
            // Increment the Index by 1. If the i'th place reaches the maximum,
            // reset it to 0 and continue with the i+1'th place.
            for (int i = mIndex.size() - 1; i >= 0; --i) {
                if (mIndex[i] >= mLastIndex[i]) {
                    mIndex[i] = 0;
                } else {
                    mIndex[i]++;
                    return *this;
                }
            }

            // Set a marker that the iterator has reached the end.
            mEnd = true;
            return *this;
        }

        bool operator==(const Iterator& other) const {
            return mEnd == other.mEnd && mIndex == other.mIndex;
        }

        bool operator!=(const Iterator& other) const {
            return !(*this == other);
        }

        ParamStruct operator*() const {
            return GetParam(mParams, mIndex, s_indexSequence);
        }

      private:
        friend class ParamGenerator;

        Iterator(ParamTuple params, Index index)
            : mParams(params), mIndex(index), mLastIndex{GetLastIndex(params, s_indexSequence)} {
        }

        ParamTuple mParams;
        Index mIndex;
        Index mLastIndex;
        bool mEnd = false;
    };

    Iterator begin() const {
        return Iterator(mParams, {});
    }

    Iterator end() const {
        // One increment past the last valid index marks the end.
        Iterator iter(mParams, GetLastIndex(mParams, s_indexSequence));
        ++iter;
        return iter;
    }

  private:
    ParamTuple mParams;
};
template <typename Param, typename... Params>
auto MakeParamGenerator(std::initializer_list<Params>&&... params) {
return ParamGenerator<Param, Params...>(
std::forward<std::initializer_list<Params>&&>(params)...);
}
#endif // TESTS_PARAMGENERATOR_H_

View File

@ -0,0 +1,21 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tests/perf_tests/DawnPerfTest.h"
// Standalone entry point for dawn_perf_tests. (When building inside
// Chromium, a different gtest main is used instead — see the GN target.)
// Registers the perf test environment before running all gtest tests.
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
InitDawnPerfTestEnvironment(argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,122 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tests/perf_tests/DawnPerfTest.h"
#include "tests/ParamGenerator.h"
#include "utils/DawnHelpers.h"
namespace {

// Number of uploads performed per Step(); per-step overhead (submit and GPU
// wait) is amortized over these iterations when results are reported.
constexpr unsigned int kNumIterations = 50;
// 1 MiB of data per upload.
constexpr uint32_t kBufferSize = 1024 * 1024;

enum class UploadMethod {
SetSubData,
CreateBufferMapped,
};

// Perf-test parameter: the base Dawn params (backend, workarounds) plus the
// upload method under test.
struct BufferUploadParams : DawnTestParam {
BufferUploadParams(const DawnTestParam& param, UploadMethod uploadMethod)
: DawnTestParam(param), uploadMethod(uploadMethod) {
}

UploadMethod uploadMethod;
};

// Appends the upload-method suffix to the printed test variation name.
std::ostream& operator<<(std::ostream& ostream, const BufferUploadParams& param) {
ostream << static_cast<const DawnTestParam&>(param);

switch (param.uploadMethod) {
case UploadMethod::SetSubData:
ostream << "_SetSubData";
break;

case UploadMethod::CreateBufferMapped:
ostream << "_CreateBufferMapped";
break;
}
return ostream;
}

} // namespace
// Test uploading |kBufferSize| bytes of data |kNumIterations| times.
class BufferUploadPerf : public DawnPerfTestWithParams<BufferUploadParams> {
public:
BufferUploadPerf() : DawnPerfTestWithParams(kNumIterations), data(kBufferSize) {
}
~BufferUploadPerf() override = default;

void SetUp() override;

private:
// One timed batch of uploads; invoked repeatedly by the harness run loop.
void Step() override;

// Destination buffer, created once in SetUp() and reused across steps.
dawn::Buffer dst;
// Host-side source data; zero-initialized to kBufferSize bytes.
std::vector<uint8_t> data;
};

// Creates the CopyDst destination buffer after the base sets up the device.
void BufferUploadPerf::SetUp() {
DawnPerfTestWithParams<BufferUploadParams>::SetUp();

dawn::BufferDescriptor desc = {};
desc.size = kBufferSize;
desc.usage = dawn::BufferUsage::CopyDst;

dst = device.CreateBuffer(&desc);
}

void BufferUploadPerf::Step() {
switch (GetParam().uploadMethod) {
case UploadMethod::SetSubData: {
for (unsigned int i = 0; i < kNumIterations; ++i) {
dst.SetSubData(0, kBufferSize, data.data());
}
// Make sure all SetSubData's are flushed.
queue.Submit(0, nullptr);
} break;

case UploadMethod::CreateBufferMapped: {
dawn::BufferDescriptor desc = {};
desc.size = kBufferSize;
desc.usage = dawn::BufferUsage::CopySrc | dawn::BufferUsage::MapWrite;

dawn::CommandEncoder encoder = device.CreateCommandEncoder();

// Each iteration creates a fresh mapped staging buffer, fills it, and
// records a copy into the shared destination.
for (unsigned int i = 0; i < kNumIterations; ++i) {
auto result = device.CreateBufferMapped(&desc);
memcpy(result.data, data.data(), kBufferSize);
result.buffer.Unmap();
encoder.CopyBufferToBuffer(result.buffer, 0, dst, 0, kBufferSize);
}

dawn::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
} break;
}

// Wait for the GPU in this batch of iterations.
// If we don't wait, we can't properly compute the number of steps to run during
// calibration.
// The wait time gets amortized over the kNumIterations.
WaitForGPU();
}

TEST_P(BufferUploadPerf, Run) {
RunTest();
}

// Instantiate the test for every (backend, upload method) combination.
DAWN_INSTANTIATE_PERF_TEST_SUITE_P(BufferUploadPerf,
{D3D12Backend, MetalBackend, OpenGLBackend, VulkanBackend},
{UploadMethod::SetSubData, UploadMethod::CreateBufferMapped});

View File

@ -0,0 +1,220 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tests/perf_tests/DawnPerfTest.h"
#include "utils/Timer.h"
namespace {

// File-local handle to the perf environment; also registered with the base
// DawnTestEnvironment so the shared harness sees the same instance.
DawnPerfTestEnvironment* gTestEnv = nullptr;

constexpr double kMicroSecondsPerSecond = 1e6;
constexpr double kNanoSecondsPerSecond = 1e9;

} // namespace

// Creates the perf test environment and registers it with the Dawn test
// harness and with gtest. Called from main() before RUN_ALL_TESTS().
void InitDawnPerfTestEnvironment(int argc, char** argv) {
gTestEnv = new DawnPerfTestEnvironment(argc, argv);
DawnTestEnvironment::SetEnvironment(gTestEnv);
testing::AddGlobalTestEnvironment(gTestEnv);
}
// Parses perf-test-specific command line flags. The base class constructor
// handles the shared DawnTestEnvironment flags first.
DawnPerfTestEnvironment::DawnPerfTestEnvironment(int argc, char** argv)
    : DawnTestEnvironment(argc, argv) {
    for (int i = 1; i < argc; ++i) {
        if (strcmp("--calibration", argv[i]) == 0) {
            mIsCalibrating = true;
            continue;
        }

        if (strcmp("--override-steps", argv[i]) == 0) {
            // perf_test_runner.py passes the count as the following argument
            // ("--override-steps N"). The original code matched the bare flag
            // and then searched it for '=', which can never be present, so
            // the override was silently ignored.
            if (i + 1 < argc) {
                mOverrideStepsToRun = strtoul(argv[++i], nullptr, 0);
            }
            continue;
        }

        // Also accept the single-argument "--override-steps=N" spelling.
        if (strncmp("--override-steps=", argv[i], strlen("--override-steps=")) == 0) {
            const char* value = strchr(argv[i], '=');
            if (value != nullptr) {
                mOverrideStepsToRun = strtoul(value + 1, nullptr, 0);
            }
            continue;
        }

        if (strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
            std::cout << "Additional flags:"
                      << " [--calibration]\n"
                      << " --calibration: Only run calibration. Calibration allows the perf test"
                         " runner script to save some time.\n"
                      << std::endl;
            continue;
        }
    }
}
DawnPerfTestEnvironment::~DawnPerfTestEnvironment() = default;

void DawnPerfTestEnvironment::SetUp() {
DawnTestEnvironment::SetUp();
}

// True when only the calibration pass should run (set by --calibration).
bool DawnPerfTestEnvironment::IsCalibrating() const {
return mIsCalibrating;
}

// Non-zero when --override-steps supplied a fixed step count.
unsigned int DawnPerfTestEnvironment::OverrideStepsToRun() const {
return mOverrideStepsToRun;
}

// |test| provides the device/queue used by WaitForGPU(); |iterationsPerStep|
// normalizes the reported per-iteration timings in PrintResults().
DawnPerfTestBase::DawnPerfTestBase(DawnTestBase* test, unsigned int iterationsPerStep)
: mTest(test), mIterationsPerStep(iterationsPerStep), mTimer(utils::CreateTimer()) {
}

DawnPerfTestBase::~DawnPerfTestBase() = default;

// Stops the run loop after the current Step() returns.
void DawnPerfTestBase::AbortTest() {
mRunning = false;
}
// Blocks until previously submitted GPU work completes: signals a fence on
// the queue and spins on WaitABit() until the completion callback fires.
void DawnPerfTestBase::WaitForGPU() {
dawn::FenceDescriptor desc = {};
desc.initialValue = 0;

dawn::Fence fence = mTest->queue.CreateFence(&desc);
mTest->queue.Signal(fence, 1);

bool done = false;
fence.OnCompletion(1,
[](DawnFenceCompletionStatus status, void* userdata) {
ASSERT_EQ(status, DAWN_FENCE_COMPLETION_STATUS_SUCCESS);
*reinterpret_cast<bool*>(userdata) = true;
},
&done);

// WaitABit() ticks the device (and flushes the wire) so the callback can
// eventually be dispatched.
while (!done) {
mTest->WaitABit();
}
}
// Entry point called from the TEST_P body. Determines the step count (via
// calibration, or --override-steps), then runs kNumTrials timed trials and
// prints each trial's results.
void DawnPerfTestBase::RunTest() {
if (gTestEnv->OverrideStepsToRun() == 0) {
// Run to compute the approximate number of steps to perform.
mStepsToRun = std::numeric_limits<unsigned int>::max();

// Do a warmup run for calibration.
DoRunLoop(kCalibrationRunTimeSeconds);
DoRunLoop(kCalibrationRunTimeSeconds);

// Scale steps down according to the time that exceeded one second.
double scale = kCalibrationRunTimeSeconds / mTimer->GetElapsedTime();
mStepsToRun = static_cast<unsigned int>(static_cast<double>(mNumStepsPerformed) * scale);

// Calibration allows the perf test runner script to save some time.
if (gTestEnv->IsCalibrating()) {
PrintResult("steps", mStepsToRun, "count", false);
return;
}
} else {
mStepsToRun = gTestEnv->OverrideStepsToRun();
}

// Do another warmup run. Seems to consistently improve results.
DoRunLoop(kMaximumRunTimeSeconds);

for (unsigned int trial = 0; trial < kNumTrials; ++trial) {
DoRunLoop(kMaximumRunTimeSeconds);
PrintResults();
}
}
// Repeatedly calls Step() until |maxRunTime| elapses, the step budget
// (mStepsToRun) is exhausted, or a Step() cancels the run via AbortTest().
// On exit, mNumStepsPerformed holds the completed step count and the timer
// holds the elapsed time of this loop.
void DawnPerfTestBase::DoRunLoop(double maxRunTime) {
    mNumStepsPerformed = 0;
    mRunning = true;
    mTimer->Start();

    // A Step() implementation may end this loop early through AbortTest().
    while (mRunning) {
        Step();
        if (!mRunning) {
            break;
        }

        ++mNumStepsPerformed;
        if (mTimer->GetElapsedTime() > maxRunTime || mNumStepsPerformed >= mStepsToRun) {
            mRunning = false;
        }
    }

    mTimer->Stop();
}
// Reports per-iteration wall time — and GPU time, when it was measured — for
// the trial that just finished. Times are normalized by the number of steps
// and by mIterationsPerStep, and printed in us or ns depending on magnitude.
void DawnPerfTestBase::PrintResults() {
double elapsedTimeSeconds[2] = {
mTimer->GetElapsedTime(),
mGPUTimeNs * 1e-9,
};

const char* clockNames[2] = {
"wall_time",
"gpu_time",
};

// If measured gpu time is non-zero, print that too.
unsigned int clocksToOutput = mGPUTimeNs > 0 ? 2 : 1;
for (unsigned int i = 0; i < clocksToOutput; ++i) {
double secondsPerStep = elapsedTimeSeconds[i] / static_cast<double>(mNumStepsPerformed);
double secondsPerIteration = secondsPerStep / static_cast<double>(mIterationsPerStep);

// Give the result a different name to ensure separate graphs if we transition.
if (secondsPerIteration > 1e-3) {
double microSecondsPerIteration = secondsPerIteration * kMicroSecondsPerSecond;
PrintResult(clockNames[i], microSecondsPerIteration, "us", true);
} else {
double nanoSecPerIteration = secondsPerIteration * kNanoSecondsPerSecond;
PrintResult(clockNames[i], nanoSecPerIteration, "ns", true);
}
}
}

// Prints one result line; a leading '*' marks the result as important. The
// key is "<suite><test>: <trace>".
void DawnPerfTestBase::PrintResult(const std::string& trace,
double value,
const std::string& units,
bool important) const {
const ::testing::TestInfo* const testInfo =
::testing::UnitTest::GetInstance()->current_test_info();
const char* testName = testInfo->name();
const char* testSuite = testInfo->test_suite_name();

// The results are printed according to the format specified at
// [chromium]//build/scripts/slave/performance_log_processor.py
fflush(stdout);
printf("%sRESULT %s%s: %s= %s%f%s %s\n", important ? "*" : "", testSuite, testName,
trace.c_str(), "", value, "", units.c_str());
fflush(stdout);
}
// Prints one unsigned-int result line; a leading '*' marks the result as
// important. The key is "<suite><test>: <trace>".
void DawnPerfTestBase::PrintResult(const std::string& trace,
                                   unsigned int value,
                                   const std::string& units,
                                   bool important) const {
    const ::testing::TestInfo* const testInfo =
        ::testing::UnitTest::GetInstance()->current_test_info();
    const char* testName = testInfo->name();
    const char* testSuite = testInfo->test_suite_name();

    // The results are printed according to the format specified at
    // [chromium]//build/scripts/slave/performance_log_processor.py
    // BUG FIX: print testSuite before testName, matching the double overload
    // above; the original swapped them, yielding inconsistent result keys
    // between the two overloads.
    fflush(stdout);
    printf("%sRESULT %s%s: %s= %s%u%s %s\n", important ? "*" : "", testSuite, testName,
           trace.c_str(), "", value, "", units.c_str());
    fflush(stdout);
}

View File

@ -0,0 +1,113 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESTS_PERFTESTS_DAWNPERFTEST_H_
#define TESTS_PERFTESTS_DAWNPERFTEST_H_
#include "tests/DawnTest.h"
namespace utils {
class Timer;
}
// Creates and registers the perf test environment from the command line arguments.
// Presumably invoked from the test binary's main() before RUN_ALL_TESTS() —
// verify against PerfTestsMain.cpp / dawn_perf_tests_main.cc.
void InitDawnPerfTestEnvironment(int argc, char** argv);
// Test environment for the perf test binary. Extends DawnTestEnvironment with
// perf-specific options: a calibration-only mode and an override for the
// number of steps each test runs.
class DawnPerfTestEnvironment : public DawnTestEnvironment {
  public:
    DawnPerfTestEnvironment(int argc, char** argv);
    ~DawnPerfTestEnvironment();

    void SetUp() override;

    // Whether tests should perform only the calibration phase.
    bool IsCalibrating() const;

    // Forced number of steps per trial; 0 means no override.
    unsigned int OverrideStepsToRun() const;

  private:
    // Only run calibration which allows the perf test runner to save time.
    bool mIsCalibrating = false;

    // If non-zero, overrides the number of steps.
    unsigned int mOverrideStepsToRun = 0;
};
// Dawn Perf Tests calls Step() of a derived class to measure its execution
// time. First, a calibration step is run which determines the number of times
// to call Step() to last approximately |kCalibrationRunTimeSeconds|. Then,
// Step() is called for the computed number of times, or until
// |kMaximumRunTimeSeconds| is exceeded. |kNumTrials| are performed and the
// results and averages per iteration** are printed.
//
// The results are printed according to the format specified at
// [chromium]//build/scripts/slave/performance_log_processor.py
//
// ** The number of iterations a test performs should be passed to the
// constructor of DawnPerfTestBase. The reported times are the total time
// divided by (numSteps * iterationsPerStep).
class DawnPerfTestBase {
    // Target duration of the calibration run used to compute the step count.
    static constexpr double kCalibrationRunTimeSeconds = 1.0;
    // Hard cap on the duration of a single trial.
    static constexpr double kMaximumRunTimeSeconds = 10.0;
    // Number of measured trials performed after calibration.
    static constexpr unsigned int kNumTrials = 3;

  public:
    // |test| is the owning test fixture. |iterationsPerStep| is the number of
    // logical iterations a single Step() performs; it is used to normalize the
    // reported per-iteration times.
    DawnPerfTestBase(DawnTestBase* test, unsigned int iterationsPerStep);
    virtual ~DawnPerfTestBase();

  protected:
    // Call if the test step was aborted and the test should stop running.
    void AbortTest();

    // Waits for outstanding GPU work — TODO confirm against DawnPerfTest.cpp.
    void WaitForGPU();

    // Runs calibration followed by the measured trials, then prints results.
    void RunTest();

    // Reports one result line in the performance_log_processor.py format.
    void PrintResult(const std::string& trace,
                     double value,
                     const std::string& units,
                     bool important) const;
    void PrintResult(const std::string& trace,
                     unsigned int value,
                     const std::string& units,
                     bool important) const;

  private:
    // Calls Step() repeatedly until |maxRunTime| elapses or mStepsToRun steps complete.
    void DoRunLoop(double maxRunTime);
    void PrintResults();

    // Implemented by each perf test: performs one measured unit of work.
    virtual void Step() = 0;

    DawnTestBase* mTest;
    bool mRunning = false;
    unsigned int mIterationsPerStep;
    unsigned int mStepsToRun = 0;
    unsigned int mNumStepsPerformed = 0;
    uint64_t mGPUTimeNs = 0;  // TODO(enga): Measure GPU time with timing queries.
    std::unique_ptr<utils::Timer> mTimer;
};
// Fixture combining the parameterized Dawn test harness with the perf-test
// measurement logic. Perf tests derive from this and implement Step().
template <typename Params = DawnTestParam>
class DawnPerfTestWithParams : public DawnTestWithParams<Params>, public DawnPerfTestBase {
  protected:
    DawnPerfTestWithParams(unsigned int iterationsPerStep)
        : DawnTestWithParams<Params>(), DawnPerfTestBase(this, iterationsPerStep) {
    }
    ~DawnPerfTestWithParams() override = default;
};

// Perf test using only the default parameterization.
using DawnPerfTest = DawnPerfTestWithParams<>;
// Instantiates a perf test suite once per parameter produced by MakeParamGenerator.
// gtest names are fully qualified (::testing::) so the macro expands correctly
// regardless of the using-directives at the expansion site.
#define DAWN_INSTANTIATE_PERF_TEST_SUITE_P(testName, ...)                                      \
    INSTANTIATE_TEST_SUITE_P(                                                                  \
        , testName, ::testing::ValuesIn(MakeParamGenerator<testName::ParamType>(__VA_ARGS__)), \
        ::testing::PrintToStringParamName())
#endif // TESTS_PERFTESTS_DAWNPERFTEST_H_

77
src/utils/OSXTimer.cpp Normal file
View File

@ -0,0 +1,77 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "utils/Timer.h"
#include <CoreServices/CoreServices.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
namespace utils {

    // Timer implementation backed by mach_absolute_time(), which reports time in
    // timebase units that are scaled to seconds by GetSecondCoeff().
    class OSXTimer : public Timer {
      public:
        OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {
        }

        ~OSXTimer() override = default;

        void Start() override {
            mStartTime = mach_absolute_time();
            // Cache secondCoeff so the const GetElapsedTime() can use it.
            GetSecondCoeff();
            mRunning = true;
        }

        void Stop() override {
            mStopTime = mach_absolute_time();
            mRunning = false;
        }

        double GetElapsedTime() const override {
            if (mRunning) {
                return mSecondCoeff * (mach_absolute_time() - mStartTime);
            } else {
                return mSecondCoeff * (mStopTime - mStartTime);
            }
        }

        double GetAbsoluteTime() override {
            return GetSecondCoeff() * mach_absolute_time();
        }

      private:
        double GetSecondCoeff() {
            // If this is the first time we've run, get the timebase.
            if (mSecondCoeff == 0.0) {
                mach_timebase_info_data_t timebaseInfo;
                mach_timebase_info(&timebaseInfo);

                mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
            }

            return mSecondCoeff;
        }

        bool mRunning;
        // Initialized so that querying the timer before Start() reads
        // well-defined values instead of uninitialized memory.
        uint64_t mStartTime = 0;
        uint64_t mStopTime = 0;
        double mSecondCoeff;
    };

    // Returns a newly allocated timer; the caller takes ownership.
    Timer* CreateTimer() {
        return new OSXTimer();
    }

}  // namespace utils

74
src/utils/PosixTimer.cpp Normal file
View File

@ -0,0 +1,74 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "utils/Timer.h"
#include <stdint.h>
#include <time.h>
namespace utils {

    namespace {

        // Returns the monotonic clock's current value in nanoseconds.
        uint64_t GetCurrentTimeNs() {
            struct timespec currentTime;
            clock_gettime(CLOCK_MONOTONIC, &currentTime);
            return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
        }

    }  // anonymous namespace

    // Timer implementation backed by clock_gettime(CLOCK_MONOTONIC).
    class PosixTimer : public Timer {
      public:
        PosixTimer() : Timer(), mRunning(false) {
        }

        ~PosixTimer() override = default;

        void Start() override {
            mStartTimeNs = GetCurrentTimeNs();
            mRunning = true;
        }

        void Stop() override {
            mStopTimeNs = GetCurrentTimeNs();
            mRunning = false;
        }

        double GetElapsedTime() const override {
            uint64_t endTimeNs;
            if (mRunning) {
                endTimeNs = GetCurrentTimeNs();
            } else {
                endTimeNs = mStopTimeNs;
            }

            return (endTimeNs - mStartTimeNs) * 1e-9;
        }

        double GetAbsoluteTime() override {
            return GetCurrentTimeNs() * 1e-9;
        }

      private:
        bool mRunning;
        // Initialized so that querying the timer before Start() reads
        // well-defined values instead of uninitialized memory.
        uint64_t mStartTimeNs = 0;
        uint64_t mStopTimeNs = 0;
    };

    // Returns a newly allocated timer; the caller takes ownership.
    Timer* CreateTimer() {
        return new PosixTimer();
    }

}  // namespace utils

41
src/utils/Timer.h Normal file
View File

@ -0,0 +1,41 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef UTILS_TIMER_H_
#define UTILS_TIMER_H_
namespace utils {

    // Abstract interface for a high-resolution timer. Platform-specific
    // implementations live in OSXTimer.cpp, PosixTimer.cpp and WindowsTimer.cpp.
    class Timer {
      public:
        virtual ~Timer() {
        }

        // Timer functionality: Use Start() and Stop() to record the duration and use
        // GetElapsedTime() to query that duration. If GetElapsedTime() is called in between, it
        // will report the elapsed time since Start().
        virtual void Start() = 0;
        virtual void Stop() = 0;
        virtual double GetElapsedTime() const = 0;

        // Timestamp functionality: Use GetAbsoluteTime() to get an absolute time with an unknown
        // origin. This time moves forward regardless of Start()/Stop().
        virtual double GetAbsoluteTime() = 0;
    };

    // Creates the timer implementation for the current platform. Returns a
    // newly allocated object; the caller takes ownership.
    Timer* CreateTimer();

}  // namespace utils
#endif // UTILS_TIMER_H_

View File

@ -0,0 +1,89 @@
// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "utils/Timer.h"
#include <windows.h>
namespace utils {

    // Timer implementation backed by QueryPerformanceCounter. Counter ticks are
    // converted to seconds using the cached performance counter frequency.
    class WindowsTimer : public Timer {
      public:
        WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {
        }

        ~WindowsTimer() override = default;

        void Start() override {
            LARGE_INTEGER curTime;
            QueryPerformanceCounter(&curTime);
            mStartTime = curTime.QuadPart;

            // Cache the frequency so the const GetElapsedTime() can use it.
            GetFrequency();

            mRunning = true;
        }

        void Stop() override {
            LARGE_INTEGER curTime;
            QueryPerformanceCounter(&curTime);
            mStopTime = curTime.QuadPart;

            mRunning = false;
        }

        double GetElapsedTime() const override {
            LONGLONG endTime;
            if (mRunning) {
                LARGE_INTEGER curTime;
                QueryPerformanceCounter(&curTime);
                endTime = curTime.QuadPart;
            } else {
                endTime = mStopTime;
            }

            // Relies on Start() having cached mFrequency; this method is const
            // and cannot call GetFrequency() itself.
            return static_cast<double>(endTime - mStartTime) / mFrequency;
        }

        double GetAbsoluteTime() override {
            LARGE_INTEGER curTime;
            QueryPerformanceCounter(&curTime);

            return static_cast<double>(curTime.QuadPart) / GetFrequency();
        }

      private:
        LONGLONG GetFrequency() {
            // The counter frequency is fixed at boot, so query it only once.
            if (mFrequency == 0) {
                LARGE_INTEGER frequency = {};
                QueryPerformanceFrequency(&frequency);

                mFrequency = frequency.QuadPart;
            }

            return mFrequency;
        }

        bool mRunning;
        // Initialized so that querying the timer before Start() reads
        // well-defined values instead of uninitialized memory.
        LONGLONG mStartTime = 0;
        LONGLONG mStopTime = 0;
        LONGLONG mFrequency;
    };

    // Returns a newly allocated timer; the caller takes ownership.
    Timer* CreateTimer() {
        return new WindowsTimer();
    }

}  // namespace utils