Fix build/namespaces issues

This CL fixes up various build/namespaces lint errors and enables
the lint check.

Bug: dawn:1339
Change-Id: Ib2edd0019cb010e2c6226abce6cfee50a0b4b763
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87482
Kokoro: Kokoro <noreply+kokoro@google.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
This commit is contained in:
dan sinclair 2022-04-21 16:46:56 +00:00 committed by Dawn LUCI CQ
parent 6cb57a9847
commit b0acbd436d
44 changed files with 7683 additions and 7402 deletions

View File

@ -1,3 +1,2 @@
filter=-build/namespaces
filter=-readability/todo
filter=-runtime/indentation_namespace

View File

@ -32,9 +32,7 @@
namespace dawn::native::metal {
namespace {
struct KalmanInfo;
}
class Device final : public DeviceBase {
public:

View File

@ -42,11 +42,6 @@
namespace dawn::native::metal {
namespace {
// The time interval for each round of kalman filter
static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
struct KalmanInfo {
float filterValue; // The estimation value
float kalmanGain; // The kalman gain
@ -54,6 +49,11 @@ namespace dawn::native::metal {
float P; // The a posteriori estimate covariance
};
namespace {
// The time interval for each round of kalman filter
static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
// A simplified kalman filter for estimating timestamp period based on measured values
float KalmanFilter(KalmanInfo* info, float measuredValue) {
// Optimize kalman gain

View File

@ -45,7 +45,9 @@
namespace {
using namespace testing;
using testing::_;
using testing::MockCallback;
using testing::SaveArg;
class AdapterDiscoveryTests : public ::testing::Test {};

View File

@ -22,7 +22,9 @@
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
#include "dawn/utils/WGPUHelpers.h"
using namespace testing;
using testing::_;
using testing::Exactly;
using testing::MockCallback;
class MockDeviceLostCallback {
public:

View File

@ -17,7 +17,7 @@
#include "dawn/tests/DawnTest.h"
#include "gmock/gmock.h"
using namespace testing;
using testing::InSequence;
class MockMapCallback {
public:
@ -67,7 +67,7 @@ class QueueTimelineTests : public DawnTest {
// when queue.OnSubmittedWorkDone is called after mMapReadBuffer.MapAsync. The callback order should
// happen in the order the functions are called.
TEST_P(QueueTimelineTests, MapRead_OnWorkDone) {
testing::InSequence sequence;
InSequence sequence;
EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1);
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
@ -83,7 +83,7 @@ TEST_P(QueueTimelineTests, MapRead_OnWorkDone) {
// queue.Signal is called before mMapReadBuffer.MapAsync. The callback order should
// happen in the order the functions are called.
TEST_P(QueueTimelineTests, OnWorkDone_MapRead) {
testing::InSequence sequence;
InSequence sequence;
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1);

View File

@ -17,7 +17,7 @@
#include "dawn/native/BuddyAllocator.h"
#include "gtest/gtest.h"
using namespace dawn::native;
namespace dawn::native {
constexpr uint64_t BuddyAllocator::kInvalidOffset;
@ -265,8 +265,8 @@ TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
// Verify the buddy allocator can deal with multiple allocations with mixed alignments.
TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
// After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8 byte
// alignment.
// After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8
// byte alignment.
//
// Level --------------------------------
// 0 32 | S |
@ -298,8 +298,8 @@ TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
// Verify the buddy allocator can deal with multiple allocations with equal alignments.
TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
// After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4 byte
// alignment.
// After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4
// byte alignment.
//
// Level --------------------------------
// 0 32 | S |
@ -327,3 +327,5 @@ TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
}
} // namespace dawn::native

View File

@ -22,11 +22,12 @@
#include "dawn/native/PooledResourceMemoryAllocator.h"
#include "dawn/native/ResourceHeapAllocator.h"
using namespace dawn::native;
namespace dawn::native {
class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
public:
ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
uint64_t size) override {
return std::make_unique<ResourceHeapBase>();
}
void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
@ -434,8 +435,8 @@ TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
std::set<ResourceHeapBase*> heaps = {};
std::vector<ResourceMemoryAllocation> allocations = {};
// Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth of
// buffers. Otherwise, the heap may be reused if not full.
// Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth
// of buffers. Otherwise, the heap may be reused if not full.
constexpr uint32_t kNumOfHeaps = 10;
// Allocate |kNumOfHeaps| worth.
@ -459,3 +460,5 @@ TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
poolAllocator.DestroyPool();
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
}
} // namespace dawn::native

View File

@ -19,7 +19,7 @@
#include "gtest/gtest.h"
#include "dawn/native/CommandAllocator.h"
using namespace dawn::native;
namespace dawn::native {
// Definition of the command types used in the tests
enum class CommandType {
@ -458,7 +458,8 @@ TEST(CommandAllocator, AcquireCommandBlocks) {
{0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
{0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
};
const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, {3, 4}};
const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2},
{3, 4}};
const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
@ -466,7 +467,8 @@ TEST(CommandAllocator, AcquireCommandBlocks) {
for (size_t j = 0; j < kNumAllocators; ++j) {
CommandAllocator& allocator = allocators[j];
for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
CommandPipeline* pipeline =
allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
pipeline->pipeline = pipelines[j][i];
pipeline->attachmentPoint = attachmentPoints[j][i];
@ -502,3 +504,5 @@ TEST(CommandAllocator, AcquireCommandBlocks) {
ASSERT_FALSE(iterator.NextCommandId(&type));
iterator.MakeEmptyAsDataWasDestroyed();
}
} // namespace dawn::native

View File

@ -18,9 +18,7 @@
#include "dawn/native/ErrorData.h"
#include "gtest/gtest.h"
using namespace dawn::native;
namespace {
namespace dawn::native { namespace {
int placeholderSuccess = 0xbeef;
const char* placeholderErrorMessage = "I am an error message :3";
@ -360,4 +358,6 @@ namespace {
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
}
} // anonymous namespace
// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
// NOLINTNEXTLINE(readability/namespace)
}} // namespace dawn::native::

View File

@ -16,7 +16,7 @@
#include "dawn/native/PerStage.h"
using namespace dawn::native;
namespace dawn::native {
// Tests for StageBit
TEST(PerStage, StageBit) {
@ -87,3 +87,5 @@ TEST(PerStage, IterateNoStages) {
ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
}
} // namespace dawn::native

View File

@ -18,7 +18,8 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace testing;
using testing::InSequence;
using testing::StrictMock;
namespace {
@ -34,7 +35,7 @@ namespace {
std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
class PlacementAllocatedTests : public Test {
class PlacementAllocatedTests : public testing::Test {
void SetUp() override {
mockDestructor = std::make_unique<StrictMock<MockDestructor>>();
}

View File

@ -17,7 +17,7 @@
#include "dawn/native/RingBufferAllocator.h"
#include "gtest/gtest.h"
using namespace dawn::native;
namespace dawn::native {
constexpr uint64_t RingBufferAllocator::kInvalidOffset;
@ -135,7 +135,8 @@ TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
// Ensure we are full.
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
RingBufferAllocator::kInvalidOffset);
// Reclaim the next two frames.
allocator.Deallocate(ExecutionSerial(4));
@ -157,7 +158,8 @@ TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
//
// Ensure we are full.
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
RingBufferAllocator::kInvalidOffset);
// Reclaim all.
allocator.Deallocate(kMaxExecutionSerial);
@ -173,3 +175,5 @@ TEST(RingBufferAllocatorTests, RingBufferOverflow) {
ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
RingBufferAllocator::kInvalidOffset);
}
} // namespace dawn::native

View File

@ -18,11 +18,11 @@
#include "dawn/native/SubresourceStorage.h"
#include "gtest/gtest.h"
using namespace dawn::native;
namespace dawn::native {
// A fake class that replicates the behavior of SubresourceStorage but without any compression and
// is used to compare the results of operations on SubresourceStorage against the "ground truth" of
// FakeStorage.
// A fake class that replicates the behavior of SubresourceStorage but without any compression
// and is used to compare the results of operations on SubresourceStorage against the "ground
// truth" of FakeStorage.
template <typename T>
struct FakeStorage {
FakeStorage(Aspect aspects,
@ -71,12 +71,11 @@ struct FakeStorage {
return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex);
}
// Method that checks that this and real have exactly the same content. It does so via looping
// on all subresources and calling Get() (hence testing Get()). It also calls Iterate()
// checking that every subresource is mentioned exactly once and that its content is correct
// (hence testing Iterate()).
// Its implementation requires the RangeTracker below that itself needs FakeStorage<int> so it
// cannot be define inline with the other methods.
// Method that checks that this and real have exactly the same content. It does so via
// looping on all subresources and calling Get() (hence testing Get()). It also calls
// Iterate() checking that every subresource is mentioned exactly once and that its content
// is correct (hence testing Iterate()). Its implementation requires the RangeTracker below
// that itself needs FakeStorage<int> so it cannot be defined inline with the other methods.
void CheckSameAs(const SubresourceStorage<T>& real);
Aspect mAspects;
@ -106,7 +105,8 @@ struct RangeTracker {
}
void CheckTrackedExactly(const SubresourceRange& range) {
// Check that all subresources in the range were tracked once and set the counter back to 0.
// Check that all subresources in the range were tracked once and set the counter back
// to 0.
mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
ASSERT_EQ(*counter, 1u);
*counter = 0;
@ -195,8 +195,9 @@ void CheckLayerCompressed(const SubresourceStorage<T>& s,
bool seen = false;
s.Iterate([&](const SubresourceRange& range, const T&) {
if (range.aspects == aspect && range.layerCount == 1 && range.levelCount == levelCount &&
range.baseArrayLayer == layer && range.baseMipLevel == 0) {
if (range.aspects == aspect && range.layerCount == 1 &&
range.levelCount == levelCount && range.baseArrayLayer == layer &&
range.baseMipLevel == 0) {
seen = true;
}
});
@ -251,14 +252,15 @@ TEST(SubresourceStorageTest, DefaultValue) {
}
}
// The tests for Update() all follow the same pattern of setting up a real and a fake storage then
// performing one or multiple Update()s on them and checking:
// The tests for Update() all follow the same pattern of setting up a real and a fake storage
// then performing one or multiple Update()s on them and checking:
// - They have the same content.
// - The Update() range was correct.
// - The aspects and layers have the expected "compressed" status.
// Calls Update both on the read storage and the fake storage but intercepts the call to updateFunc
// done by the real storage to check their ranges argument aggregate to exactly the update range.
// Calls Update both on the read storage and the fake storage but intercepts the call to
// updateFunc done by the real storage to check their ranges argument aggregate to exactly the
// update range.
template <typename T, typename F>
void CallUpdateOnBoth(SubresourceStorage<T>* s,
FakeStorage<T>* f,
@ -317,7 +319,8 @@ TEST(SubresourceStorageTest, UpdateStipple) {
for (uint32_t layer = 0; layer < kLayers; layer++) {
for (uint32_t level = 0; level < kLevels; level++) {
if ((layer + level) % 2 == 0) {
SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
SubresourceRange range =
SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
CallUpdateOnBoth(&s, &f, range,
[](const SubresourceRange&, int* data) { *data += 17; });
}
@ -335,7 +338,8 @@ TEST(SubresourceStorageTest, UpdateStipple) {
{
SubresourceRange fullRange =
SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 31; });
CallUpdateOnBoth(&s, &f, fullRange,
[](const SubresourceRange&, int* data) { *data = 31; });
}
CheckAspectCompressed(s, Aspect::Depth, true);
@ -414,7 +418,8 @@ TEST(SubresourceStorageTest, UpdateExtremas) {
// Update half of the layers in full with constant values. Some recompression should happen.
{
SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels});
CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 123; });
CallUpdateOnBoth(&s, &f, range,
[](const SubresourceRange&, int* data) { *data = 123; });
}
CheckLayerCompressed(s, Aspect::Color, 0, true);
@ -423,16 +428,18 @@ TEST(SubresourceStorageTest, UpdateExtremas) {
// Update completely. Recompression should happen!
{
SubresourceRange fullRange = SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 35; });
SubresourceRange fullRange =
SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
CallUpdateOnBoth(&s, &f, fullRange,
[](const SubresourceRange&, int* data) { *data = 35; });
}
CheckAspectCompressed(s, Aspect::Color, true);
}
// A regression test for an issue found while reworking the implementation where RecompressAspect
// didn't correctly check that each each layer was compressed but only that their 0th value was
// the same.
// A regression test for an issue found while reworking the implementation where
// RecompressAspect didn't correctly check that each layer was compressed but only that
// their 0th value was the same.
TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) {
SubresourceStorage<int> s(Aspect::Color, 2, 2);
FakeStorage<int> f(Aspect::Color, 2, 2);
@ -544,16 +551,16 @@ TEST(SubresourceStorageTest, MergeFullInTwoBand) {
CheckLayerCompressed(s, Aspect::Depth, 3, false);
CheckLayerCompressed(s, Aspect::Depth, 4, false);
// Stencil is decompressed but all its layers are still compressed because there wasn't the mip
// band.
// Stencil is decompressed but all its layers are still compressed because there wasn't the
// mip band.
CheckAspectCompressed(s, Aspect::Stencil, false);
CheckLayerCompressed(s, Aspect::Stencil, 1, true);
CheckLayerCompressed(s, Aspect::Stencil, 2, true);
CheckLayerCompressed(s, Aspect::Stencil, 3, true);
CheckLayerCompressed(s, Aspect::Stencil, 4, true);
}
// Test the reverse, mergign two-bands in a full resource. This provides coverage for decompressing
// aspects / and partilly layers to match the compression of `other`
// Test the reverse, merging two-bands in a full resource. This provides coverage for
// decompressing aspects / and partially layers to match the compression of `other`
TEST(SubresourceStorageTest, MergeTwoBandInFull) {
const uint32_t kLayers = 5;
const uint32_t kLevels = 9;
@ -581,8 +588,8 @@ TEST(SubresourceStorageTest, MergeTwoBandInFull) {
CheckLayerCompressed(s, Aspect::Depth, 3, false);
CheckLayerCompressed(s, Aspect::Depth, 4, false);
// Stencil is decompressed but all its layers are still compressed because there wasn't the mip
// band.
// Stencil is decompressed but all its layers are still compressed because there wasn't the
// mip band.
CheckAspectCompressed(s, Aspect::Stencil, false);
CheckLayerCompressed(s, Aspect::Stencil, 1, true);
CheckLayerCompressed(s, Aspect::Stencil, 2, true);
@ -603,7 +610,8 @@ TEST(SubresourceStorageTest, MergeLayerBandInStipple) {
for (uint32_t layer = 0; layer < kLayers; layer++) {
for (uint32_t level = 0; level < kLevels; level++) {
if ((layer + level) % 2 == 0) {
SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, layer, level);
SubresourceRange range =
SubresourceRange::MakeSingle(Aspect::Color, layer, level);
CallUpdateOnBoth(&s, &f, range,
[](const SubresourceRange&, int* data) { *data += 17; });
}
@ -634,7 +642,8 @@ TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) {
// Set up s with zeros except (0, 1) which is garbage.
{
SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1);
CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
CallUpdateOnBoth(&s, &f, range,
[](const SubresourceRange&, int* data) { *data += 0xABC; });
}
// Other is 2x2 of zeroes
@ -658,7 +667,8 @@ TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
// Cause decompression by writing to a single subresource.
{
SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1);
CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
CallUpdateOnBoth(&s, &f, range,
[](const SubresourceRange&, int* data) { *data += 0xABC; });
}
// Check that the aspect's value of 3 was correctly decompressed in layer 0.
@ -675,3 +685,5 @@ TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
// - Two != being converted to == during a rework.
// - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed.
// - Missing decompression of layer 0 after introducing mInlineAspectData.
} // namespace dawn::native

View File

@ -22,9 +22,6 @@
// Make our own Base - Backend object pair, reusing the AdapterBase name
namespace dawn::native {
class AdapterBase : public RefCounted {};
} // namespace dawn::native
using namespace dawn::native;
class MyAdapter : public AdapterBase {};
@ -85,3 +82,5 @@ TEST(ToBackend, Ref) {
adapter->Release();
}
}
} // namespace dawn::native

View File

@ -24,8 +24,7 @@
#include "dawn/webgpu_cpp_print.h"
#include "gtest/gtest.h"
using namespace dawn::native::d3d12;
namespace dawn::native::d3d12 {
namespace {
struct TextureSpec {
@ -61,11 +60,12 @@ namespace {
// If there are multiple layers, 2D texture splitter actually splits each layer
// independently. See the details in Compute2DTextureCopySplits(). As a result,
// if we simply expand a copy region generated by 2D texture splitter to all
// layers, the copy region might be OOB. But that is not the approach that the current
// 2D texture splitter is doing, although Compute2DTextureCopySubresource forwards
// "copySize.depthOrArrayLayers" to the copy region it generated. So skip the test
// below for 2D textures with multiple layers.
if (textureSpec.depthOrArrayLayers <= 1 || dimension == wgpu::TextureDimension::e3D) {
// layers, the copy region might be OOB. But that is not the approach that the
// current 2D texture splitter is doing, although Compute2DTextureCopySubresource
// forwards "copySize.depthOrArrayLayers" to the copy region it generated. So skip
// the test below for 2D textures with multiple layers.
if (textureSpec.depthOrArrayLayers <= 1 ||
dimension == wgpu::TextureDimension::e3D) {
uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
uint64_t minimumRequiredBufferSize =
@ -75,8 +75,9 @@ namespace {
textureSpec.depthOrArrayLayers,
textureSpec.texelBlockSizeInBytes);
// The last pixel (buffer footprint) of each copy region depends on its bufferOffset
// and copySize. It is not the last pixel where the bufferSize ends.
// The last pixel (buffer footprint) of each copy region depends on its
// bufferOffset and copySize. It is not the last pixel where the bufferSize
// ends.
ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
@ -96,8 +97,8 @@ namespace {
copy.bufferSize.depthOrArrayLayers,
textureSpec.texelBlockSizeInBytes);
// The buffer footprint of each copy region should not exceed the minimum required
// buffer size. Otherwise, pixels accessed by copy may be OOB.
// The buffer footprint of each copy region should not exceed the minimum
// required buffer size. Otherwise, pixels accessed by copy may be OOB.
ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
}
}
@ -106,8 +107,8 @@ namespace {
// Check that the offset is aligned
void ValidateOffset(const TextureCopySubresource& copySplit) {
for (uint32_t i = 0; i < copySplit.count; ++i) {
ASSERT_TRUE(
Align(copySplit.copies[i].alignedOffset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
ASSERT_TRUE(Align(copySplit.copies[i].alignedOffset,
D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
copySplit.copies[i].alignedOffset);
}
}
@ -127,11 +128,11 @@ namespace {
// [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
// included.
bool overlapX = InclusiveRangesOverlap(
a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1, b.textureOffset.x,
b.textureOffset.x + b.copySize.width - 1);
a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1,
b.textureOffset.x, b.textureOffset.x + b.copySize.width - 1);
bool overlapY = InclusiveRangesOverlap(
a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, b.textureOffset.y,
b.textureOffset.y + b.copySize.height - 1);
a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1,
b.textureOffset.y, b.textureOffset.y + b.copySize.height - 1);
bool overlapZ = InclusiveRangesOverlap(
a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
@ -148,10 +149,12 @@ namespace {
uint32_t minX = copySplit.copies[0].textureOffset.x;
uint32_t minY = copySplit.copies[0].textureOffset.y;
uint32_t minZ = copySplit.copies[0].textureOffset.z;
uint32_t maxX = copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
uint32_t maxY = copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
uint32_t maxZ =
copySplit.copies[0].textureOffset.z + copySplit.copies[0].copySize.depthOrArrayLayers;
uint32_t maxX =
copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
uint32_t maxY =
copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
uint32_t maxZ = copySplit.copies[0].textureOffset.z +
copySplit.copies[0].copySize.depthOrArrayLayers;
for (uint32_t i = 1; i < copySplit.count; ++i) {
const auto& copy = copySplit.copies[i];
@ -183,7 +186,8 @@ namespace {
ASSERT_GT(copiedPixels, 0u);
count += copiedPixels;
}
ASSERT_EQ(count, textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
ASSERT_EQ(count,
textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
}
// Check that every buffer offset is at the correct pixel location
@ -246,7 +250,8 @@ namespace {
os << "TextureSpec("
<< "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
<< textureSpec.width << ", " << textureSpec.height << ", "
<< textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes << ")";
<< textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes
<< ")";
return os;
}
@ -264,10 +269,10 @@ namespace {
<< copy.textureOffset.y << ", " << copy.textureOffset.z << "), size ("
<< copy.copySize.width << ", " << copy.copySize.height << ", "
<< copy.copySize.depthOrArrayLayers << ")" << std::endl;
os << " " << i << ": Buffer at (" << copy.bufferOffset.x << ", " << copy.bufferOffset.y
<< ", " << copy.bufferOffset.z << "), footprint (" << copy.bufferSize.width << ", "
<< copy.bufferSize.height << ", " << copy.bufferSize.depthOrArrayLayers << ")"
<< std::endl;
os << " " << i << ": Buffer at (" << copy.bufferOffset.x << ", "
<< copy.bufferOffset.y << ", " << copy.bufferOffset.z << "), footprint ("
<< copy.bufferSize.width << ", " << copy.bufferSize.height << ", "
<< copy.bufferSize.depthOrArrayLayers << ")" << std::endl;
}
return os;
}
@ -320,8 +325,8 @@ namespace {
{64, 48, 16, 1024, 1024, 1, 16, 4, 4},
};
// Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow is
// the minimum required
// Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow
// is the minimum required
std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width,
kTextureBytesPerRowAlignment);
@ -367,7 +372,8 @@ namespace {
}
// Define a list of values to set properties in the spec structs
constexpr uint32_t kCheckValues[] = {1, 2, 3, 4, 5, 6, 7, 8, // small values
constexpr uint32_t kCheckValues[] = {
1, 2, 3, 4, 5, 6, 7, 8, // small values
16, 32, 64, 128, 256, 512, 1024, 2048, // powers of 2
15, 31, 63, 127, 257, 511, 1023, 2047, // misalignments
17, 33, 65, 129, 257, 513, 1025, 2049};
@ -527,4 +533,7 @@ TEST_P(CopySplitTest, ImageHeight) {
INSTANTIATE_TEST_SUITE_P(,
CopySplitTest,
testing::Values(wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D));
testing::Values(wgpu::TextureDimension::e2D,
wgpu::TextureDimension::e3D));
} // namespace dawn::native::d3d12

View File

@ -21,9 +21,12 @@
#include "dawn/tests/DawnNativeTest.h"
#include "dawn/utils/WGPUHelpers.h"
namespace dawn::native {
class CommandBufferEncodingTests : public DawnNativeTest {
protected:
void ExpectCommands(dawn::native::CommandIterator* commands,
void ExpectCommands(
dawn::native::CommandIterator* commands,
std::vector<std::pair<dawn::native::Command,
std::function<void(dawn::native::CommandIterator*)>>>
expectedCommands) {
@ -40,8 +43,6 @@ class CommandBufferEncodingTests : public DawnNativeTest {
// Indirect dispatch validation changes the bind groups in the middle
// of a pass. Test that bindings are restored after the validation runs.
TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
using namespace dawn::native;
wgpu::BindGroupLayout staticLayout =
utils::MakeBindGroupLayout(device, {{
0,
@ -74,8 +75,8 @@ TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestor
wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
// Create buffers to use for both the indirect buffer and the bind groups.
wgpu::Buffer indirectBuffer =
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
wgpu::Buffer indirectBuffer = utils::CreateBufferFromData<uint32_t>(
device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
wgpu::BufferDescriptor uniformBufferDesc = {};
uniformBufferDesc.size = 512;
@ -217,7 +218,9 @@ TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestor
FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
{
{Command::BeginComputePass,
[&](CommandIterator* commands) { SkipCommand(commands, Command::BeginComputePass); }},
[&](CommandIterator* commands) {
SkipCommand(commands, Command::BeginComputePass);
}},
// Expect the state to be set.
{Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
{Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
@ -276,8 +279,6 @@ TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestor
// and does not leak state changes that occurred between a snapshot and the
// state restoration.
TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
using namespace dawn::native;
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
@ -310,3 +311,5 @@ TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
// Expect no pipeline
EXPECT_FALSE(stateTracker->HasPipeline());
}
} // namespace dawn::native

View File

@ -25,9 +25,13 @@
namespace {
using namespace testing;
using testing::Contains;
using testing::MockCallback;
using testing::NotNull;
using testing::SaveArg;
using testing::StrEq;
class DeviceCreationTest : public Test {
class DeviceCreationTest : public testing::Test {
protected:
void SetUp() override {
dawnProcSetProcs(&dawn::native::GetProcs());
@ -83,7 +87,7 @@ namespace {
EXPECT_NE(device, nullptr);
auto toggles = dawn::native::GetTogglesUsed(device.Get());
EXPECT_THAT(toggles, testing::Contains(testing::StrEq(toggle)));
EXPECT_THAT(toggles, Contains(StrEq(toggle)));
}
TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {

View File

@ -18,7 +18,8 @@
#include "gmock/gmock.h"
#include "dawn/tests/unittests/validation/ValidationTest.h"
using namespace testing;
using testing::_;
using testing::InvokeWithoutArgs;
class MockBufferMapAsyncCallback {
public:

View File

@ -18,7 +18,9 @@
#include "dawn/tests/unittests/validation/ValidationTest.h"
#include "gmock/gmock.h"
using namespace testing;
using testing::_;
using testing::MockCallback;
using testing::Sequence;
class MockDevicePopErrorScopeCallback {
public:
@ -170,7 +172,7 @@ TEST_F(ErrorScopeValidationTest, EnclosedQueueSubmitNested) {
queue.Submit(0, nullptr);
queue.OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this);
testing::Sequence seq;
Sequence seq;
MockCallback<WGPUErrorCallback> errorScopeCallback2;
EXPECT_CALL(errorScopeCallback2, Call(WGPUErrorType_NoError, _, this + 1)).InSequence(seq);

View File

@ -16,7 +16,12 @@
#include "dawn/tests/MockCallback.h"
using namespace testing;
using testing::_;
using testing::Invoke;
using testing::MockCallback;
using testing::NotNull;
using testing::StrictMock;
using testing::WithArg;
class MultipleDeviceTest : public ValidationTest {};

View File

@ -17,8 +17,6 @@
#include "dawn/tests/unittests/validation/ValidationTest.h"
#include "gmock/gmock.h"
using namespace testing;
class MockQueueWorkDoneCallback {
public:
MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));

View File

@ -23,10 +23,17 @@
#include "webgpu/webgpu_cpp.h"
namespace {
namespace dawn::wire { namespace {
using namespace testing;
using namespace dawn::wire;
using testing::_;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::MockCallback;
using testing::NotNull;
using testing::Return;
using testing::SaveArg;
using testing::StrEq;
using testing::WithArg;
class WireAdapterTests : public WireTest {
protected:
@ -328,4 +335,6 @@ namespace {
GetWireClient()->Disconnect();
}
} // anonymous namespace
// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
// NOLINTNEXTLINE(readability/namespace)
}} // namespace dawn::wire::

View File

@ -18,8 +18,11 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/common/Constants.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::Return;
using testing::Sequence;
class WireArgumentTests : public WireTest {
public:
@ -35,10 +38,12 @@ TEST_F(WireArgumentTests, ValueArgument) {
wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiEncoder));
WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
.WillOnce(Return(apiPass));
EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
@ -70,13 +75,16 @@ TEST_F(WireArgumentTests, ValueArrayArgument) {
WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), testOffsets.data());
wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(),
testOffsets.data());
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiEncoder));
WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
.WillOnce(Return(apiPass));
EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
apiPass, 0, apiBindGroup, testOffsets.size(),
@ -167,8 +175,8 @@ TEST_F(WireArgumentTests, CStringArgument) {
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
EXPECT_CALL(api,
DeviceCreateRenderPipeline(
EXPECT_CALL(
api, DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->vertex.entryPoint == std::string("main");
})))
@ -181,7 +189,8 @@ TEST_F(WireArgumentTests, CStringArgument) {
TEST_F(WireArgumentTests, ObjectAsValueArgument) {
WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiEncoder));
WGPUBufferDescriptor descriptor = {};
descriptor.size = 8;
@ -195,7 +204,8 @@ TEST_F(WireArgumentTests, ObjectAsValueArgument) {
.RetiresOnSaturation();
wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
EXPECT_CALL(api, CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
EXPECT_CALL(api,
CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
FlushClient();
}
@ -226,7 +236,8 @@ TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
wgpuQueueSubmit(queue, 2, cmdBufs);
EXPECT_CALL(
api, QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
api,
QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
})));
@ -249,8 +260,11 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) {
wgpuDeviceCreateSampler(device, &descriptor);
WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
EXPECT_CALL(api, DeviceCreateSampler(
apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool {
EXPECT_CALL(
api, DeviceCreateSampler(
apiDevice,
MatchesLambda(
[](const WGPUSamplerDescriptor* desc) -> bool {
return desc->nextInChain == nullptr &&
desc->magFilter == WGPUFilterMode_Linear &&
desc->minFilter == WGPUFilterMode_Nearest &&
@ -283,11 +297,11 @@ TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
wgpuDeviceCreatePipelineLayout(device, &descriptor);
WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
EXPECT_CALL(api, DeviceCreatePipelineLayout(
EXPECT_CALL(
api, DeviceCreatePipelineLayout(
apiDevice,
MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
return desc->nextInChain == nullptr &&
desc->bindGroupLayoutCount == 1 &&
return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
desc->bindGroupLayouts[0] == apiBgl;
})))
.WillOnce(Return(apiPlaceholderLayout));
@ -327,15 +341,16 @@ TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
EXPECT_CALL(
api,
EXPECT_CALL(api,
DeviceCreateBindGroupLayout(
apiDevice, MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
apiDevice,
MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
for (int i = 0; i < NUM_BINDINGS; ++i) {
const auto& a = desc->entries[i];
const auto& b = entries[i];
if (a.binding != b.binding || a.visibility != b.visibility ||
a.buffer.type != b.buffer.type || a.sampler.type != b.sampler.type ||
a.buffer.type != b.buffer.type ||
a.sampler.type != b.sampler.type ||
a.texture.sampleType != b.texture.sampleType) {
return false;
}
@ -356,8 +371,8 @@ TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
descriptor.bindGroupLayouts = &nullBGL;
wgpuDeviceCreatePipelineLayout(device, &descriptor);
EXPECT_CALL(api,
DeviceCreatePipelineLayout(
EXPECT_CALL(
api, DeviceCreatePipelineLayout(
apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
desc->bindGroupLayouts[0] == nullptr;
@ -366,3 +381,5 @@ TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
FlushClient();
}
} // namespace dawn::wire

View File

@ -14,8 +14,9 @@
#include "dawn/tests/unittests/wire/WireTest.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::Return;
class WireBasicTests : public WireTest {
public:
@ -45,7 +46,8 @@ TEST_F(WireBasicTests, CreateThenCall) {
.WillOnce(Return(apiCmdBufEncoder));
WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)).WillOnce(Return(apiCmdBuf));
EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
.WillOnce(Return(apiCmdBuf));
FlushClient();
}
@ -78,3 +80,5 @@ TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
FlushClient();
}
} // namespace dawn::wire

View File

@ -18,8 +18,13 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::InvokeWithoutArgs;
using testing::Mock;
using testing::Return;
using testing::StrictMock;
namespace {
@ -102,7 +107,8 @@ class WireBufferMappingReadTests : public WireBufferMappingTests {
// Check mapping for reading a succesfully created buffer
TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
@ -118,8 +124,8 @@ TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
FlushServer();
EXPECT_EQ(bufferContent,
*static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
EXPECT_EQ(bufferContent, *static_cast<const uint32_t*>(
wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
wgpuBufferUnmap(buffer);
EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
@ -130,11 +136,13 @@ TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
// Check that things work correctly when a validation error happens when mapping the buffer for
// reading
TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -145,10 +153,11 @@ TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize));
}
// Check that the map read callback is called with UNKNOWN when the buffer is destroyed before the
// request is finished
// Check that the map read callback is called with UNKNOWN when the buffer is destroyed before
// the request is finished
TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Return success
uint32_t bufferContent = 0;
@ -161,7 +170,8 @@ TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
// Destroy before the client gets the success, so the callback is called with
// DestroyedBeforeCallback.
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
EXPECT_CALL(*mockBufferMapCallback,
Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
.Times(1);
wgpuBufferRelease(buffer);
EXPECT_CALL(api, BufferRelease(apiBuffer));
@ -170,10 +180,11 @@ TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
FlushServer();
}
// Check the map read callback is called with "UnmappedBeforeCallback" when the map request would
// have worked, but Unmap was called
// Check the map read callback is called with "UnmappedBeforeCallback" when the map request
// would have worked, but Unmap was called
TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
@ -192,7 +203,8 @@ TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
// The callback shouldn't get called with success, even when the request succeeded on the
// server side
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
EXPECT_CALL(*mockBufferMapCallback,
Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
.Times(1);
FlushServer();
@ -201,11 +213,13 @@ TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
// Check that even if Unmap() was called early client-side, we correctly surface server-side
// validation errors.
TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
// Oh no! We are calling Unmap too early! However the callback gets fired only after we get
// an answer from the server that the mapAsync call was an error.
@ -214,16 +228,18 @@ TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError)
FlushClient();
// The callback should be called with the server-side error and not the UnmappedBeforeCallback.
// The callback should be called with the server-side error and not the
// UnmappedBeforeCallback.
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
FlushServer();
}
// Check the map read callback is called with "DestroyedBeforeCallback" when the map request would
// have worked, but Destroy was called
// Check the map read callback is called with "DestroyedBeforeCallback" when the map request
// would have worked, but Destroy was called
TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
@ -242,7 +258,8 @@ TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
// The callback shouldn't get called with success, even when the request succeeded on the
// server side
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
EXPECT_CALL(*mockBufferMapCallback,
Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
.Times(1);
FlushServer();
@ -251,20 +268,23 @@ TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
// Check that even if Destroy() was called early client-side, we correctly surface server-side
// validation errors.
TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
// Oh no! We are calling Destroy too early! However the callback gets fired only after we get
// an answer from the server that the mapAsync call was an error.
// Oh no! We are calling Destroy too early! However the callback gets fired only after we
// get an answer from the server that the mapAsync call was an error.
wgpuBufferDestroy(buffer);
EXPECT_CALL(api, BufferDestroy(apiBuffer));
FlushClient();
// The callback should be called with the server-side error and not the DestroyedBeforCallback..
// The callback should be called with the server-side error and not the
// DestroyedBeforCallback..
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
FlushServer();
@ -274,7 +294,8 @@ TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideErro
// mapped range
TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) {
// Successful map
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
@ -291,10 +312,12 @@ TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUncha
FlushServer();
// Map failure while the buffer is already mapped
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -302,13 +325,14 @@ TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUncha
FlushServer();
EXPECT_EQ(bufferContent,
*static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
EXPECT_EQ(bufferContent, *static_cast<const uint32_t*>(
wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
}
// Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback
TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
@ -333,7 +357,8 @@ TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
// Test that the MapReadCallback isn't fired twice the buffer external refcount reaches 0 in the
// callback
TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
@ -371,7 +396,8 @@ class WireBufferMappingWriteTests : public WireBufferMappingTests {
// Check mapping for writing a succesfully created buffer
TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t serverBufferContent = 31337;
uint32_t updatedContent = 4242;
@ -409,11 +435,13 @@ TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
// Check that things work correctly when a validation error happens when mapping the buffer for
// writing
TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -427,7 +455,8 @@ TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
// Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is
// destroyed before the request is finished
TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Return success
uint32_t bufferContent = 31337;
@ -440,7 +469,8 @@ TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
// Destroy before the client gets the success, so the callback is called with
// DestroyedBeforeCallback.
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
EXPECT_CALL(*mockBufferMapCallback,
Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
.Times(1);
wgpuBufferRelease(buffer);
EXPECT_CALL(api, BufferRelease(apiBuffer));
@ -449,10 +479,11 @@ TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
FlushServer();
}
// Check the map write callback is called with "UnmappedBeforeCallback" when the map request would
// have worked, but Unmap was called
// Check the map write callback is called with "UnmappedBeforeCallback" when the map request
// would have worked, but Unmap was called
TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -465,7 +496,8 @@ TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
FlushClient();
// Oh no! We are calling Unmap too early!
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
EXPECT_CALL(*mockBufferMapCallback,
Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
.Times(1);
wgpuBufferUnmap(buffer);
@ -476,7 +508,8 @@ TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
// Check that an error map write while a buffer is already mapped
TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
// Successful map
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -493,10 +526,12 @@ TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
FlushServer();
// Map failure while the buffer is already mapped
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -504,13 +539,14 @@ TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
FlushServer();
EXPECT_NE(nullptr,
static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
EXPECT_NE(nullptr, static_cast<const uint32_t*>(
wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
}
// Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback
TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -532,10 +568,11 @@ TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
FlushClient();
}
// Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in the
// callback
// Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in
// the callback
TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) {
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
uint32_t bufferContent = 31337;
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -623,7 +660,8 @@ TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) {
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
@ -655,11 +693,13 @@ TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -667,8 +707,8 @@ TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
FlushServer();
EXPECT_NE(nullptr,
static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
EXPECT_NE(nullptr, static_cast<const uint32_t*>(
wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
wgpuBufferUnmap(buffer);
EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
@ -676,8 +716,8 @@ TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
FlushClient();
}
// Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client and
// never gets to the server-side.
// Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client
// and never gets to the server-side.
TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
size_t kOOMSize = std::numeric_limits<size_t>::max();
WGPUBuffer apiBuffer = api.GetNewBuffer();
@ -724,7 +764,8 @@ TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
// DeviceLost.
TEST_F(WireBufferMappingTests, MapThenDisconnect) {
SetupBuffer(WGPUMapMode_Write);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, this);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
this);
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
@ -734,7 +775,8 @@ TEST_F(WireBufferMappingTests, MapThenDisconnect) {
FlushClient();
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
.Times(1);
GetWireClient()->Disconnect();
}
@ -745,7 +787,8 @@ TEST_F(WireBufferMappingTests, MapAfterDisconnect) {
GetWireClient()->Disconnect();
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
.Times(1);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this);
}
@ -812,3 +855,5 @@ TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) {
.Times(1 + testData.numRequests);
wgpuBufferRelease(buffer);
}
} // namespace dawn::wire

View File

@ -17,11 +17,17 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
namespace {
using testing::_;
using testing::InvokeWithoutArgs;
using testing::Mock;
using testing::Return;
using testing::Sequence;
using testing::StrEq;
using testing::StrictMock;
// Mock class to add expectations on the wire calling callbacks
class MockCreateComputePipelineAsyncCallback {
public:
@ -375,3 +381,5 @@ TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
FlushClient();
DefaultApiDeviceWasReleased();
}
} // namespace dawn::wire

View File

@ -15,8 +15,10 @@
#include "dawn/tests/MockCallback.h"
#include "dawn/tests/unittests/wire/WireTest.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::Return;
using testing::Sequence;
class WireDestroyObjectTests : public WireTest {};
@ -25,7 +27,8 @@ TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiEncoder));
FlushClient();
@ -56,3 +59,5 @@ TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
wgpuCommandEncoderFinish(encoder, nullptr);
FlushClient(false);
}
} // namespace dawn::wire

View File

@ -18,8 +18,15 @@
#include "dawn/tests/MockCallback.h"
#include "dawn/wire/WireClient.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::Exactly;
using testing::InvokeWithoutArgs;
using testing::MockCallback;
using testing::Return;
using testing::Sequence;
using testing::StrEq;
namespace {
@ -75,7 +82,8 @@ TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
GetWireClient()->Disconnect();
}
// Check that disconnecting the wire client after a device loss does not trigger the callback again.
// Check that disconnecting the wire client after a device loss does not trigger the callback
// again.
TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
@ -172,3 +180,5 @@ TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
// Signal that we already released and cleared callbacks for |apiDevice|
DefaultApiDeviceWasReleased();
}
} // namespace dawn::wire

View File

@ -17,8 +17,15 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::DoAll;
using testing::Mock;
using testing::Return;
using testing::SaveArg;
using testing::StrEq;
using testing::StrictMock;
namespace {
@ -38,7 +45,8 @@ namespace {
MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
};
std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>> mockDevicePopErrorScopeCallback;
std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>>
mockDevicePopErrorScopeCallback;
void ToMockDevicePopErrorScopeCallback(WGPUErrorType type,
const char* message,
void* userdata) {
@ -51,13 +59,17 @@ namespace {
};
std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
void ToMockDeviceLoggingCallback(WGPULoggingType type, const char* message, void* userdata) {
void ToMockDeviceLoggingCallback(WGPULoggingType type,
const char* message,
void* userdata) {
mockDeviceLoggingCallback->Call(type, message, userdata);
}
class MockDeviceLostCallback {
public:
MOCK_METHOD(void, Call, (WGPUDeviceLostReason reason, const char* message, void* userdata));
MOCK_METHOD(void,
Call,
(WGPUDeviceLostReason reason, const char* message, void* userdata));
};
std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
@ -132,7 +144,8 @@ TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
// client side
api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
EXPECT_CALL(*mockDeviceLoggingCallback, Call(WGPULoggingType_Info, StrEq("Some message"), this))
EXPECT_CALL(*mockDeviceLoggingCallback,
Call(WGPULoggingType_Info, StrEq("Some message"), this))
.Times(1);
FlushServer();
@ -233,7 +246,8 @@ TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
FlushClient();
// Incomplete callback called in Device destructor. This is resolved after the end of this test.
// Incomplete callback called in Device destructor. This is resolved after the end of this
// test.
EXPECT_CALL(*mockDevicePopErrorScopeCallback,
Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
.Times(1);
@ -304,3 +318,5 @@ TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
FlushServer();
}
} // namespace dawn::wire

View File

@ -14,8 +14,13 @@
#include "dawn/tests/unittests/wire/WireTest.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::Invoke;
using testing::NotNull;
using testing::Return;
using testing::Unused;
class WireExtensionTests : public WireTest {
public:
@ -82,8 +87,8 @@ TEST_F(WireExtensionTests, MutlipleChainedStructs) {
wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
.WillOnce(Invoke(
[&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
.WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
-> WGPURenderPipeline {
const auto* ext1 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
serverDesc->primitive.nextInChain);
EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType);
@ -106,8 +111,8 @@ TEST_F(WireExtensionTests, MutlipleChainedStructs) {
wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
.WillOnce(Invoke(
[&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
.WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
-> WGPURenderPipeline {
const auto* ext2 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
serverDesc->primitive.nextInChain);
EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType);
@ -239,3 +244,5 @@ TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
}));
FlushClient();
}
} // namespace dawn::wire

View File

@ -17,8 +17,12 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::Exactly;
using testing::Mock;
using testing::Return;
class WireInjectDeviceTests : public WireTest {
public:
@ -49,7 +53,8 @@ TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
@ -82,7 +87,8 @@ TEST_F(WireInjectDeviceTests, InjectExistingID) {
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
@ -103,7 +109,8 @@ TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
// Releasing the device removes a single reference and clears its error callbacks.
wgpuDeviceRelease(reservation.device);
EXPECT_CALL(api, DeviceRelease(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
FlushClient();
@ -144,7 +151,8 @@ TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
@ -174,23 +182,28 @@ TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
// Test that both devices can be reflected.
ASSERT_EQ(serverDevice1, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
ASSERT_EQ(serverDevice1,
GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2,
GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
// Release the first device
wgpuDeviceRelease(reservation1.device);
EXPECT_CALL(api, DeviceRelease(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
FlushClient();
// The first device should no longer reflect, but the second should
ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
ASSERT_EQ(serverDevice2,
GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
}
@ -237,10 +250,12 @@ TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
}
@ -270,3 +285,5 @@ TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
FlushClient();
}
}
} // namespace dawn::wire

View File

@ -17,10 +17,11 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire { namespace {
namespace {
using testing::Mock;
using testing::NotNull;
using testing::Return;
class WireInjectInstanceTests : public WireTest {
public:
@ -116,4 +117,6 @@ namespace {
}
}
} // anonymous namespace
// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
// NOLINTNEXTLINE(readability/namespace)
}} // namespace dawn::wire::

View File

@ -17,8 +17,9 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::Mock;
class WireInjectSwapChainTests : public WireTest {
public:
@ -114,3 +115,5 @@ TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
FlushClient();
}
}
} // namespace dawn::wire

View File

@ -17,8 +17,10 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::Mock;
using testing::Return;
class WireInjectTextureTests : public WireTest {
public:
@ -34,12 +36,14 @@ TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
WGPUTexture apiTexture = api.GetNewTexture();
EXPECT_CALL(api, TextureReference(apiTexture));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
reservation.deviceId, reservation.deviceGeneration));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
wgpuTextureCreateView(reservation.texture, nullptr);
WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiPlaceholderView));
EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr))
.WillOnce(Return(apiPlaceholderView));
FlushClient();
}
@ -58,12 +62,13 @@ TEST_F(WireInjectTextureTests, InjectExistingID) {
WGPUTexture apiTexture = api.GetNewTexture();
EXPECT_CALL(api, TextureReference(apiTexture));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
reservation.deviceId, reservation.deviceGeneration));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
// ID already in use, call fails.
ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
reservation.deviceId,
ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
}
@ -74,8 +79,9 @@ TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
// Injecting the texture adds a reference
WGPUTexture apiTexture = api.GetNewTexture();
EXPECT_CALL(api, TextureReference(apiTexture));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
reservation.deviceId, reservation.deviceGeneration));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
// Releasing the texture removes a single reference.
wgpuTextureRelease(reservation.texture);
@ -112,3 +118,5 @@ TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
FlushClient();
}
}
} // namespace dawn::wire

View File

@ -23,10 +23,16 @@
#include "webgpu/webgpu_cpp.h"
namespace {
namespace dawn::wire { namespace {
using namespace testing;
using namespace dawn::wire;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::MockCallback;
using testing::NotNull;
using testing::Return;
using testing::SetArgPointee;
using testing::StrEq;
using testing::WithArg;
class WireInstanceBasicTest : public WireTest {};
class WireInstanceTests : public WireTest {
@ -284,4 +290,6 @@ namespace {
GetWireClient()->Disconnect();
}
} // anonymous namespace
// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
// NOLINTNEXTLINE(readability/namespace)
}} // namespace dawn::wire::

View File

@ -19,8 +19,16 @@
#include "dawn/wire/client/ClientMemoryTransferService_mock.h"
#include "dawn/wire/server/ServerMemoryTransferService_mock.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::Eq;
using testing::InvokeWithoutArgs;
using testing::Mock;
using testing::Pointee;
using testing::Return;
using testing::StrictMock;
using testing::WithArg;
namespace {
@ -45,9 +53,9 @@ namespace {
// and for mocking failures. The helpers are designed such that for a given run of a test,
// a Serialization expection has a corresponding Deserialization expectation for which the
// serialized data must match.
// There are tests which check for Success for every mapping operation which mock an entire mapping
// operation from map to unmap, and add all MemoryTransferService expectations.
// Tests which check for errors perform the same mapping operations but insert mocked failures for
// There are tests which check for Success for every mapping operation which mock an entire
// mapping operation from map to unmap, and add all MemoryTransferService expectations. Tests
// which check for errors perform the same mapping operations but insert mocked failures for
// various mapping or MemoryTransferService operations.
class WireMemoryTransferServiceTests : public WireTest {
public:
@ -102,7 +110,8 @@ class WireMemoryTransferServiceTests : public WireTest {
using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle;
using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle;
std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(WGPUBufferUsage usage = WGPUBufferUsage_None) {
std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(
WGPUBufferUsage usage = WGPUBufferUsage_None) {
WGPUBufferDescriptor descriptor = {};
descriptor.size = kBufferSize;
descriptor.usage = usage;
@ -185,7 +194,8 @@ class WireMemoryTransferServiceTests : public WireTest {
void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) {
EXPECT_CALL(serverMemoryTransferService,
OnReadHandleSizeOfSerializeDataUpdate(handle, _, _))
.WillOnce(InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
.WillOnce(
InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
EXPECT_CALL(serverMemoryTransferService,
OnReadHandleSerializeDataUpdate(handle, _, _, _, _))
.WillOnce(WithArg<4>([&](void* serializePointer) {
@ -266,8 +276,10 @@ class WireMemoryTransferServiceTests : public WireTest {
void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) {
EXPECT_CALL(clientMemoryTransferService,
OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _))
.WillOnce(InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeDataUpdate(handle, _, _, _))
.WillOnce(
InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
EXPECT_CALL(clientMemoryTransferService,
OnWriteHandleSerializeDataUpdate(handle, _, _, _))
.WillOnce(WithArg<1>([&](void* serializePointer) {
memcpy(serializePointer, &mWriteHandleSerializeDataInfo,
sizeof(mWriteHandleSerializeDataInfo));
@ -277,17 +289,17 @@ class WireMemoryTransferServiceTests : public WireTest {
void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle,
uint32_t expectedData) {
EXPECT_CALL(
serverMemoryTransferService,
OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
EXPECT_CALL(serverMemoryTransferService,
OnWriteHandleDeserializeDataUpdate(
handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
sizeof(mWriteHandleSerializeDataInfo), _, _))
.WillOnce(Return(true));
}
void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) {
EXPECT_CALL(
serverMemoryTransferService,
OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
EXPECT_CALL(serverMemoryTransferService,
OnWriteHandleDeserializeDataUpdate(
handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
sizeof(mWriteHandleSerializeDataInfo), _, _))
.WillOnce(Return(false));
}
@ -313,8 +325,8 @@ class WireMemoryTransferServiceTests : public WireTest {
// mUpdatedBufferContent| after all writes are flushed.
static uint32_t mUpdatedBufferContent;
testing::StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
testing::StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
};
uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337;
@ -340,7 +352,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) {
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// The handle serialize data update on mapAsync cmd
ExpectServerReadHandleSerializeDataUpdate(serverHandle);
@ -416,12 +429,14 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) {
std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Mock a failed callback.
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -489,7 +504,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure
std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// The handle serialize data update on mapAsync cmd
ExpectServerReadHandleSerializeDataUpdate(serverHandle);
@ -535,7 +551,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) {
std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// The handle serialize data update on mapAsync cmd
ExpectServerReadHandleSerializeDataUpdate(serverHandle);
@ -593,7 +610,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) {
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Mock a successful callback.
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -661,7 +679,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
WGPUBuffer buffer;
WGPUBuffer apiBuffer;
// The client should create and serialize a WriteHandle on buffer creation with MapWrite usage.
// The client should create and serialize a WriteHandle on buffer creation with MapWrite
// usage.
ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
ExpectWriteHandleSerialization(clientHandle);
@ -672,12 +691,14 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Mock an error callback.
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
.WillOnce(InvokeWithoutArgs(
[&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
.WillOnce(InvokeWithoutArgs([&]() {
api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
}));
FlushClient();
@ -714,7 +735,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailu
WGPUBuffer buffer;
WGPUBuffer apiBuffer;
// The client should create and serialize a WriteHandle on buffer creation with MapWrite usage.
// The client should create and serialize a WriteHandle on buffer creation with MapWrite
// usage.
ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
ExpectWriteHandleSerialization(clientHandle);
@ -744,7 +766,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailur
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Mock a successful callback.
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -796,7 +819,8 @@ TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) {
FlushClient();
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
nullptr);
// Mock a successful callback.
EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
@ -982,7 +1006,8 @@ TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) {
// Test a buffer with mappedAtCreation and MapRead usage destroy WriteHandle on unmap and switch
// data pointer to ReadHandle
TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) {
// The client should create and serialize a ReadHandle and a WriteHandle on createBufferMapped.
// The client should create and serialize a ReadHandle and a WriteHandle on
// createBufferMapped.
ClientReadHandle* clientReadHandle = ExpectReadHandleCreation();
ExpectReadHandleSerialization(clientReadHandle);
ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true);
@ -1052,3 +1077,5 @@ TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) {
EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
}
} // namespace dawn::wire

View File

@ -14,8 +14,10 @@
#include "dawn/tests/unittests/wire/WireTest.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::Return;
class WireOptionalTests : public WireTest {
public:
@ -50,7 +52,8 @@ TEST_F(WireOptionalTests, OptionalObjectValue) {
wgpuDeviceCreateBindGroup(device, &bgDesc);
WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
EXPECT_CALL(api, DeviceCreateBindGroup(
EXPECT_CALL(api,
DeviceCreateBindGroup(
apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && desc->entryCount == 1 &&
desc->entries[0].binding == 0 &&
@ -149,11 +152,13 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilBack.depthFailOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilBack.depthFailOp ==
WGPUStencilOperation_Keep &&
desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.depthFailOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.depthFailOp ==
WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilReadMask == 0xff &&
desc->depthStencil->stencilWriteMask == 0xff &&
@ -168,8 +173,8 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
// Second case: depthStencil is null.
pipelineDescriptor.depthStencil = nullptr;
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
EXPECT_CALL(api,
DeviceCreateRenderPipeline(
EXPECT_CALL(
api, DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->depthStencil == nullptr;
})))
@ -177,3 +182,5 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
FlushClient();
}
} // namespace dawn::wire

View File

@ -17,8 +17,11 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::InvokeWithoutArgs;
using testing::Mock;
class MockQueueWorkDoneCallback {
public:
@ -57,7 +60,8 @@ TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
}));
FlushClient();
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this))
.Times(1);
FlushServer();
}
@ -139,3 +143,5 @@ TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
// Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
// since it is always destructed after the test TearDown, and we cannot create a new queue obj
// with wgpuDeviceGetQueue
} // namespace dawn::wire

View File

@ -17,8 +17,13 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
using namespace testing;
using namespace dawn::wire;
namespace dawn::wire {
using testing::_;
using testing::InvokeWithoutArgs;
using testing::Mock;
using testing::Return;
using testing::StrictMock;
namespace {
@ -50,7 +55,8 @@ class WireShaderModuleTests : public WireTest {
void SetUp() override {
WireTest::SetUp();
mockCompilationInfoCallback = std::make_unique<StrictMock<MockCompilationInfoCallback>>();
mockCompilationInfoCallback =
std::make_unique<StrictMock<MockCompilationInfoCallback>>();
apiShaderModule = api.GetNewShaderModule();
WGPUShaderModuleDescriptor descriptor = {};
@ -123,8 +129,8 @@ TEST_F(WireShaderModuleTests, GetCompilationInfo) {
FlushServer();
}
// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a device
// loss.
// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a
// device loss.
TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
@ -234,3 +240,5 @@ TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction)
.Times(1 + testData.numRequests);
wgpuShaderModuleRelease(shaderModule);
}
} // namespace dawn::wire

View File

@ -19,8 +19,11 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
using namespace testing;
using namespace dawn::wire;
using testing::_;
using testing::AnyNumber;
using testing::Exactly;
using testing::Mock;
using testing::Return;
WireTest::WireTest() {
}
@ -28,11 +31,11 @@ WireTest::WireTest() {
WireTest::~WireTest() {
}
client::MemoryTransferService* WireTest::GetClientMemoryTransferService() {
dawn::wire::client::MemoryTransferService* WireTest::GetClientMemoryTransferService() {
return nullptr;
}
server::MemoryTransferService* WireTest::GetServerMemoryTransferService() {
dawn::wire::server::MemoryTransferService* WireTest::GetServerMemoryTransferService() {
return nullptr;
}
@ -50,19 +53,19 @@ void WireTest::SetUp() {
mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>(mWireServer.get());
WireServerDescriptor serverDesc = {};
dawn::wire::WireServerDescriptor serverDesc = {};
serverDesc.procs = &mockProcs;
serverDesc.serializer = mS2cBuf.get();
serverDesc.memoryTransferService = GetServerMemoryTransferService();
mWireServer.reset(new WireServer(serverDesc));
mWireServer.reset(new dawn::wire::WireServer(serverDesc));
mC2sBuf->SetHandler(mWireServer.get());
WireClientDescriptor clientDesc = {};
dawn::wire::WireClientDescriptor clientDesc = {};
clientDesc.serializer = mC2sBuf.get();
clientDesc.memoryTransferService = GetClientMemoryTransferService();
mWireClient.reset(new WireClient(clientDesc));
mWireClient.reset(new dawn::wire::WireClient(clientDesc));
mS2cBuf->SetHandler(mWireClient.get());
dawnProcSetProcs(&dawn::wire::client::GetProcs());

View File

@ -26,6 +26,8 @@
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
#include "dawn/utils/WGPUHelpers.h"
namespace dawn::native::d3d12 {
constexpr uint32_t kRTSize = 4;
// Pooling tests are required to advance the GPU completed serial to reuse heaps.
@ -33,8 +35,6 @@ constexpr uint32_t kRTSize = 4;
// should be updated if the internals of Tick() change.
constexpr uint32_t kFrameDepth = 2;
using namespace dawn::native::d3d12;
class D3D12DescriptorHeapTests : public DawnTest {
protected:
void SetUp() override {
@ -136,7 +136,8 @@ TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
renderPipelineDescriptor.vertex.module = mSimpleVSModule;
renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::RenderPipeline renderPipeline =
device.CreateRenderPipeline(&renderPipelineDescriptor);
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
@ -157,7 +158,8 @@ TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
for (uint32_t i = 0; i < heapSize + 1; ++i) {
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
pass.SetBindGroup(0,
utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, uniformBuffer, 0, sizeof(redColor)}}));
pass.Draw(3);
}
@ -175,8 +177,8 @@ TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
// Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating a
// sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
// Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating
// a sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
// because the sampler heap allocations are de-duplicated.
renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
@stage(vertex) fn main() -> @builtin(position) vec4<f32> {
@ -190,7 +192,8 @@ TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
return vec4<f32>(0.0, 0.0, 0.0, 0.0);
})");
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::RenderPipeline renderPipeline =
device.CreateRenderPipeline(&renderPipelineDescriptor);
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
wgpu::Sampler sampler = device.CreateSampler();
@ -209,7 +212,8 @@ TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
pass.SetPipeline(renderPipeline);
for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
pass.SetBindGroup(0,
utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}}));
pass.Draw(3);
}
@ -236,7 +240,8 @@ TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
// Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always unique.
// Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always
// unique.
for (uint32_t i = 0; i < kFrameDepth; i++) {
EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
@ -249,8 +254,8 @@ TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
}
// Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
// (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in the
// check.
// (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in
// the check.
for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
@ -324,7 +329,8 @@ TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
heapSerial + HeapVersionID(kNumOfSwitches));
EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
// Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|.
// Ensure switched-over heaps can be recycled by advancing the GPU by at-least
// |kFrameDepth|.
for (uint32_t i = 0; i < kFrameDepth; i++) {
mD3DDevice->APITick();
}
@ -415,7 +421,8 @@ TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
// Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|.
// Ensure switched-over heaps can be recycled by advancing the GPU by at-least
// |kFrameDepth|.
for (uint32_t i = 0; i < kFrameDepth; i++) {
mD3DDevice->APITick();
}
@ -433,10 +440,11 @@ TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
// Verify encoding multiple heaps worth of bindgroups.
// Shader-visible heaps will switch out |kNumOfHeaps| times.
TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
// This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup that
// has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize| draws,
// the result is the arithmetic sum of the sequence after the framebuffer is blended by
// accumulation. By checking for this sum, we ensure each bindgroup was encoded correctly.
// This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup
// that has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize|
// draws, the result is the arithmetic sum of the sequence after the framebuffer is blended
// by accumulation. By checking for this sum, we ensure each bindgroup was encoded
// correctly.
DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
@ -469,8 +477,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
const uint32_t heapSize =
mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
->GetShaderVisibleHeapSizeForTesting();
constexpr uint32_t kNumOfHeaps = 2;
@ -479,8 +487,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
std::vector<wgpu::BindGroup> bindGroups;
for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
const float color = i + 1;
wgpu::Buffer uniformBuffer =
utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(device, &color, sizeof(color),
wgpu::BufferUsage::Uniform);
bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, uniformBuffer}}));
}
@ -562,15 +570,16 @@ TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, uniformBuffer}}));
bindGroups.push_back(utils::MakeBindGroup(
device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}}));
}
std::array<float, 4> redColor = {1, 0, 0, 1};
wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
bindGroups.push_back(
utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, lastUniformBuffer, 0, sizeof(redColor)}}));
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@ -596,8 +605,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
// Verify encoding a heaps worth of bindgroups plus one more then reuse the first
// bindgroup in the same submit.
// Shader-visible heaps should switch out once then re-encode the first descriptor at a new offset
// in the heap.
// Shader-visible heaps should switch out once then re-encode the first descriptor at a new
// offset in the heap.
TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
@ -615,11 +624,12 @@ TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
std::vector<wgpu::BindGroup> bindGroups = {
utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
{{0, firstUniformBuffer, 0, sizeof(redColor)}})};
const uint32_t heapSize =
mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
->GetShaderVisibleHeapSizeForTesting();
for (uint32_t i = 0; i < heapSize; i++) {
const std::array<float, 4>& fillColor = GetSolidColor(i + 1); // Avoid black
@ -678,11 +688,12 @@ TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
std::vector<wgpu::BindGroup> bindGroups = {
utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
{{0, firstUniformBuffer, 0, sizeof(redColor)}})};
const uint32_t heapSize =
mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
->GetShaderVisibleHeapSizeForTesting();
for (uint32_t i = 0; i < heapSize; i++) {
std::array<float, 4> fillColor = GetSolidColor(i + 1); // Avoid black
@ -752,8 +763,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
descriptor.sampleCount = 1;
descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
descriptor.mipLevelCount = 1;
descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
wgpu::TextureUsage::CopySrc;
descriptor.usage = wgpu::TextureUsage::TextureBinding |
wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
wgpu::Texture texture = device.CreateTexture(&descriptor);
wgpu::TextureView textureView = texture.CreateView();
@ -809,7 +820,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
return textureSample(texture0, sampler0, FragCoord.xy) + buffer0.color;
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
utils::BasicRenderPass renderPass =
utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
@ -828,16 +840,17 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
ShaderVisibleDescriptorAllocator* samplerAllocator =
mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
const HeapVersionID viewHeapSerial = viewAllocator->GetShaderVisibleHeapSerialForTesting();
const HeapVersionID viewHeapSerial =
viewAllocator->GetShaderVisibleHeapSerialForTesting();
const HeapVersionID samplerHeapSerial =
samplerAllocator->GetShaderVisibleHeapSerialForTesting();
const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();
// "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
// group. This means the count of heaps switches is determined by the total number of views
// to encode. Compute the number of bindgroups to encode by counting the required views for
// |kNumOfViewHeaps| heaps worth.
// group. This means the count of heaps switches is determined by the total number of
// views to encode. Compute the number of bindgroups to encode by counting the required
// views for |kNumOfViewHeaps| heaps worth.
constexpr uint32_t kViewsPerBindGroup = 3;
constexpr uint32_t kNumOfViewHeaps = 5;
@ -850,7 +863,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
bindGroups.push_back(
utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
{{0, transformBuffer, 0, sizeof(transform)},
{1, sampler},
{2, textureView},
@ -861,7 +875,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
bindGroups.push_back(
utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
{{0, transformBuffer, 0, sizeof(transform)},
{1, sampler},
{2, textureView},
@ -1051,3 +1066,5 @@ TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
D3D12Backend(),
D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
} // namespace dawn::native::d3d12

View File

@ -20,8 +20,9 @@
#include "dawn/tests/DawnTest.h"
#include "dawn/utils/WGPUHelpers.h"
namespace dawn::native::d3d12 {
namespace {
class ExpectBetweenTimestamps : public detail::Expectation {
class ExpectBetweenTimestamps : public ::detail::Expectation {
public:
~ExpectBetweenTimestamps() override = default;
@ -51,8 +52,6 @@ namespace {
} // anonymous namespace
using namespace dawn::native::d3d12;
class D3D12GPUTimestampCalibrationTests : public DawnTest {
protected:
void SetUp() override {
@ -86,8 +85,8 @@ TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
wgpu::BufferDescriptor bufferDescriptor;
bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
bufferDescriptor.usage =
wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
bufferDescriptor.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc |
wgpu::BufferUsage::CopyDst;
wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@ -117,3 +116,5 @@ TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
D3D12Backend({"disable_timestamp_query_conversion"}));
} // namespace dawn::native::d3d12

View File

@ -18,7 +18,7 @@
#include "dawn/native/d3d12/TextureD3D12.h"
#include "dawn/tests/DawnTest.h"
using namespace dawn::native::d3d12;
namespace dawn::native::d3d12 {
class D3D12ResourceHeapTests : public DawnTest {
protected:
@ -106,3 +106,5 @@ TEST_P(D3D12ResourceHeapTests, AlignUBO) {
}
DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
} // namespace dawn::native::d3d12