diff --git a/src/dawn/CPPLINT.cfg b/src/dawn/CPPLINT.cfg index c03a353c2f..a62541f5c5 100644 --- a/src/dawn/CPPLINT.cfg +++ b/src/dawn/CPPLINT.cfg @@ -1,3 +1,2 @@ -filter=-build/namespaces filter=-readability/todo filter=-runtime/indentation_namespace diff --git a/src/dawn/native/metal/DeviceMTL.h b/src/dawn/native/metal/DeviceMTL.h index cbd362c9c3..9794dd5d86 100644 --- a/src/dawn/native/metal/DeviceMTL.h +++ b/src/dawn/native/metal/DeviceMTL.h @@ -32,9 +32,7 @@ namespace dawn::native::metal { - namespace { - struct KalmanInfo; - } + struct KalmanInfo; class Device final : public DeviceBase { public: diff --git a/src/dawn/native/metal/DeviceMTL.mm b/src/dawn/native/metal/DeviceMTL.mm index 375f1cbc27..fdff8a05c2 100644 --- a/src/dawn/native/metal/DeviceMTL.mm +++ b/src/dawn/native/metal/DeviceMTL.mm @@ -42,18 +42,18 @@ namespace dawn::native::metal { + struct KalmanInfo { + float filterValue; // The estimation value + float kalmanGain; // The kalman gain + float R; // The covariance of the observation noise + float P; // The a posteriori estimate covariance + }; + namespace { // The time interval for each round of kalman filter static constexpr uint64_t kFilterIntervalInMs = static_cast(NSEC_PER_SEC / 10); - struct KalmanInfo { - float filterValue; // The estimation value - float kalmanGain; // The kalman gain - float R; // The covariance of the observation noise - float P; // The a posteriori estimate covariance - }; - // A simplified kalman filter for estimating timestamp period based on measured values float KalmanFilter(KalmanInfo* info, float measuredValue) { // Optimize kalman gain diff --git a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp index de9a1c55a8..f283803244 100644 --- a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp +++ b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp @@ -45,7 +45,9 @@ namespace { - using namespace testing; + using testing::_; + using testing::MockCallback; + using 
testing::SaveArg; class AdapterDiscoveryTests : public ::testing::Test {}; diff --git a/src/dawn/tests/end2end/DeviceLostTests.cpp b/src/dawn/tests/end2end/DeviceLostTests.cpp index 150081b329..188f8ddc98 100644 --- a/src/dawn/tests/end2end/DeviceLostTests.cpp +++ b/src/dawn/tests/end2end/DeviceLostTests.cpp @@ -22,7 +22,9 @@ #include "dawn/utils/ComboRenderPipelineDescriptor.h" #include "dawn/utils/WGPUHelpers.h" -using namespace testing; +using testing::_; +using testing::Exactly; +using testing::MockCallback; class MockDeviceLostCallback { public: diff --git a/src/dawn/tests/end2end/QueueTimelineTests.cpp b/src/dawn/tests/end2end/QueueTimelineTests.cpp index 879728093c..b07ca08d54 100644 --- a/src/dawn/tests/end2end/QueueTimelineTests.cpp +++ b/src/dawn/tests/end2end/QueueTimelineTests.cpp @@ -17,7 +17,7 @@ #include "dawn/tests/DawnTest.h" #include "gmock/gmock.h" -using namespace testing; +using testing::InSequence; class MockMapCallback { public: @@ -67,7 +67,7 @@ class QueueTimelineTests : public DawnTest { // when queue.OnSubmittedWorkDone is called after mMapReadBuffer.MapAsync. The callback order should // happen in the order the functions are called. TEST_P(QueueTimelineTests, MapRead_OnWorkDone) { - testing::InSequence sequence; + InSequence sequence; EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1); EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1); @@ -83,7 +83,7 @@ TEST_P(QueueTimelineTests, MapRead_OnWorkDone) { // queue.Signal is called before mMapReadBuffer.MapAsync. The callback order should // happen in the order the functions are called. 
TEST_P(QueueTimelineTests, OnWorkDone_MapRead) { - testing::InSequence sequence; + InSequence sequence; EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1); EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1); diff --git a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp index eb0c67efab..2c76322384 100644 --- a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp +++ b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp @@ -17,313 +17,315 @@ #include "dawn/native/BuddyAllocator.h" #include "gtest/gtest.h" -using namespace dawn::native; +namespace dawn::native { -constexpr uint64_t BuddyAllocator::kInvalidOffset; + constexpr uint64_t BuddyAllocator::kInvalidOffset; -// Verify the buddy allocator with a basic test. -TEST(BuddyAllocatorTests, SingleBlock) { - // After one 32 byte allocation: - // - // Level -------------------------------- - // 0 32 | A | - // -------------------------------- - // - constexpr uint64_t maxBlockSize = 32; - BuddyAllocator allocator(maxBlockSize); - - // Check that we cannot allocate a oversized block. - ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset); - - // Check that we cannot allocate a zero sized block. - ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset); - - // Allocate the block. - uint64_t blockOffset = allocator.Allocate(maxBlockSize); - ASSERT_EQ(blockOffset, 0u); - - // Check that we are full. - ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); - - // Deallocate the block. - allocator.Deallocate(blockOffset); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); -} - -// Verify multiple allocations succeeds using a buddy allocator. 
-TEST(BuddyAllocatorTests, MultipleBlocks) { - // Fill every level in the allocator (order-n = 2^n) - const uint64_t maxBlockSize = (1ull << 16); - for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) { + // Verify the buddy allocator with a basic test. + TEST(BuddyAllocatorTests, SingleBlock) { + // After one 32 byte allocation: + // + // Level -------------------------------- + // 0 32 | A | + // -------------------------------- + // + constexpr uint64_t maxBlockSize = 32; BuddyAllocator allocator(maxBlockSize); - uint64_t blockSize = (1ull << order); - for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) { - ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki); + // Check that we cannot allocate a oversized block. + ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset); + + // Check that we cannot allocate a zero sized block. + ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset); + + // Allocate the block. + uint64_t blockOffset = allocator.Allocate(maxBlockSize); + ASSERT_EQ(blockOffset, 0u); + + // Check that we are full. + ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); + + // Deallocate the block. + allocator.Deallocate(blockOffset); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + } + + // Verify multiple allocations succeeds using a buddy allocator. 
+ TEST(BuddyAllocatorTests, MultipleBlocks) { + // Fill every level in the allocator (order-n = 2^n) + const uint64_t maxBlockSize = (1ull << 16); + for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) { + BuddyAllocator allocator(maxBlockSize); + + uint64_t blockSize = (1ull << order); + for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) { + ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki); + } } } -} -// Verify that a single allocation succeeds using a buddy allocator. -TEST(BuddyAllocatorTests, SingleSplitBlock) { - // After one 8 byte allocation: - // - // Level -------------------------------- - // 0 32 | S | - // -------------------------------- - // 1 16 | S | F | S - split - // -------------------------------- F - free - // 2 8 | A | F | | | A - allocated - // -------------------------------- - // - constexpr uint64_t maxBlockSize = 32; - BuddyAllocator allocator(maxBlockSize); + // Verify that a single allocation succeeds using a buddy allocator. + TEST(BuddyAllocatorTests, SingleSplitBlock) { + // After one 8 byte allocation: + // + // Level -------------------------------- + // 0 32 | S | + // -------------------------------- + // 1 16 | S | F | S - split + // -------------------------------- F - free + // 2 8 | A | F | | | A - allocated + // -------------------------------- + // + constexpr uint64_t maxBlockSize = 32; + BuddyAllocator allocator(maxBlockSize); - // Allocate block (splits two blocks). - uint64_t blockOffset = allocator.Allocate(8); - ASSERT_EQ(blockOffset, 0u); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + // Allocate block (splits two blocks). + uint64_t blockOffset = allocator.Allocate(8); + ASSERT_EQ(blockOffset, 0u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); - // Deallocate block (merges two blocks). 
- allocator.Deallocate(blockOffset); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + // Deallocate block (merges two blocks). + allocator.Deallocate(blockOffset); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); - // Check that we cannot allocate a block that is oversized. - ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset); + // Check that we cannot allocate a block that is oversized. + ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset); - // Re-allocate the largest block allowed after merging. - blockOffset = allocator.Allocate(maxBlockSize); - ASSERT_EQ(blockOffset, 0u); + // Re-allocate the largest block allowed after merging. + blockOffset = allocator.Allocate(maxBlockSize); + ASSERT_EQ(blockOffset, 0u); - allocator.Deallocate(blockOffset); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); -} - -// Verify that a multiple allocated blocks can be removed in the free-list. -TEST(BuddyAllocatorTests, MultipleSplitBlocks) { - // After four 16 byte allocations: - // - // Level -------------------------------- - // 0 32 | S | - // -------------------------------- - // 1 16 | S | S | S - split - // -------------------------------- F - free - // 2 8 | Aa | Ab | Ac | Ad | A - allocated - // -------------------------------- - // - constexpr uint64_t maxBlockSize = 32; - BuddyAllocator allocator(maxBlockSize); - - // Populates the free-list with four blocks at Level2. - - // Allocate "a" block (two splits). - constexpr uint64_t blockSizeInBytes = 8; - uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes); - ASSERT_EQ(blockOffsetA, 0u); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); - - // Allocate "b" block. - uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes); - ASSERT_EQ(blockOffsetB, blockSizeInBytes); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); - - // Allocate "c" block (three splits). 
- uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes); - ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); - - // Allocate "d" block. - uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes); - ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); - - // Deallocate "d" block. - // FreeList[Level2] = [BlockD] -> x - allocator.Deallocate(blockOffsetD); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); - - // Deallocate "b" block. - // FreeList[Level2] = [BlockB] -> [BlockD] -> x - allocator.Deallocate(blockOffsetB); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); - - // Deallocate "c" block (one merges). - // FreeList[Level1] = [BlockCD] -> x - // FreeList[Level2] = [BlockB] -> x - allocator.Deallocate(blockOffsetC); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); - - // Deallocate "a" block (two merges). - // FreeList[Level0] = [BlockABCD] -> x - allocator.Deallocate(blockOffsetA); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); -} - -// Verify the buddy allocator can handle allocations of various sizes. 
-TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) { - // After four Level4-to-Level1 byte then one L4 block allocations: - // - // Level ----------------------------------------------------------------- - // 0 512 | S | - // ----------------------------------------------------------------- - // 1 256 | S | A | - // ----------------------------------------------------------------- - // 2 128 | S | A | | | - // ----------------------------------------------------------------- - // 3 64 | S | A | | | | | | | - // ----------------------------------------------------------------- - // 4 32 | A | F | | | | | | | | | | | | | | | - // ----------------------------------------------------------------- - // - constexpr uint64_t maxBlockSize = 512; - BuddyAllocator allocator(maxBlockSize); - - ASSERT_EQ(allocator.Allocate(32), 0ull); - ASSERT_EQ(allocator.Allocate(64), 64ull); - ASSERT_EQ(allocator.Allocate(128), 128ull); - ASSERT_EQ(allocator.Allocate(256), 256ull); - - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); - - // Fill in the last free block. - ASSERT_EQ(allocator.Allocate(32), 32ull); - - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); - - // Check if we're full. - ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset); -} - -// Verify very small allocations using a larger allocator works correctly. -TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) { - // After allocating four pairs of one 64 byte block and one 32 byte block. 
- // - // Level ----------------------------------------------------------------- - // 0 512 | S | - // ----------------------------------------------------------------- - // 1 256 | S | S | - // ----------------------------------------------------------------- - // 2 128 | S | S | S | F | - // ----------------------------------------------------------------- - // 3 64 | A | S | A | A | S | A | | | - // ----------------------------------------------------------------- - // 4 32 | | | A | A | | | | | A | A | | | | | | | - // ----------------------------------------------------------------- - // - constexpr uint64_t maxBlockSize = 512; - BuddyAllocator allocator(maxBlockSize); - - ASSERT_EQ(allocator.Allocate(64), 0ull); - ASSERT_EQ(allocator.Allocate(32), 64ull); - - ASSERT_EQ(allocator.Allocate(64), 128ull); - ASSERT_EQ(allocator.Allocate(32), 96ull); - - ASSERT_EQ(allocator.Allocate(64), 192ull); - ASSERT_EQ(allocator.Allocate(32), 256ull); - - ASSERT_EQ(allocator.Allocate(64), 320ull); - ASSERT_EQ(allocator.Allocate(32), 288ull); - - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); -} - -// Verify the buddy allocator can deal with bad fragmentation. -TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) { - // Allocate every leaf then de-allocate every other of those allocations. 
- // - // Level ----------------------------------------------------------------- - // 0 512 | S | - // ----------------------------------------------------------------- - // 1 256 | S | S | - // ----------------------------------------------------------------- - // 2 128 | S | S | S | S | - // ----------------------------------------------------------------- - // 3 64 | S | S | S | S | S | S | S | S | - // ----------------------------------------------------------------- - // 4 32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F | - // ----------------------------------------------------------------- - // - constexpr uint64_t maxBlockSize = 512; - BuddyAllocator allocator(maxBlockSize); - - // Allocate leaf blocks - constexpr uint64_t minBlockSizeInBytes = 32; - std::vector blockOffsets; - for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) { - blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes)); + allocator.Deallocate(blockOffset); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); } - // Free every other leaf block. - for (size_t count = 1; count < blockOffsets.size(); count += 2) { - allocator.Deallocate(blockOffsets[count]); + // Verify that a multiple allocated blocks can be removed in the free-list. + TEST(BuddyAllocatorTests, MultipleSplitBlocks) { + // After four 16 byte allocations: + // + // Level -------------------------------- + // 0 32 | S | + // -------------------------------- + // 1 16 | S | S | S - split + // -------------------------------- F - free + // 2 8 | Aa | Ab | Ac | Ad | A - allocated + // -------------------------------- + // + constexpr uint64_t maxBlockSize = 32; + BuddyAllocator allocator(maxBlockSize); + + // Populates the free-list with four blocks at Level2. + + // Allocate "a" block (two splits). 
+ constexpr uint64_t blockSizeInBytes = 8; + uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes); + ASSERT_EQ(blockOffsetA, 0u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + + // Allocate "b" block. + uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes); + ASSERT_EQ(blockOffsetB, blockSizeInBytes); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + + // Allocate "c" block (three splits). + uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes); + ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + + // Allocate "d" block. + uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes); + ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); + + // Deallocate "d" block. + // FreeList[Level2] = [BlockD] -> x + allocator.Deallocate(blockOffsetD); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + + // Deallocate "b" block. + // FreeList[Level2] = [BlockB] -> [BlockD] -> x + allocator.Deallocate(blockOffsetB); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + + // Deallocate "c" block (one merges). + // FreeList[Level1] = [BlockCD] -> x + // FreeList[Level2] = [BlockB] -> x + allocator.Deallocate(blockOffsetC); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + + // Deallocate "a" block (two merges). + // FreeList[Level0] = [BlockABCD] -> x + allocator.Deallocate(blockOffsetA); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); } - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u); -} + // Verify the buddy allocator can handle allocations of various sizes. 
+ TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) { + // After four Level4-to-Level1 byte then one L4 block allocations: + // + // Level ----------------------------------------------------------------- + // 0 512 | S | + // ----------------------------------------------------------------- + // 1 256 | S | A | + // ----------------------------------------------------------------- + // 2 128 | S | A | | | + // ----------------------------------------------------------------- + // 3 64 | S | A | | | | | | | + // ----------------------------------------------------------------- + // 4 32 | A | F | | | | | | | | | | | | | | | + // ----------------------------------------------------------------- + // + constexpr uint64_t maxBlockSize = 512; + BuddyAllocator allocator(maxBlockSize); -// Verify the buddy allocator can deal with multiple allocations with mixed alignments. -TEST(BuddyAllocatorTests, SameSizeVariousAlignment) { - // After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8 byte - // alignment. - // - // Level -------------------------------- - // 0 32 | S | - // -------------------------------- - // 1 16 | S | S | S - split - // -------------------------------- F - free - // 2 8 | Aa | F | Ab | Ac | A - allocated - // -------------------------------- - // - BuddyAllocator allocator(32); + ASSERT_EQ(allocator.Allocate(32), 0ull); + ASSERT_EQ(allocator.Allocate(64), 64ull); + ASSERT_EQ(allocator.Allocate(128), 128ull); + ASSERT_EQ(allocator.Allocate(256), 256ull); - // Allocate Aa (two splits). - ASSERT_EQ(allocator.Allocate(8, 16), 0u); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); - // Allocate Ab (skip Aa buddy due to alignment and perform another split). - ASSERT_EQ(allocator.Allocate(8, 16), 16u); + // Fill in the last free block. 
+ ASSERT_EQ(allocator.Allocate(32), 32ull); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); - // Check that we cannot fit another. - ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset); + // Check if we're full. + ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset); + } - // Allocate Ac (zero splits and Ab's buddy is now the first free block). - ASSERT_EQ(allocator.Allocate(8, 8), 24u); + // Verify very small allocations using a larger allocator works correctly. + TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) { + // After allocating four pairs of one 64 byte block and one 32 byte block. + // + // Level ----------------------------------------------------------------- + // 0 512 | S | + // ----------------------------------------------------------------- + // 1 256 | S | S | + // ----------------------------------------------------------------- + // 2 128 | S | S | S | F | + // ----------------------------------------------------------------- + // 3 64 | A | S | A | A | S | A | | | + // ----------------------------------------------------------------- + // 4 32 | | | A | A | | | | | A | A | | | | | | | + // ----------------------------------------------------------------- + // + constexpr uint64_t maxBlockSize = 512; + BuddyAllocator allocator(maxBlockSize); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); -} + ASSERT_EQ(allocator.Allocate(64), 0ull); + ASSERT_EQ(allocator.Allocate(32), 64ull); -// Verify the buddy allocator can deal with multiple allocations with equal alignments. -TEST(BuddyAllocatorTests, VariousSizeSameAlignment) { - // After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4 byte - // alignment. 
- // - // Level -------------------------------- - // 0 32 | S | - // -------------------------------- - // 1 16 | S | Ac | S - split - // -------------------------------- F - free - // 2 8 | Aa | Ab | | A - allocated - // -------------------------------- - // - constexpr uint64_t maxBlockSize = 32; - constexpr uint64_t alignment = 4; - BuddyAllocator allocator(maxBlockSize); + ASSERT_EQ(allocator.Allocate(64), 128ull); + ASSERT_EQ(allocator.Allocate(32), 96ull); - // Allocate block Aa (two splits) - ASSERT_EQ(allocator.Allocate(8, alignment), 0u); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + ASSERT_EQ(allocator.Allocate(64), 192ull); + ASSERT_EQ(allocator.Allocate(32), 256ull); - // Allocate block Ab (Aa's buddy) - ASSERT_EQ(allocator.Allocate(8, alignment), 8u); + ASSERT_EQ(allocator.Allocate(64), 320ull); + ASSERT_EQ(allocator.Allocate(32), 288ull); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + } - // Check that we can still allocate Ac. - ASSERT_EQ(allocator.Allocate(16, alignment), 16ull); + // Verify the buddy allocator can deal with bad fragmentation. + TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) { + // Allocate every leaf then de-allocate every other of those allocations. 
+ // + // Level ----------------------------------------------------------------- + // 0 512 | S | + // ----------------------------------------------------------------- + // 1 256 | S | S | + // ----------------------------------------------------------------- + // 2 128 | S | S | S | S | + // ----------------------------------------------------------------- + // 3 64 | S | S | S | S | S | S | S | S | + // ----------------------------------------------------------------- + // 4 32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F | + // ----------------------------------------------------------------- + // + constexpr uint64_t maxBlockSize = 512; + BuddyAllocator allocator(maxBlockSize); - ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); -} + // Allocate leaf blocks + constexpr uint64_t minBlockSizeInBytes = 32; + std::vector blockOffsets; + for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) { + blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes)); + } + + // Free every other leaf block. + for (size_t count = 1; count < blockOffsets.size(); count += 2) { + allocator.Deallocate(blockOffsets[count]); + } + + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u); + } + + // Verify the buddy allocator can deal with multiple allocations with mixed alignments. + TEST(BuddyAllocatorTests, SameSizeVariousAlignment) { + // After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8 + // byte alignment. + // + // Level -------------------------------- + // 0 32 | S | + // -------------------------------- + // 1 16 | S | S | S - split + // -------------------------------- F - free + // 2 8 | Aa | F | Ab | Ac | A - allocated + // -------------------------------- + // + BuddyAllocator allocator(32); + + // Allocate Aa (two splits). 
+ ASSERT_EQ(allocator.Allocate(8, 16), 0u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + + // Allocate Ab (skip Aa buddy due to alignment and perform another split). + ASSERT_EQ(allocator.Allocate(8, 16), 16u); + + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + + // Check that we cannot fit another. + ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset); + + // Allocate Ac (zero splits and Ab's buddy is now the first free block). + ASSERT_EQ(allocator.Allocate(8, 8), 24u); + + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + } + + // Verify the buddy allocator can deal with multiple allocations with equal alignments. + TEST(BuddyAllocatorTests, VariousSizeSameAlignment) { + // After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4 + // byte alignment. + // + // Level -------------------------------- + // 0 32 | S | + // -------------------------------- + // 1 16 | S | Ac | S - split + // -------------------------------- F - free + // 2 8 | Aa | Ab | | A - allocated + // -------------------------------- + // + constexpr uint64_t maxBlockSize = 32; + constexpr uint64_t alignment = 4; + BuddyAllocator allocator(maxBlockSize); + + // Allocate block Aa (two splits) + ASSERT_EQ(allocator.Allocate(8, alignment), 0u); + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u); + + // Allocate block Ab (Aa's buddy) + ASSERT_EQ(allocator.Allocate(8, alignment), 8u); + + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u); + + // Check that we can still allocate Ac. 
+ ASSERT_EQ(allocator.Allocate(16, alignment), 16ull); + + ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u); + } + +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp index f38e3e9edc..c70af255ad 100644 --- a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp +++ b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp @@ -22,440 +22,443 @@ #include "dawn/native/PooledResourceMemoryAllocator.h" #include "dawn/native/ResourceHeapAllocator.h" -using namespace dawn::native; +namespace dawn::native { -class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator { - public: - ResultOrError> AllocateResourceHeap(uint64_t size) override { - return std::make_unique(); - } - void DeallocateResourceHeap(std::unique_ptr allocation) override { - } -}; + class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator { + public: + ResultOrError> AllocateResourceHeap( + uint64_t size) override { + return std::make_unique(); + } + void DeallocateResourceHeap(std::unique_ptr allocation) override { + } + }; -class PlaceholderBuddyResourceAllocator { - public: - PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize) - : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) { + class PlaceholderBuddyResourceAllocator { + public: + PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize) + : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) { + } + + PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, + uint64_t memorySize, + ResourceHeapAllocator* heapAllocator) + : mAllocator(maxBlockSize, memorySize, heapAllocator) { + } + + ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) { + ResultOrError result = + mAllocator.Allocate(allocationSize, alignment); + return (result.IsSuccess()) ? 
result.AcquireSuccess() : ResourceMemoryAllocation{}; + } + + void Deallocate(ResourceMemoryAllocation& allocation) { + mAllocator.Deallocate(allocation); + } + + uint64_t ComputeTotalNumOfHeapsForTesting() const { + return mAllocator.ComputeTotalNumOfHeapsForTesting(); + } + + private: + PlaceholderResourceHeapAllocator mHeapAllocator; + BuddyMemoryAllocator mAllocator; + }; + + // Verify a single resource allocation in a single heap. + TEST(BuddyMemoryAllocatorTests, SingleHeap) { + // After one 128 byte resource allocation: + // + // max block size -> --------------------------- + // | A1/H0 | Hi - Heap at index i + // max heap size -> --------------------------- An - Resource allocation n + // + constexpr uint64_t heapSize = 128; + constexpr uint64_t maxBlockSize = heapSize; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); + + // Cannot allocate greater than heap size. + ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); + ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); + + // Allocate one 128 byte allocation (same size as heap). + ResourceMemoryAllocation allocation1 = allocator.Allocate(128); + ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); + ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + + // Cannot allocate when allocator is full. + invalidAllocation = allocator.Allocate(128); + ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); + + allocator.Deallocate(allocation1); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); } - PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, - uint64_t memorySize, - ResourceHeapAllocator* heapAllocator) - : mAllocator(maxBlockSize, memorySize, heapAllocator) { + // Verify that multiple allocation are created in separate heaps. 
+ TEST(BuddyMemoryAllocatorTests, MultipleHeaps) { + // After two 128 byte resource allocations: + // + // max block size -> --------------------------- + // | | Hi - Heap at index i + // max heap size -> --------------------------- An - Resource allocation n + // | A1/H0 | A2/H1 | + // --------------------------- + // + constexpr uint64_t maxBlockSize = 256; + constexpr uint64_t heapSize = 128; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); + + // Cannot allocate greater than heap size. + ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); + ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); + + // Cannot allocate greater than max block size. + invalidAllocation = allocator.Allocate(maxBlockSize * 2); + ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); + + // Allocate two 128 byte allocations. + ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize); + ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); + ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // First allocation creates first heap. + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + + ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize); + ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize); + ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // Second allocation creates second heap. 
+ ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); + ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); + + // Deallocate both allocations + allocator.Deallocate(allocation1); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0 + + allocator.Deallocate(allocation2); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1 } - ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) { - ResultOrError result = - mAllocator.Allocate(allocationSize, alignment); - return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{}; + // Verify multiple sub-allocations can re-use heaps. + TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) { + // After two 64 byte allocations with 128 byte heaps. + // + // max block size -> --------------------------- + // | | Hi - Heap at index i + // max heap size -> --------------------------- An - Resource allocation n + // | H0 | H1 | + // --------------------------- + // | A1 | A2 | A3 | | + // --------------------------- + // + constexpr uint64_t maxBlockSize = 256; + constexpr uint64_t heapSize = 128; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); + + // Allocate two 64 byte sub-allocations. + ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2); + ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); + ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // First sub-allocation creates first heap. + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + + ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2); + ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2); + ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // Second allocation re-uses first heap. 
+ ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); + + ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2); + ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize); + ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // Third allocation creates second heap. + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); + ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap()); + + // Deallocate all allocations in reverse order. + allocator.Deallocate(allocation1); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), + 2u); // A2 pins H0. + + allocator.Deallocate(allocation2); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0 + + allocator.Deallocate(allocation3); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1 } - void Deallocate(ResourceMemoryAllocation& allocation) { - mAllocator.Deallocate(allocation); + // Verify resource sub-allocation of various sizes over multiple heaps. + TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) { + // After three 64 byte allocations and two 128 byte allocations. + // + // max block size -> ------------------------------------------------------- + // | | + // ------------------------------------------------------- + // | | | + // max heap size -> ------------------------------------------------------- + // | H0 | A3/H1 | H2 | A5/H3 | + // ------------------------------------------------------- + // | A1 | A2 | | A4 | | | + // ------------------------------------------------------- + // + constexpr uint64_t heapSize = 128; + constexpr uint64_t maxBlockSize = 512; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); + + // Allocate two 64-byte allocations. 
+ ResourceMemoryAllocation allocation1 = allocator.Allocate(64); + ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); + ASSERT_EQ(allocation1.GetOffset(), 0u); + ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ResourceMemoryAllocation allocation2 = allocator.Allocate(64); + ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u); + ASSERT_EQ(allocation2.GetOffset(), 64u); + ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // A1 and A2 share H0 + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); + + ResourceMemoryAllocation allocation3 = allocator.Allocate(128); + ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u); + ASSERT_EQ(allocation3.GetOffset(), 0u); + ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + // A3 creates and fully occupies a new heap. + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); + ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap()); + + ResourceMemoryAllocation allocation4 = allocator.Allocate(64); + ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u); + ASSERT_EQ(allocation4.GetOffset(), 0u); + ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); + ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap()); + + // R5 size forms 64 byte hole after R4. + ResourceMemoryAllocation allocation5 = allocator.Allocate(128); + ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u); + ASSERT_EQ(allocation5.GetOffset(), 0u); + ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u); + ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap()); + + // Deallocate allocations in staggered order. 
+ allocator.Deallocate(allocation1); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u); // A2 pins H0 + + allocator.Deallocate(allocation5); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); // Released H3 + + allocator.Deallocate(allocation2); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); // Released H0 + + allocator.Deallocate(allocation4); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H2 + + allocator.Deallocate(allocation3); + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1 } - uint64_t ComputeTotalNumOfHeapsForTesting() const { - return mAllocator.ComputeTotalNumOfHeapsForTesting(); + // Verify resource sub-allocation of same sizes with various alignments. + TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) { + // After three 64 byte and one 128 byte resource allocations. + // + // max block size -> ------------------------------------------------------- + // | | + // ------------------------------------------------------- + // | | | + // max heap size -> ------------------------------------------------------- + // | H0 | H1 | H2 | | + // ------------------------------------------------------- + // | A1 | | A2 | | A3 | A4 | | + // ------------------------------------------------------- + // + constexpr uint64_t heapSize = 128; + constexpr uint64_t maxBlockSize = 512; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); + + ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128); + ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); + ASSERT_EQ(allocation1.GetOffset(), 0u); + ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + + ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128); + ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u); + ASSERT_EQ(allocation2.GetOffset(), 0u); + ASSERT_EQ(allocation2.GetInfo().mMethod, 
AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); + ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); + + ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128); + ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u); + ASSERT_EQ(allocation3.GetOffset(), 0u); + ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); + ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap()); + + ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64); + ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u); + ASSERT_EQ(allocation4.GetOffset(), 64u); + ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); + ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap()); } - private: - PlaceholderResourceHeapAllocator mHeapAllocator; - BuddyMemoryAllocator mAllocator; -}; - -// Verify a single resource allocation in a single heap. -TEST(BuddyMemoryAllocatorTests, SingleHeap) { - // After one 128 byte resource allocation: - // - // max block size -> --------------------------- - // | A1/H0 | Hi - Heap at index i - // max heap size -> --------------------------- An - Resource allocation n - // - constexpr uint64_t heapSize = 128; - constexpr uint64_t maxBlockSize = heapSize; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - // Cannot allocate greater than heap size. - ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); - ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); - - // Allocate one 128 byte allocation (same size as heap). 
- ResourceMemoryAllocation allocation1 = allocator.Allocate(128); - ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); - ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - - // Cannot allocate when allocator is full. - invalidAllocation = allocator.Allocate(128); - ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); - - allocator.Deallocate(allocation1); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); -} - -// Verify that multiple allocation are created in separate heaps. -TEST(BuddyMemoryAllocatorTests, MultipleHeaps) { - // After two 128 byte resource allocations: - // - // max block size -> --------------------------- - // | | Hi - Heap at index i - // max heap size -> --------------------------- An - Resource allocation n - // | A1/H0 | A2/H1 | - // --------------------------- - // - constexpr uint64_t maxBlockSize = 256; - constexpr uint64_t heapSize = 128; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - // Cannot allocate greater than heap size. - ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); - ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); - - // Cannot allocate greater than max block size. - invalidAllocation = allocator.Allocate(maxBlockSize * 2); - ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); - - // Allocate two 128 byte allocations. - ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize); - ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); - ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // First allocation creates first heap. 
- ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - - ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize); - ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize); - ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // Second allocation creates second heap. - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); - ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); - - // Deallocate both allocations - allocator.Deallocate(allocation1); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0 - - allocator.Deallocate(allocation2); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1 -} - -// Verify multiple sub-allocations can re-use heaps. -TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) { - // After two 64 byte allocations with 128 byte heaps. - // - // max block size -> --------------------------- - // | | Hi - Heap at index i - // max heap size -> --------------------------- An - Resource allocation n - // | H0 | H1 | - // --------------------------- - // | A1 | A2 | A3 | | - // --------------------------- - // - constexpr uint64_t maxBlockSize = 256; - constexpr uint64_t heapSize = 128; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - // Allocate two 64 byte sub-allocations. - ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2); - ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); - ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // First sub-allocation creates first heap. - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - - ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2); - ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2); - ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // Second allocation re-uses first heap. 
- ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); - - ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2); - ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize); - ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // Third allocation creates second heap. - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); - ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap()); - - // Deallocate all allocations in reverse order. - allocator.Deallocate(allocation1); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), - 2u); // A2 pins H0. - - allocator.Deallocate(allocation2); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0 - - allocator.Deallocate(allocation3); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1 -} - -// Verify resource sub-allocation of various sizes over multiple heaps. -TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) { - // After three 64 byte allocations and two 128 byte allocations. - // - // max block size -> ------------------------------------------------------- - // | | - // ------------------------------------------------------- - // | | | - // max heap size -> ------------------------------------------------------- - // | H0 | A3/H1 | H2 | A5/H3 | - // ------------------------------------------------------- - // | A1 | A2 | | A4 | | | - // ------------------------------------------------------- - // - constexpr uint64_t heapSize = 128; - constexpr uint64_t maxBlockSize = 512; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - // Allocate two 64-byte allocations. 
- ResourceMemoryAllocation allocation1 = allocator.Allocate(64); - ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); - ASSERT_EQ(allocation1.GetOffset(), 0u); - ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ResourceMemoryAllocation allocation2 = allocator.Allocate(64); - ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u); - ASSERT_EQ(allocation2.GetOffset(), 64u); - ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // A1 and A2 share H0 - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); - - ResourceMemoryAllocation allocation3 = allocator.Allocate(128); - ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u); - ASSERT_EQ(allocation3.GetOffset(), 0u); - ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - // A3 creates and fully occupies a new heap. - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); - ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap()); - - ResourceMemoryAllocation allocation4 = allocator.Allocate(64); - ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u); - ASSERT_EQ(allocation4.GetOffset(), 0u); - ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); - ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap()); - - // R5 size forms 64 byte hole after R4. - ResourceMemoryAllocation allocation5 = allocator.Allocate(128); - ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u); - ASSERT_EQ(allocation5.GetOffset(), 0u); - ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u); - ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap()); - - // Deallocate allocations in staggered order. 
- allocator.Deallocate(allocation1); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u); // A2 pins H0 - - allocator.Deallocate(allocation5); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); // Released H3 - - allocator.Deallocate(allocation2); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); // Released H0 - - allocator.Deallocate(allocation4); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H2 - - allocator.Deallocate(allocation3); - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1 -} - -// Verify resource sub-allocation of same sizes with various alignments. -TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) { - // After three 64 byte and one 128 byte resource allocations. - // - // max block size -> ------------------------------------------------------- - // | | - // ------------------------------------------------------- - // | | | - // max heap size -> ------------------------------------------------------- - // | H0 | H1 | H2 | | - // ------------------------------------------------------- - // | A1 | | A2 | | A3 | A4 | | - // ------------------------------------------------------- - // - constexpr uint64_t heapSize = 128; - constexpr uint64_t maxBlockSize = 512; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128); - ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); - ASSERT_EQ(allocation1.GetOffset(), 0u); - ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - - ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128); - ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u); - ASSERT_EQ(allocation2.GetOffset(), 0u); - ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); - 
ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); - - ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128); - ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u); - ASSERT_EQ(allocation3.GetOffset(), 0u); - ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); - ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap()); - - ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64); - ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u); - ASSERT_EQ(allocation4.GetOffset(), 64u); - ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); - ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap()); -} - -// Verify resource sub-allocation of various sizes with same alignments. -TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) { - // After two 64 byte and two 128 byte resource allocations: - // - // max block size -> ------------------------------------------------------- - // | | - // ------------------------------------------------------- - // | | | - // max heap size -> ------------------------------------------------------- - // | H0 | A3/H1 | A4/H2 | | - // ------------------------------------------------------- - // | A1 | A2 | | | | - // ------------------------------------------------------- - // - constexpr uint64_t heapSize = 128; - constexpr uint64_t maxBlockSize = 512; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - constexpr uint64_t alignment = 64; - - ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment); - ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); - ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); - - ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 
alignment); - ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u); - ASSERT_EQ(allocation2.GetOffset(), 64u); - ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Reuses H0 - ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); - - ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment); - ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u); - ASSERT_EQ(allocation3.GetOffset(), 0u); - ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); - ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap()); - - ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment); - ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u); - ASSERT_EQ(allocation4.GetOffset(), 0u); - ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated); - - ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); - ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap()); -} - -// Verify allocating a very large resource does not overflow. -TEST(BuddyMemoryAllocatorTests, AllocationOverflow) { - constexpr uint64_t heapSize = 128; - constexpr uint64_t maxBlockSize = 512; - PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - - constexpr uint64_t largeBlock = (1ull << 63) + 1; - ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock); - ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); -} - -// Verify resource heaps will be reused from a pool. 
-TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) { - constexpr uint64_t kHeapSize = 128; - constexpr uint64_t kMaxBlockSize = 4096; - - PlaceholderResourceHeapAllocator heapAllocator; - PooledResourceMemoryAllocator poolAllocator(&heapAllocator); - PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator); - - std::set heaps = {}; - std::vector allocations = {}; - - constexpr uint32_t kNumOfAllocations = 100; - - // Allocate |kNumOfAllocations|. - for (uint32_t i = 0; i < kNumOfAllocations; i++) { - ResourceMemoryAllocation allocation = allocator.Allocate(4); - ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated); - heaps.insert(allocation.GetResourceHeap()); - allocations.push_back(std::move(allocation)); + // Verify resource sub-allocation of various sizes with same alignments. + TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) { + // After two 64 byte and two 128 byte resource allocations: + // + // max block size -> ------------------------------------------------------- + // | | + // ------------------------------------------------------- + // | | | + // max heap size -> ------------------------------------------------------- + // | H0 | A3/H1 | A4/H2 | | + // ------------------------------------------------------- + // | A1 | A2 | | | | + // ------------------------------------------------------- + // + constexpr uint64_t heapSize = 128; + constexpr uint64_t maxBlockSize = 512; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); + + constexpr uint64_t alignment = 64; + + ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment); + ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); + ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); + + ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment); + ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u); + 
ASSERT_EQ(allocation2.GetOffset(), 64u); + ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Reuses H0 + ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap()); + + ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment); + ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u); + ASSERT_EQ(allocation3.GetOffset(), 0u); + ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); + ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap()); + + ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment); + ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u); + ASSERT_EQ(allocation4.GetOffset(), 0u); + ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated); + + ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); + ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap()); } - ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); + // Verify allocating a very large resource does not overflow. + TEST(BuddyMemoryAllocatorTests, AllocationOverflow) { + constexpr uint64_t heapSize = 128; + constexpr uint64_t maxBlockSize = 512; + PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); - // Return the allocations to the pool. - for (ResourceMemoryAllocation& allocation : allocations) { - allocator.Deallocate(allocation); + constexpr uint64_t largeBlock = (1ull << 63) + 1; + ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock); + ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid); } - ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size()); + // Verify resource heaps will be reused from a pool. 
+ TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) { + constexpr uint64_t kHeapSize = 128; + constexpr uint64_t kMaxBlockSize = 4096; - // Allocate again reusing the same heaps. - for (uint32_t i = 0; i < kNumOfAllocations; i++) { - ResourceMemoryAllocation allocation = allocator.Allocate(4); - ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated); - ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second); + PlaceholderResourceHeapAllocator heapAllocator; + PooledResourceMemoryAllocator poolAllocator(&heapAllocator); + PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator); + + std::set heaps = {}; + std::vector allocations = {}; + + constexpr uint32_t kNumOfAllocations = 100; + + // Allocate |kNumOfAllocations|. + for (uint32_t i = 0; i < kNumOfAllocations; i++) { + ResourceMemoryAllocation allocation = allocator.Allocate(4); + ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated); + heaps.insert(allocation.GetResourceHeap()); + allocations.push_back(std::move(allocation)); + } + + ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); + + // Return the allocations to the pool. + for (ResourceMemoryAllocation& allocation : allocations) { + allocator.Deallocate(allocation); + } + + ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size()); + + // Allocate again reusing the same heaps. + for (uint32_t i = 0; i < kNumOfAllocations; i++) { + ResourceMemoryAllocation allocation = allocator.Allocate(4); + ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated); + ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second); + } + + ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); } - ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); -} + // Verify resource heaps that were reused from a pool can be destroyed. 
+ TEST(BuddyMemoryAllocatorTests, DestroyHeaps) { + constexpr uint64_t kHeapSize = 128; + constexpr uint64_t kMaxBlockSize = 4096; -// Verify resource heaps that were reused from a pool can be destroyed. -TEST(BuddyMemoryAllocatorTests, DestroyHeaps) { - constexpr uint64_t kHeapSize = 128; - constexpr uint64_t kMaxBlockSize = 4096; + PlaceholderResourceHeapAllocator heapAllocator; + PooledResourceMemoryAllocator poolAllocator(&heapAllocator); + PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator); - PlaceholderResourceHeapAllocator heapAllocator; - PooledResourceMemoryAllocator poolAllocator(&heapAllocator); - PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator); + std::set heaps = {}; + std::vector allocations = {}; - std::set heaps = {}; - std::vector allocations = {}; + // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth + // of buffers. Otherwise, the heap may be reused if not full. + constexpr uint32_t kNumOfHeaps = 10; - // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth of - // buffers. Otherwise, the heap may be reused if not full. - constexpr uint32_t kNumOfHeaps = 10; + // Allocate |kNumOfHeaps| worth. + while (heaps.size() < kNumOfHeaps) { + ResourceMemoryAllocation allocation = allocator.Allocate(4); + ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated); + heaps.insert(allocation.GetResourceHeap()); + allocations.push_back(std::move(allocation)); + } - // Allocate |kNumOfHeaps| worth. - while (heaps.size() < kNumOfHeaps) { - ResourceMemoryAllocation allocation = allocator.Allocate(4); - ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated); - heaps.insert(allocation.GetResourceHeap()); - allocations.push_back(std::move(allocation)); + ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); + + // Return the allocations to the pool. 
+ for (ResourceMemoryAllocation& allocation : allocations) { + allocator.Deallocate(allocation); + } + + ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps); + + // Make sure we can destroy the remaining heaps. + poolAllocator.DestroyPool(); + ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); } - ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); - - // Return the allocations to the pool. - for (ResourceMemoryAllocation& allocation : allocations) { - allocator.Deallocate(allocation); - } - - ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps); - - // Make sure we can destroy the remaining heaps. - poolAllocator.DestroyPool(); - ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u); -} +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/CommandAllocatorTests.cpp b/src/dawn/tests/unittests/CommandAllocatorTests.cpp index fc5df40466..cea19226b7 100644 --- a/src/dawn/tests/unittests/CommandAllocatorTests.cpp +++ b/src/dawn/tests/unittests/CommandAllocatorTests.cpp @@ -19,486 +19,490 @@ #include "gtest/gtest.h" #include "dawn/native/CommandAllocator.h" -using namespace dawn::native; - -// Definition of the command types used in the tests -enum class CommandType { - Draw, - Pipeline, - PushConstants, - Big, - Small, -}; - -struct CommandDraw { - uint32_t first; - uint32_t count; -}; - -struct CommandPipeline { - uint64_t pipeline; - uint32_t attachmentPoint; -}; - -struct CommandPushConstants { - uint8_t size; - uint8_t offset; -}; - -constexpr int kBigBufferSize = 65536; - -struct CommandBig { - uint32_t buffer[kBigBufferSize]; -}; - -struct CommandSmall { - uint16_t data; -}; - -// Test allocating nothing works -TEST(CommandAllocator, DoNothingAllocator) { - CommandAllocator allocator; -} - -// Test iterating over nothing works -TEST(CommandAllocator, DoNothingAllocatorWithIterator) { - CommandAllocator allocator; - CommandIterator iterator(std::move(allocator)); - iterator.MakeEmptyAsDataWasDestroyed(); -} - -// Test basic 
usage of allocator + iterator -TEST(CommandAllocator, Basic) { - CommandAllocator allocator; - - uint64_t myPipeline = 0xDEADBEEFBEEFDEAD; - uint32_t myAttachmentPoint = 2; - uint32_t myFirst = 42; - uint32_t myCount = 16; - - { - CommandPipeline* pipeline = allocator.Allocate(CommandType::Pipeline); - pipeline->pipeline = myPipeline; - pipeline->attachmentPoint = myAttachmentPoint; - - CommandDraw* draw = allocator.Allocate(CommandType::Draw); - draw->first = myFirst; - draw->count = myCount; - } - - { - CommandIterator iterator(std::move(allocator)); - CommandType type; - - bool hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Pipeline); - - CommandPipeline* pipeline = iterator.NextCommand(); - ASSERT_EQ(pipeline->pipeline, myPipeline); - ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint); - - hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Draw); - - CommandDraw* draw = iterator.NextCommand(); - ASSERT_EQ(draw->first, myFirst); - ASSERT_EQ(draw->count, myCount); - - hasNext = iterator.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator.MakeEmptyAsDataWasDestroyed(); - } -} - -// Test basic usage of allocator + iterator with data -TEST(CommandAllocator, BasicWithData) { - CommandAllocator allocator; - - uint8_t mySize = 8; - uint8_t myOffset = 3; - uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54}; - - { - CommandPushConstants* pushConstants = - allocator.Allocate(CommandType::PushConstants); - pushConstants->size = mySize; - pushConstants->offset = myOffset; - - uint32_t* values = allocator.AllocateData(5); - for (size_t i = 0; i < 5; i++) { - values[i] = myValues[i]; - } - } - - { - CommandIterator iterator(std::move(allocator)); - CommandType type; - - bool hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::PushConstants); - - CommandPushConstants* pushConstants = iterator.NextCommand(); - 
ASSERT_EQ(pushConstants->size, mySize); - ASSERT_EQ(pushConstants->offset, myOffset); - - uint32_t* values = iterator.NextData(5); - for (size_t i = 0; i < 5; i++) { - ASSERT_EQ(values[i], myValues[i]); - } - - hasNext = iterator.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator.MakeEmptyAsDataWasDestroyed(); - } -} - -// Test basic iterating several times -TEST(CommandAllocator, MultipleIterations) { - CommandAllocator allocator; - - uint32_t myFirst = 42; - uint32_t myCount = 16; - - { - CommandDraw* draw = allocator.Allocate(CommandType::Draw); - draw->first = myFirst; - draw->count = myCount; - } - - { - CommandIterator iterator(std::move(allocator)); - CommandType type; - - // First iteration - bool hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Draw); - - CommandDraw* draw = iterator.NextCommand(); - ASSERT_EQ(draw->first, myFirst); - ASSERT_EQ(draw->count, myCount); - - hasNext = iterator.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - // Second iteration - hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Draw); - - draw = iterator.NextCommand(); - ASSERT_EQ(draw->first, myFirst); - ASSERT_EQ(draw->count, myCount); - - hasNext = iterator.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator.MakeEmptyAsDataWasDestroyed(); - } -} -// Test large commands work -TEST(CommandAllocator, LargeCommands) { - CommandAllocator allocator; - - const int kCommandCount = 5; - - uint32_t count = 0; - for (int i = 0; i < kCommandCount; i++) { - CommandBig* big = allocator.Allocate(CommandType::Big); - for (int j = 0; j < kBigBufferSize; j++) { - big->buffer[j] = count++; - } - } - - CommandIterator iterator(std::move(allocator)); - CommandType type; - count = 0; - int numCommands = 0; - while (iterator.NextCommandId(&type)) { - ASSERT_EQ(type, CommandType::Big); - - CommandBig* big = iterator.NextCommand(); - for (int i = 0; i < kBigBufferSize; i++) { - 
ASSERT_EQ(big->buffer[i], count); - count++; - } - numCommands++; - } - ASSERT_EQ(numCommands, kCommandCount); - - iterator.MakeEmptyAsDataWasDestroyed(); -} - -// Test many small commands work -TEST(CommandAllocator, ManySmallCommands) { - CommandAllocator allocator; - - // Stay under max representable uint16_t - const int kCommandCount = 50000; - - uint16_t count = 0; - for (int i = 0; i < kCommandCount; i++) { - CommandSmall* small = allocator.Allocate(CommandType::Small); - small->data = count++; - } - - CommandIterator iterator(std::move(allocator)); - CommandType type; - count = 0; - int numCommands = 0; - while (iterator.NextCommandId(&type)) { - ASSERT_EQ(type, CommandType::Small); - - CommandSmall* small = iterator.NextCommand(); - ASSERT_EQ(small->data, count); - count++; - numCommands++; - } - ASSERT_EQ(numCommands, kCommandCount); - - iterator.MakeEmptyAsDataWasDestroyed(); -} - -/* ________ - * / \ - * | POUIC! | - * \_ ______/ - * v - * ()_() - * (O.o) - * (> <)o - */ - -// Test usage of iterator.Reset -TEST(CommandAllocator, IteratorReset) { - CommandAllocator allocator; - - uint64_t myPipeline = 0xDEADBEEFBEEFDEAD; - uint32_t myAttachmentPoint = 2; - uint32_t myFirst = 42; - uint32_t myCount = 16; - - { - CommandPipeline* pipeline = allocator.Allocate(CommandType::Pipeline); - pipeline->pipeline = myPipeline; - pipeline->attachmentPoint = myAttachmentPoint; - - CommandDraw* draw = allocator.Allocate(CommandType::Draw); - draw->first = myFirst; - draw->count = myCount; - } - - { - CommandIterator iterator(std::move(allocator)); - CommandType type; - - bool hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Pipeline); - - CommandPipeline* pipeline = iterator.NextCommand(); - ASSERT_EQ(pipeline->pipeline, myPipeline); - ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint); - - iterator.Reset(); - - hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Pipeline); - 
- pipeline = iterator.NextCommand(); - ASSERT_EQ(pipeline->pipeline, myPipeline); - ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint); - - hasNext = iterator.NextCommandId(&type); - ASSERT_TRUE(hasNext); - ASSERT_EQ(type, CommandType::Draw); - - CommandDraw* draw = iterator.NextCommand(); - ASSERT_EQ(draw->first, myFirst); - ASSERT_EQ(draw->count, myCount); - - hasNext = iterator.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator.MakeEmptyAsDataWasDestroyed(); - } -} - -// Test iterating empty iterators -TEST(CommandAllocator, EmptyIterator) { - { - CommandAllocator allocator; - CommandIterator iterator(std::move(allocator)); - - CommandType type; - bool hasNext = iterator.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator.MakeEmptyAsDataWasDestroyed(); - } - { - CommandAllocator allocator; - CommandIterator iterator1(std::move(allocator)); - CommandIterator iterator2(std::move(iterator1)); - - CommandType type; - bool hasNext = iterator2.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator1.MakeEmptyAsDataWasDestroyed(); - iterator2.MakeEmptyAsDataWasDestroyed(); - } - { - CommandIterator iterator1; - CommandIterator iterator2(std::move(iterator1)); - - CommandType type; - bool hasNext = iterator2.NextCommandId(&type); - ASSERT_FALSE(hasNext); - - iterator1.MakeEmptyAsDataWasDestroyed(); - iterator2.MakeEmptyAsDataWasDestroyed(); - } -} - -template -struct alignas(A) AlignedStruct { - char placeholder; -}; - -// Test for overflows in Allocate's computations, size 1 variant -TEST(CommandAllocator, AllocationOverflow_1) { - CommandAllocator allocator; - AlignedStruct<1>* data = - allocator.AllocateData>(std::numeric_limits::max() / 1); - ASSERT_EQ(data, nullptr); -} - -// Test for overflows in Allocate's computations, size 2 variant -TEST(CommandAllocator, AllocationOverflow_2) { - CommandAllocator allocator; - AlignedStruct<2>* data = - allocator.AllocateData>(std::numeric_limits::max() / 2); - ASSERT_EQ(data, nullptr); -} - -// Test 
for overflows in Allocate's computations, size 4 variant -TEST(CommandAllocator, AllocationOverflow_4) { - CommandAllocator allocator; - AlignedStruct<4>* data = - allocator.AllocateData>(std::numeric_limits::max() / 4); - ASSERT_EQ(data, nullptr); -} - -// Test for overflows in Allocate's computations, size 8 variant -TEST(CommandAllocator, AllocationOverflow_8) { - CommandAllocator allocator; - AlignedStruct<8>* data = - allocator.AllocateData>(std::numeric_limits::max() / 8); - ASSERT_EQ(data, nullptr); -} - -template -struct IntWithDefault { - IntWithDefault() : value(DefaultValue) { - } - - int value; -}; - -// Test that the allcator correctly defaults initalizes data for Allocate -TEST(CommandAllocator, AllocateDefaultInitializes) { - CommandAllocator allocator; - - IntWithDefault<42>* int42 = allocator.Allocate>(CommandType::Draw); - ASSERT_EQ(int42->value, 42); - - IntWithDefault<43>* int43 = allocator.Allocate>(CommandType::Draw); - ASSERT_EQ(int43->value, 43); - - IntWithDefault<44>* int44 = allocator.Allocate>(CommandType::Draw); - ASSERT_EQ(int44->value, 44); - - CommandIterator iterator(std::move(allocator)); - iterator.MakeEmptyAsDataWasDestroyed(); -} - -// Test that the allocator correctly default-initalizes data for AllocateData -TEST(CommandAllocator, AllocateDataDefaultInitializes) { - CommandAllocator allocator; - - IntWithDefault<33>* int33 = allocator.AllocateData>(1); - ASSERT_EQ(int33[0].value, 33); - - IntWithDefault<34>* int34 = allocator.AllocateData>(2); - ASSERT_EQ(int34[0].value, 34); - ASSERT_EQ(int34[0].value, 34); - - IntWithDefault<35>* int35 = allocator.AllocateData>(3); - ASSERT_EQ(int35[0].value, 35); - ASSERT_EQ(int35[1].value, 35); - ASSERT_EQ(int35[2].value, 35); - - CommandIterator iterator(std::move(allocator)); - iterator.MakeEmptyAsDataWasDestroyed(); -} - -// Tests flattening of multiple CommandAllocators into a single CommandIterator using -// AcquireCommandBlocks. 
-TEST(CommandAllocator, AcquireCommandBlocks) { - constexpr size_t kNumAllocators = 2; - constexpr size_t kNumCommandsPerAllocator = 2; - const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = { - {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE}, - {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE}, +namespace dawn::native { + + // Definition of the command types used in the tests + enum class CommandType { + Draw, + Pipeline, + PushConstants, + Big, + Small, }; - const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, {3, 4}}; - const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}}; - const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}}; - std::vector allocators(kNumAllocators); - for (size_t j = 0; j < kNumAllocators; ++j) { - CommandAllocator& allocator = allocators[j]; - for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) { + struct CommandDraw { + uint32_t first; + uint32_t count; + }; + + struct CommandPipeline { + uint64_t pipeline; + uint32_t attachmentPoint; + }; + + struct CommandPushConstants { + uint8_t size; + uint8_t offset; + }; + + constexpr int kBigBufferSize = 65536; + + struct CommandBig { + uint32_t buffer[kBigBufferSize]; + }; + + struct CommandSmall { + uint16_t data; + }; + + // Test allocating nothing works + TEST(CommandAllocator, DoNothingAllocator) { + CommandAllocator allocator; + } + + // Test iterating over nothing works + TEST(CommandAllocator, DoNothingAllocatorWithIterator) { + CommandAllocator allocator; + CommandIterator iterator(std::move(allocator)); + iterator.MakeEmptyAsDataWasDestroyed(); + } + + // Test basic usage of allocator + iterator + TEST(CommandAllocator, Basic) { + CommandAllocator allocator; + + uint64_t myPipeline = 0xDEADBEEFBEEFDEAD; + uint32_t myAttachmentPoint = 2; + uint32_t myFirst = 42; + uint32_t myCount = 16; + + { CommandPipeline* pipeline = allocator.Allocate(CommandType::Pipeline); - pipeline->pipeline = 
pipelines[j][i]; - pipeline->attachmentPoint = attachmentPoints[j][i]; + pipeline->pipeline = myPipeline; + pipeline->attachmentPoint = myAttachmentPoint; CommandDraw* draw = allocator.Allocate(CommandType::Draw); - draw->first = firsts[j][i]; - draw->count = counts[j][i]; + draw->first = myFirst; + draw->count = myCount; } - } - CommandIterator iterator; - iterator.AcquireCommandBlocks(std::move(allocators)); - for (size_t j = 0; j < kNumAllocators; ++j) { - for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) { + { + CommandIterator iterator(std::move(allocator)); CommandType type; + bool hasNext = iterator.NextCommandId(&type); ASSERT_TRUE(hasNext); ASSERT_EQ(type, CommandType::Pipeline); CommandPipeline* pipeline = iterator.NextCommand(); - ASSERT_EQ(pipeline->pipeline, pipelines[j][i]); - ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]); + ASSERT_EQ(pipeline->pipeline, myPipeline); + ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint); hasNext = iterator.NextCommandId(&type); ASSERT_TRUE(hasNext); ASSERT_EQ(type, CommandType::Draw); CommandDraw* draw = iterator.NextCommand(); - ASSERT_EQ(draw->first, firsts[j][i]); - ASSERT_EQ(draw->count, counts[j][i]); + ASSERT_EQ(draw->first, myFirst); + ASSERT_EQ(draw->count, myCount); + + hasNext = iterator.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator.MakeEmptyAsDataWasDestroyed(); } } - CommandType type; - ASSERT_FALSE(iterator.NextCommandId(&type)); - iterator.MakeEmptyAsDataWasDestroyed(); -} + + // Test basic usage of allocator + iterator with data + TEST(CommandAllocator, BasicWithData) { + CommandAllocator allocator; + + uint8_t mySize = 8; + uint8_t myOffset = 3; + uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54}; + + { + CommandPushConstants* pushConstants = + allocator.Allocate(CommandType::PushConstants); + pushConstants->size = mySize; + pushConstants->offset = myOffset; + + uint32_t* values = allocator.AllocateData(5); + for (size_t i = 0; i < 5; i++) { + values[i] = 
myValues[i]; + } + } + + { + CommandIterator iterator(std::move(allocator)); + CommandType type; + + bool hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::PushConstants); + + CommandPushConstants* pushConstants = iterator.NextCommand(); + ASSERT_EQ(pushConstants->size, mySize); + ASSERT_EQ(pushConstants->offset, myOffset); + + uint32_t* values = iterator.NextData(5); + for (size_t i = 0; i < 5; i++) { + ASSERT_EQ(values[i], myValues[i]); + } + + hasNext = iterator.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator.MakeEmptyAsDataWasDestroyed(); + } + } + + // Test basic iterating several times + TEST(CommandAllocator, MultipleIterations) { + CommandAllocator allocator; + + uint32_t myFirst = 42; + uint32_t myCount = 16; + + { + CommandDraw* draw = allocator.Allocate(CommandType::Draw); + draw->first = myFirst; + draw->count = myCount; + } + + { + CommandIterator iterator(std::move(allocator)); + CommandType type; + + // First iteration + bool hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Draw); + + CommandDraw* draw = iterator.NextCommand(); + ASSERT_EQ(draw->first, myFirst); + ASSERT_EQ(draw->count, myCount); + + hasNext = iterator.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + // Second iteration + hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Draw); + + draw = iterator.NextCommand(); + ASSERT_EQ(draw->first, myFirst); + ASSERT_EQ(draw->count, myCount); + + hasNext = iterator.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator.MakeEmptyAsDataWasDestroyed(); + } + } + // Test large commands work + TEST(CommandAllocator, LargeCommands) { + CommandAllocator allocator; + + const int kCommandCount = 5; + + uint32_t count = 0; + for (int i = 0; i < kCommandCount; i++) { + CommandBig* big = allocator.Allocate(CommandType::Big); + for (int j = 0; j < kBigBufferSize; j++) { + big->buffer[j] = count++; + } + 
} + + CommandIterator iterator(std::move(allocator)); + CommandType type; + count = 0; + int numCommands = 0; + while (iterator.NextCommandId(&type)) { + ASSERT_EQ(type, CommandType::Big); + + CommandBig* big = iterator.NextCommand(); + for (int i = 0; i < kBigBufferSize; i++) { + ASSERT_EQ(big->buffer[i], count); + count++; + } + numCommands++; + } + ASSERT_EQ(numCommands, kCommandCount); + + iterator.MakeEmptyAsDataWasDestroyed(); + } + + // Test many small commands work + TEST(CommandAllocator, ManySmallCommands) { + CommandAllocator allocator; + + // Stay under max representable uint16_t + const int kCommandCount = 50000; + + uint16_t count = 0; + for (int i = 0; i < kCommandCount; i++) { + CommandSmall* small = allocator.Allocate(CommandType::Small); + small->data = count++; + } + + CommandIterator iterator(std::move(allocator)); + CommandType type; + count = 0; + int numCommands = 0; + while (iterator.NextCommandId(&type)) { + ASSERT_EQ(type, CommandType::Small); + + CommandSmall* small = iterator.NextCommand(); + ASSERT_EQ(small->data, count); + count++; + numCommands++; + } + ASSERT_EQ(numCommands, kCommandCount); + + iterator.MakeEmptyAsDataWasDestroyed(); + } + + /* ________ + * / \ + * | POUIC! 
| + * \_ ______/ + * v + * ()_() + * (O.o) + * (> <)o + */ + + // Test usage of iterator.Reset + TEST(CommandAllocator, IteratorReset) { + CommandAllocator allocator; + + uint64_t myPipeline = 0xDEADBEEFBEEFDEAD; + uint32_t myAttachmentPoint = 2; + uint32_t myFirst = 42; + uint32_t myCount = 16; + + { + CommandPipeline* pipeline = allocator.Allocate(CommandType::Pipeline); + pipeline->pipeline = myPipeline; + pipeline->attachmentPoint = myAttachmentPoint; + + CommandDraw* draw = allocator.Allocate(CommandType::Draw); + draw->first = myFirst; + draw->count = myCount; + } + + { + CommandIterator iterator(std::move(allocator)); + CommandType type; + + bool hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Pipeline); + + CommandPipeline* pipeline = iterator.NextCommand(); + ASSERT_EQ(pipeline->pipeline, myPipeline); + ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint); + + iterator.Reset(); + + hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Pipeline); + + pipeline = iterator.NextCommand(); + ASSERT_EQ(pipeline->pipeline, myPipeline); + ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint); + + hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Draw); + + CommandDraw* draw = iterator.NextCommand(); + ASSERT_EQ(draw->first, myFirst); + ASSERT_EQ(draw->count, myCount); + + hasNext = iterator.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator.MakeEmptyAsDataWasDestroyed(); + } + } + + // Test iterating empty iterators + TEST(CommandAllocator, EmptyIterator) { + { + CommandAllocator allocator; + CommandIterator iterator(std::move(allocator)); + + CommandType type; + bool hasNext = iterator.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator.MakeEmptyAsDataWasDestroyed(); + } + { + CommandAllocator allocator; + CommandIterator iterator1(std::move(allocator)); + CommandIterator iterator2(std::move(iterator1)); + + 
CommandType type; + bool hasNext = iterator2.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator1.MakeEmptyAsDataWasDestroyed(); + iterator2.MakeEmptyAsDataWasDestroyed(); + } + { + CommandIterator iterator1; + CommandIterator iterator2(std::move(iterator1)); + + CommandType type; + bool hasNext = iterator2.NextCommandId(&type); + ASSERT_FALSE(hasNext); + + iterator1.MakeEmptyAsDataWasDestroyed(); + iterator2.MakeEmptyAsDataWasDestroyed(); + } + } + + template + struct alignas(A) AlignedStruct { + char placeholder; + }; + + // Test for overflows in Allocate's computations, size 1 variant + TEST(CommandAllocator, AllocationOverflow_1) { + CommandAllocator allocator; + AlignedStruct<1>* data = + allocator.AllocateData>(std::numeric_limits::max() / 1); + ASSERT_EQ(data, nullptr); + } + + // Test for overflows in Allocate's computations, size 2 variant + TEST(CommandAllocator, AllocationOverflow_2) { + CommandAllocator allocator; + AlignedStruct<2>* data = + allocator.AllocateData>(std::numeric_limits::max() / 2); + ASSERT_EQ(data, nullptr); + } + + // Test for overflows in Allocate's computations, size 4 variant + TEST(CommandAllocator, AllocationOverflow_4) { + CommandAllocator allocator; + AlignedStruct<4>* data = + allocator.AllocateData>(std::numeric_limits::max() / 4); + ASSERT_EQ(data, nullptr); + } + + // Test for overflows in Allocate's computations, size 8 variant + TEST(CommandAllocator, AllocationOverflow_8) { + CommandAllocator allocator; + AlignedStruct<8>* data = + allocator.AllocateData>(std::numeric_limits::max() / 8); + ASSERT_EQ(data, nullptr); + } + + template + struct IntWithDefault { + IntWithDefault() : value(DefaultValue) { + } + + int value; + }; + + // Test that the allcator correctly defaults initalizes data for Allocate + TEST(CommandAllocator, AllocateDefaultInitializes) { + CommandAllocator allocator; + + IntWithDefault<42>* int42 = allocator.Allocate>(CommandType::Draw); + ASSERT_EQ(int42->value, 42); + + IntWithDefault<43>* int43 
= allocator.Allocate>(CommandType::Draw); + ASSERT_EQ(int43->value, 43); + + IntWithDefault<44>* int44 = allocator.Allocate>(CommandType::Draw); + ASSERT_EQ(int44->value, 44); + + CommandIterator iterator(std::move(allocator)); + iterator.MakeEmptyAsDataWasDestroyed(); + } + + // Test that the allocator correctly default-initalizes data for AllocateData + TEST(CommandAllocator, AllocateDataDefaultInitializes) { + CommandAllocator allocator; + + IntWithDefault<33>* int33 = allocator.AllocateData>(1); + ASSERT_EQ(int33[0].value, 33); + + IntWithDefault<34>* int34 = allocator.AllocateData>(2); + ASSERT_EQ(int34[0].value, 34); + ASSERT_EQ(int34[0].value, 34); + + IntWithDefault<35>* int35 = allocator.AllocateData>(3); + ASSERT_EQ(int35[0].value, 35); + ASSERT_EQ(int35[1].value, 35); + ASSERT_EQ(int35[2].value, 35); + + CommandIterator iterator(std::move(allocator)); + iterator.MakeEmptyAsDataWasDestroyed(); + } + + // Tests flattening of multiple CommandAllocators into a single CommandIterator using + // AcquireCommandBlocks. 
+ TEST(CommandAllocator, AcquireCommandBlocks) { + constexpr size_t kNumAllocators = 2; + constexpr size_t kNumCommandsPerAllocator = 2; + const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = { + {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE}, + {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE}, + }; + const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, + {3, 4}}; + const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}}; + const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}}; + + std::vector allocators(kNumAllocators); + for (size_t j = 0; j < kNumAllocators; ++j) { + CommandAllocator& allocator = allocators[j]; + for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) { + CommandPipeline* pipeline = + allocator.Allocate(CommandType::Pipeline); + pipeline->pipeline = pipelines[j][i]; + pipeline->attachmentPoint = attachmentPoints[j][i]; + + CommandDraw* draw = allocator.Allocate(CommandType::Draw); + draw->first = firsts[j][i]; + draw->count = counts[j][i]; + } + } + + CommandIterator iterator; + iterator.AcquireCommandBlocks(std::move(allocators)); + for (size_t j = 0; j < kNumAllocators; ++j) { + for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) { + CommandType type; + bool hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Pipeline); + + CommandPipeline* pipeline = iterator.NextCommand(); + ASSERT_EQ(pipeline->pipeline, pipelines[j][i]); + ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]); + + hasNext = iterator.NextCommandId(&type); + ASSERT_TRUE(hasNext); + ASSERT_EQ(type, CommandType::Draw); + + CommandDraw* draw = iterator.NextCommand(); + ASSERT_EQ(draw->first, firsts[j][i]); + ASSERT_EQ(draw->count, counts[j][i]); + } + } + CommandType type; + ASSERT_FALSE(iterator.NextCommandId(&type)); + iterator.MakeEmptyAsDataWasDestroyed(); + } + +} // namespace dawn::native diff --git 
a/src/dawn/tests/unittests/ErrorTests.cpp b/src/dawn/tests/unittests/ErrorTests.cpp index b0ada199bd..f8566136cf 100644 --- a/src/dawn/tests/unittests/ErrorTests.cpp +++ b/src/dawn/tests/unittests/ErrorTests.cpp @@ -18,9 +18,7 @@ #include "dawn/native/ErrorData.h" #include "gtest/gtest.h" -using namespace dawn::native; - -namespace { +namespace dawn::native { namespace { int placeholderSuccess = 0xbeef; const char* placeholderErrorMessage = "I am an error message :3"; @@ -360,4 +358,6 @@ namespace { ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage); } -} // anonymous namespace + // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented. + // NOLINTNEXTLINE(readability/namespace) +}} // namespace dawn::native:: diff --git a/src/dawn/tests/unittests/PerStageTests.cpp b/src/dawn/tests/unittests/PerStageTests.cpp index 1ae2e17b2f..ccfdee4cef 100644 --- a/src/dawn/tests/unittests/PerStageTests.cpp +++ b/src/dawn/tests/unittests/PerStageTests.cpp @@ -16,74 +16,76 @@ #include "dawn/native/PerStage.h" -using namespace dawn::native; +namespace dawn::native { -// Tests for StageBit -TEST(PerStage, StageBit) { - ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex); - ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment); - ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute); -} - -// Basic test for the PerStage container -TEST(PerStage, PerStage) { - PerStage data; - - // Store data using wgpu::ShaderStage - data[SingleShaderStage::Vertex] = 42; - data[SingleShaderStage::Fragment] = 3; - data[SingleShaderStage::Compute] = -1; - - // Load it using wgpu::ShaderStage - ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42); - ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3); - ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1); -} - -// Test IterateStages with kAllStages -TEST(PerStage, IterateAllStages) { - PerStage counts; - counts[SingleShaderStage::Vertex] = 0; - 
counts[SingleShaderStage::Fragment] = 0; - counts[SingleShaderStage::Compute] = 0; - - for (auto stage : IterateStages(kAllStages)) { - counts[stage]++; + // Tests for StageBit + TEST(PerStage, StageBit) { + ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex); + ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment); + ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute); } - ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1); - ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1); - ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1); -} + // Basic test for the PerStage container + TEST(PerStage, PerStage) { + PerStage data; -// Test IterateStages with one stage -TEST(PerStage, IterateOneStage) { - PerStage counts; - counts[SingleShaderStage::Vertex] = 0; - counts[SingleShaderStage::Fragment] = 0; - counts[SingleShaderStage::Compute] = 0; + // Store data using wgpu::ShaderStage + data[SingleShaderStage::Vertex] = 42; + data[SingleShaderStage::Fragment] = 3; + data[SingleShaderStage::Compute] = -1; - for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) { - counts[stage]++; + // Load it using wgpu::ShaderStage + ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42); + ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3); + ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1); } - ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0); - ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1); - ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0); -} + // Test IterateStages with kAllStages + TEST(PerStage, IterateAllStages) { + PerStage counts; + counts[SingleShaderStage::Vertex] = 0; + counts[SingleShaderStage::Fragment] = 0; + counts[SingleShaderStage::Compute] = 0; -// Test IterateStages with no stage -TEST(PerStage, IterateNoStages) { - PerStage counts; - counts[SingleShaderStage::Vertex] = 0; - counts[SingleShaderStage::Fragment] = 0; - counts[SingleShaderStage::Compute] = 0; + for (auto stage : IterateStages(kAllStages)) { + 
counts[stage]++; + } - for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) { - counts[stage]++; + ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1); + ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1); + ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1); } - ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0); - ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0); - ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0); -} + // Test IterateStages with one stage + TEST(PerStage, IterateOneStage) { + PerStage counts; + counts[SingleShaderStage::Vertex] = 0; + counts[SingleShaderStage::Fragment] = 0; + counts[SingleShaderStage::Compute] = 0; + + for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) { + counts[stage]++; + } + + ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0); + ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1); + ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0); + } + + // Test IterateStages with no stage + TEST(PerStage, IterateNoStages) { + PerStage counts; + counts[SingleShaderStage::Vertex] = 0; + counts[SingleShaderStage::Fragment] = 0; + counts[SingleShaderStage::Compute] = 0; + + for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) { + counts[stage]++; + } + + ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0); + ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0); + ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0); + } + +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp index 9fcbda9137..a03177861e 100644 --- a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp +++ b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp @@ -18,7 +18,8 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using namespace testing; +using testing::InSequence; +using testing::StrictMock; namespace { @@ -34,7 +35,7 @@ namespace { std::unique_ptr> mockDestructor; - class PlacementAllocatedTests : public 
Test { + class PlacementAllocatedTests : public testing::Test { void SetUp() override { mockDestructor = std::make_unique>(); } diff --git a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp index 8653a3b223..b2a4f1034f 100644 --- a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp +++ b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp @@ -17,159 +17,163 @@ #include "dawn/native/RingBufferAllocator.h" #include "gtest/gtest.h" -using namespace dawn::native; +namespace dawn::native { -constexpr uint64_t RingBufferAllocator::kInvalidOffset; + constexpr uint64_t RingBufferAllocator::kInvalidOffset; -// Number of basic tests for Ringbuffer -TEST(RingBufferAllocatorTests, BasicTest) { - constexpr uint64_t sizeInBytes = 64000; - RingBufferAllocator allocator(sizeInBytes); + // Number of basic tests for Ringbuffer + TEST(RingBufferAllocatorTests, BasicTest) { + constexpr uint64_t sizeInBytes = 64000; + RingBufferAllocator allocator(sizeInBytes); - // Ensure no requests exist on empty buffer. - EXPECT_TRUE(allocator.Empty()); + // Ensure no requests exist on empty buffer. + EXPECT_TRUE(allocator.Empty()); - ASSERT_EQ(allocator.GetSize(), sizeInBytes); + ASSERT_EQ(allocator.GetSize(), sizeInBytes); - // Ensure failure upon sub-allocating an oversized request. - ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)), - RingBufferAllocator::kInvalidOffset); + // Ensure failure upon sub-allocating an oversized request. + ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)), + RingBufferAllocator::kInvalidOffset); - // Fill the entire buffer with two requests of equal size. - ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u); - ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u); + // Fill the entire buffer with two requests of equal size. 
+ ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u); + ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u); - // Ensure the buffer is full. - ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset); -} - -// Tests that several ringbuffer allocations do not fail. -TEST(RingBufferAllocatorTests, RingBufferManyAlloc) { - constexpr uint64_t maxNumOfFrames = 64000; - constexpr uint64_t frameSizeInBytes = 4; - - RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); - - size_t offset = 0; - for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) { - offset = allocator.Allocate(frameSizeInBytes, i); - ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes); + // Ensure the buffer is full. + ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset); } -} -// Tests ringbuffer sub-allocations of the same serial are correctly tracked. -TEST(RingBufferAllocatorTests, AllocInSameFrame) { - constexpr uint64_t maxNumOfFrames = 3; - constexpr uint64_t frameSizeInBytes = 4; + // Tests that several ringbuffer allocations do not fail. + TEST(RingBufferAllocatorTests, RingBufferManyAlloc) { + constexpr uint64_t maxNumOfFrames = 64000; + constexpr uint64_t frameSizeInBytes = 4; - RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); + RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); - // F1 - // [xxxx|--------] - size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1)); + size_t offset = 0; + for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) { + offset = allocator.Allocate(frameSizeInBytes, i); + ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes); + } + } - // F1 F2 - // [xxxx|xxxx|----] + // Tests ringbuffer sub-allocations of the same serial are correctly tracked. 
+ TEST(RingBufferAllocatorTests, AllocInSameFrame) { + constexpr uint64_t maxNumOfFrames = 3; + constexpr uint64_t frameSizeInBytes = 4; - offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2)); + RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); - // F1 F2 - // [xxxx|xxxxxxxx] + // F1 + // [xxxx|--------] + size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1)); - offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2)); + // F1 F2 + // [xxxx|xxxx|----] - ASSERT_EQ(offset, 8u); - ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3); + offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2)); - allocator.Deallocate(ExecutionSerial(2)); + // F1 F2 + // [xxxx|xxxxxxxx] - ASSERT_EQ(allocator.GetUsedSize(), 0u); - EXPECT_TRUE(allocator.Empty()); -} + offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2)); -// Tests ringbuffer sub-allocation at various offsets. -TEST(RingBufferAllocatorTests, RingBufferSubAlloc) { - constexpr uint64_t maxNumOfFrames = 10; - constexpr uint64_t frameSizeInBytes = 4; + ASSERT_EQ(offset, 8u); + ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3); - RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); + allocator.Deallocate(ExecutionSerial(2)); - // Sub-alloc the first eight frames. - ExecutionSerial serial(0); - while (serial < ExecutionSerial(8)) { - allocator.Allocate(frameSizeInBytes, serial); + ASSERT_EQ(allocator.GetUsedSize(), 0u); + EXPECT_TRUE(allocator.Empty()); + } + + // Tests ringbuffer sub-allocation at various offsets. + TEST(RingBufferAllocatorTests, RingBufferSubAlloc) { + constexpr uint64_t maxNumOfFrames = 10; + constexpr uint64_t frameSizeInBytes = 4; + + RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); + + // Sub-alloc the first eight frames. 
+ ExecutionSerial serial(0); + while (serial < ExecutionSerial(8)) { + allocator.Allocate(frameSizeInBytes, serial); + serial++; + } + + // Each frame corrresponds to the serial number (for simplicity). + // + // F1 F2 F3 F4 F5 F6 F7 F8 + // [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------] + // + + // Ensure an oversized allocation fails (only 8 bytes left) + ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial), + RingBufferAllocator::kInvalidOffset); + ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8); + + // Reclaim the first 3 frames. + allocator.Deallocate(ExecutionSerial(2)); + + // F4 F5 F6 F7 F8 + // [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------] + // + ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5); + + // Re-try the over-sized allocation. + size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial)); + + // F9 F4 F5 F6 F7 F8 + // [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx] + // ^^^^^^^^ wasted + + // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes + // were added to F9's sub-allocation. + // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes. + + ASSERT_EQ(offset, 0u); + ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames); + + // Ensure we are full. + ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), + RingBufferAllocator::kInvalidOffset); + + // Reclaim the next two frames. + allocator.Deallocate(ExecutionSerial(4)); + + // F9 F4 F5 F6 F7 F8 + // [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx] + // + ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8); + + // Sub-alloc the chunk in the middle. serial++; + offset = allocator.Allocate(frameSizeInBytes * 2, serial); + + ASSERT_EQ(offset, frameSizeInBytes * 3); + ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames); + + // F9 F10 F6 F7 F8 + // [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx] + // + + // Ensure we are full. 
+ ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), + RingBufferAllocator::kInvalidOffset); + + // Reclaim all. + allocator.Deallocate(kMaxExecutionSerial); + + EXPECT_TRUE(allocator.Empty()); } - // Each frame corrresponds to the serial number (for simplicity). - // - // F1 F2 F3 F4 F5 F6 F7 F8 - // [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------] - // + // Checks if ringbuffer sub-allocation does not overflow. + TEST(RingBufferAllocatorTests, RingBufferOverflow) { + RingBufferAllocator allocator(std::numeric_limits::max()); - // Ensure an oversized allocation fails (only 8 bytes left) - ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial), - RingBufferAllocator::kInvalidOffset); - ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8); + ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u); + ASSERT_EQ(allocator.Allocate(std::numeric_limits::max(), ExecutionSerial(1)), + RingBufferAllocator::kInvalidOffset); + } - // Reclaim the first 3 frames. - allocator.Deallocate(ExecutionSerial(2)); - - // F4 F5 F6 F7 F8 - // [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------] - // - ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5); - - // Re-try the over-sized allocation. - size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial)); - - // F9 F4 F5 F6 F7 F8 - // [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx] - // ^^^^^^^^ wasted - - // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes - // were added to F9's sub-allocation. - // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes. - - ASSERT_EQ(offset, 0u); - ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames); - - // Ensure we are full. - ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset); - - // Reclaim the next two frames. 
- allocator.Deallocate(ExecutionSerial(4)); - - // F9 F4 F5 F6 F7 F8 - // [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx] - // - ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8); - - // Sub-alloc the chunk in the middle. - serial++; - offset = allocator.Allocate(frameSizeInBytes * 2, serial); - - ASSERT_EQ(offset, frameSizeInBytes * 3); - ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames); - - // F9 F10 F6 F7 F8 - // [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx] - // - - // Ensure we are full. - ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset); - - // Reclaim all. - allocator.Deallocate(kMaxExecutionSerial); - - EXPECT_TRUE(allocator.Empty()); -} - -// Checks if ringbuffer sub-allocation does not overflow. -TEST(RingBufferAllocatorTests, RingBufferOverflow) { - RingBufferAllocator allocator(std::numeric_limits::max()); - - ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u); - ASSERT_EQ(allocator.Allocate(std::numeric_limits::max(), ExecutionSerial(1)), - RingBufferAllocator::kInvalidOffset); -} +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/SubresourceStorageTests.cpp b/src/dawn/tests/unittests/SubresourceStorageTests.cpp index b526435512..a4e49c0f42 100644 --- a/src/dawn/tests/unittests/SubresourceStorageTests.cpp +++ b/src/dawn/tests/unittests/SubresourceStorageTests.cpp @@ -18,660 +18,672 @@ #include "dawn/native/SubresourceStorage.h" #include "gtest/gtest.h" -using namespace dawn::native; +namespace dawn::native { -// A fake class that replicates the behavior of SubresourceStorage but without any compression and -// is used to compare the results of operations on SubresourceStorage against the "ground truth" of -// FakeStorage. 
-template -struct FakeStorage { - FakeStorage(Aspect aspects, - uint32_t arrayLayerCount, - uint32_t mipLevelCount, - T initialValue = {}) - : mAspects(aspects), - mArrayLayerCount(arrayLayerCount), - mMipLevelCount(mipLevelCount), - mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) { - } - - template - void Update(const SubresourceRange& range, F&& updateFunc) { - for (Aspect aspect : IterateEnumMask(range.aspects)) { - for (uint32_t layer = range.baseArrayLayer; - layer < range.baseArrayLayer + range.layerCount; layer++) { - for (uint32_t level = range.baseMipLevel; - level < range.baseMipLevel + range.levelCount; level++) { - SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level); - updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]); - } - } - } - } - - template - void Merge(const SubresourceStorage& other, F&& mergeFunc) { - for (Aspect aspect : IterateEnumMask(mAspects)) { - for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) { - for (uint32_t level = 0; level < mMipLevelCount; level++) { - SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level); - mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)], - other.Get(aspect, layer, level)); - } - } - } - } - - const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const { - return mData[GetDataIndex(aspect, arrayLayer, mipLevel)]; - } - - size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const { - uint32_t aspectIndex = GetAspectIndex(aspect); - return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex); - } - - // Method that checks that this and real have exactly the same content. It does so via looping - // on all subresources and calling Get() (hence testing Get()). It also calls Iterate() - // checking that every subresource is mentioned exactly once and that its content is correct - // (hence testing Iterate()). 
- // Its implementation requires the RangeTracker below that itself needs FakeStorage so it - // cannot be define inline with the other methods. - void CheckSameAs(const SubresourceStorage& real); - - Aspect mAspects; - uint32_t mArrayLayerCount; - uint32_t mMipLevelCount; - - std::vector mData; -}; - -// Track a set of ranges that have been seen and can assert that in aggregate they make exactly -// a single range (and that each subresource was seen only once). -struct RangeTracker { + // A fake class that replicates the behavior of SubresourceStorage but without any compression + // and is used to compare the results of operations on SubresourceStorage against the "ground + // truth" of FakeStorage. template - explicit RangeTracker(const SubresourceStorage& s) - : mTracked(s.GetAspectsForTesting(), - s.GetArrayLayerCountForTesting(), - s.GetMipLevelCountForTesting(), - 0) { - } - - void Track(const SubresourceRange& range) { - // Add +1 to the subresources tracked. - mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) { - ASSERT_EQ(*counter, 0u); - *counter += 1; - }); - } - - void CheckTrackedExactly(const SubresourceRange& range) { - // Check that all subresources in the range were tracked once and set the counter back to 0. - mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) { - ASSERT_EQ(*counter, 1u); - *counter = 0; - }); - - // Now all subresources should be at 0. 
- for (int counter : mTracked.mData) { - ASSERT_EQ(counter, 0); + struct FakeStorage { + FakeStorage(Aspect aspects, + uint32_t arrayLayerCount, + uint32_t mipLevelCount, + T initialValue = {}) + : mAspects(aspects), + mArrayLayerCount(arrayLayerCount), + mMipLevelCount(mipLevelCount), + mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) { } - } - FakeStorage mTracked; -}; - -template -void FakeStorage::CheckSameAs(const SubresourceStorage& real) { - EXPECT_EQ(real.GetAspectsForTesting(), mAspects); - EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount); - EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount); - - RangeTracker tracker(real); - real.Iterate([&](const SubresourceRange& range, const T& data) { - // Check that the range is sensical. - EXPECT_TRUE(IsSubset(range.aspects, mAspects)); - - EXPECT_LT(range.baseArrayLayer, mArrayLayerCount); - EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount); - - EXPECT_LT(range.baseMipLevel, mMipLevelCount); - EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount); - - for (Aspect aspect : IterateEnumMask(range.aspects)) { - for (uint32_t layer = range.baseArrayLayer; - layer < range.baseArrayLayer + range.layerCount; layer++) { - for (uint32_t level = range.baseMipLevel; - level < range.baseMipLevel + range.levelCount; level++) { - EXPECT_EQ(data, Get(aspect, layer, level)); - EXPECT_EQ(data, real.Get(aspect, layer, level)); + template + void Update(const SubresourceRange& range, F&& updateFunc) { + for (Aspect aspect : IterateEnumMask(range.aspects)) { + for (uint32_t layer = range.baseArrayLayer; + layer < range.baseArrayLayer + range.layerCount; layer++) { + for (uint32_t level = range.baseMipLevel; + level < range.baseMipLevel + range.levelCount; level++) { + SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level); + updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]); + } } } } - tracker.Track(range); - }); - - 
tracker.CheckTrackedExactly( - SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount)); -} - -template -void CheckAspectCompressed(const SubresourceStorage& s, Aspect aspect, bool expected) { - ASSERT(HasOneBit(aspect)); - - uint32_t levelCount = s.GetMipLevelCountForTesting(); - uint32_t layerCount = s.GetArrayLayerCountForTesting(); - - bool seen = false; - s.Iterate([&](const SubresourceRange& range, const T&) { - if (range.aspects == aspect && range.layerCount == layerCount && - range.levelCount == levelCount && range.baseArrayLayer == 0 && - range.baseMipLevel == 0) { - seen = true; + template + void Merge(const SubresourceStorage& other, F&& mergeFunc) { + for (Aspect aspect : IterateEnumMask(mAspects)) { + for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) { + for (uint32_t level = 0; level < mMipLevelCount; level++) { + SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level); + mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)], + other.Get(aspect, layer, level)); + } + } + } } - }); - ASSERT_EQ(seen, expected); - - // Check that the internal state of SubresourceStorage matches what we expect. - // If an aspect is compressed, all its layers should be internally tagged as compressed. 
- ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected); - if (expected) { - for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) { - ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer)); + const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const { + return mData[GetDataIndex(aspect, arrayLayer, mipLevel)]; } - } -} -template -void CheckLayerCompressed(const SubresourceStorage& s, - Aspect aspect, - uint32_t layer, - bool expected) { - ASSERT(HasOneBit(aspect)); - - uint32_t levelCount = s.GetMipLevelCountForTesting(); - - bool seen = false; - s.Iterate([&](const SubresourceRange& range, const T&) { - if (range.aspects == aspect && range.layerCount == 1 && range.levelCount == levelCount && - range.baseArrayLayer == layer && range.baseMipLevel == 0) { - seen = true; + size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const { + uint32_t aspectIndex = GetAspectIndex(aspect); + return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex); } - }); - ASSERT_EQ(seen, expected); - ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected); -} + // Method that checks that this and real have exactly the same content. It does so via + // looping on all subresources and calling Get() (hence testing Get()). It also calls + // Iterate() checking that every subresource is mentioned exactly once and that its content + // is correct (hence testing Iterate()). Its implementation requires the RangeTracker below + // that itself needs FakeStorage so it cannot be define inline with the other methods. + void CheckSameAs(const SubresourceStorage& real); -struct SmallData { - uint32_t value = 0xF00; -}; + Aspect mAspects; + uint32_t mArrayLayerCount; + uint32_t mMipLevelCount; -bool operator==(const SmallData& a, const SmallData& b) { - return a.value == b.value; -} + std::vector mData; + }; -// Test that the default value is correctly set. 
-TEST(SubresourceStorageTest, DefaultValue) { - // Test setting no default value for a primitive type. - { - SubresourceStorage s(Aspect::Color, 3, 5); - EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0); + // Track a set of ranges that have been seen and can assert that in aggregate they make exactly + // a single range (and that each subresource was seen only once). + struct RangeTracker { + template + explicit RangeTracker(const SubresourceStorage& s) + : mTracked(s.GetAspectsForTesting(), + s.GetArrayLayerCountForTesting(), + s.GetMipLevelCountForTesting(), + 0) { + } - FakeStorage f(Aspect::Color, 3, 5); - f.CheckSameAs(s); + void Track(const SubresourceRange& range) { + // Add +1 to the subresources tracked. + mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) { + ASSERT_EQ(*counter, 0u); + *counter += 1; + }); + } + + void CheckTrackedExactly(const SubresourceRange& range) { + // Check that all subresources in the range were tracked once and set the counter back + // to 0. + mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) { + ASSERT_EQ(*counter, 1u); + *counter = 0; + }); + + // Now all subresources should be at 0. + for (int counter : mTracked.mData) { + ASSERT_EQ(counter, 0); + } + } + + FakeStorage mTracked; + }; + + template + void FakeStorage::CheckSameAs(const SubresourceStorage& real) { + EXPECT_EQ(real.GetAspectsForTesting(), mAspects); + EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount); + EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount); + + RangeTracker tracker(real); + real.Iterate([&](const SubresourceRange& range, const T& data) { + // Check that the range is sensical. 
+ EXPECT_TRUE(IsSubset(range.aspects, mAspects)); + + EXPECT_LT(range.baseArrayLayer, mArrayLayerCount); + EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount); + + EXPECT_LT(range.baseMipLevel, mMipLevelCount); + EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount); + + for (Aspect aspect : IterateEnumMask(range.aspects)) { + for (uint32_t layer = range.baseArrayLayer; + layer < range.baseArrayLayer + range.layerCount; layer++) { + for (uint32_t level = range.baseMipLevel; + level < range.baseMipLevel + range.levelCount; level++) { + EXPECT_EQ(data, Get(aspect, layer, level)); + EXPECT_EQ(data, real.Get(aspect, layer, level)); + } + } + } + + tracker.Track(range); + }); + + tracker.CheckTrackedExactly( + SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount)); } - // Test setting a default value for a primitive type. - { - SubresourceStorage s(Aspect::Color, 3, 5, 42); - EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42); + template + void CheckAspectCompressed(const SubresourceStorage& s, Aspect aspect, bool expected) { + ASSERT(HasOneBit(aspect)); - FakeStorage f(Aspect::Color, 3, 5, 42); - f.CheckSameAs(s); - } + uint32_t levelCount = s.GetMipLevelCountForTesting(); + uint32_t layerCount = s.GetArrayLayerCountForTesting(); - // Test setting no default value for a type with a default constructor. - { - SubresourceStorage s(Aspect::Color, 3, 5); - EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u); + bool seen = false; + s.Iterate([&](const SubresourceRange& range, const T&) { + if (range.aspects == aspect && range.layerCount == layerCount && + range.levelCount == levelCount && range.baseArrayLayer == 0 && + range.baseMipLevel == 0) { + seen = true; + } + }); - FakeStorage f(Aspect::Color, 3, 5); - f.CheckSameAs(s); - } - // Test setting a default value for a type with a default constructor. 
- { - SubresourceStorage s(Aspect::Color, 3, 5, {007u}); - EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u); + ASSERT_EQ(seen, expected); - FakeStorage f(Aspect::Color, 3, 5, {007u}); - f.CheckSameAs(s); - } -} - -// The tests for Update() all follow the same pattern of setting up a real and a fake storage then -// performing one or multiple Update()s on them and checking: -// - They have the same content. -// - The Update() range was correct. -// - The aspects and layers have the expected "compressed" status. - -// Calls Update both on the read storage and the fake storage but intercepts the call to updateFunc -// done by the real storage to check their ranges argument aggregate to exactly the update range. -template -void CallUpdateOnBoth(SubresourceStorage* s, - FakeStorage* f, - const SubresourceRange& range, - F&& updateFunc) { - RangeTracker tracker(*s); - - s->Update(range, [&](const SubresourceRange& range, T* data) { - tracker.Track(range); - updateFunc(range, data); - }); - f->Update(range, updateFunc); - - tracker.CheckTrackedExactly(range); - f->CheckSameAs(*s); -} - -// Test updating a single subresource on a single-aspect storage. -TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) { - SubresourceStorage s(Aspect::Color, 5, 7); - FakeStorage f(Aspect::Color, 5, 7); - - // Update a single subresource. - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; }); - - CheckAspectCompressed(s, Aspect::Color, false); - CheckLayerCompressed(s, Aspect::Color, 2, true); - CheckLayerCompressed(s, Aspect::Color, 3, false); - CheckLayerCompressed(s, Aspect::Color, 4, true); -} - -// Test updating a single subresource on a multi-aspect storage. 
-TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) { - SubresourceStorage s(Aspect::Depth | Aspect::Stencil, 5, 3); - FakeStorage f(Aspect::Depth | Aspect::Stencil, 5, 3); - - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; }); - - CheckAspectCompressed(s, Aspect::Depth, true); - CheckAspectCompressed(s, Aspect::Stencil, false); - CheckLayerCompressed(s, Aspect::Stencil, 0, true); - CheckLayerCompressed(s, Aspect::Stencil, 1, false); - CheckLayerCompressed(s, Aspect::Stencil, 2, true); -} - -// Test updating as a stipple pattern on one of two aspects then updating it completely. -TEST(SubresourceStorageTest, UpdateStipple) { - const uint32_t kLayers = 10; - const uint32_t kLevels = 7; - SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - - // Update with a stipple. - for (uint32_t layer = 0; layer < kLayers; layer++) { - for (uint32_t level = 0; level < kLevels; level++) { - if ((layer + level) % 2 == 0) { - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Depth, layer, level); - CallUpdateOnBoth(&s, &f, range, - [](const SubresourceRange&, int* data) { *data += 17; }); + // Check that the internal state of SubresourceStorage matches what we expect. + // If an aspect is compressed, all its layers should be internally tagged as compressed. + ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected); + if (expected) { + for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) { + ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer)); } } } - // The depth should be fully uncompressed while the stencil stayed compressed. 
- CheckAspectCompressed(s, Aspect::Stencil, true); - CheckAspectCompressed(s, Aspect::Depth, false); - for (uint32_t layer = 0; layer < kLayers; layer++) { - CheckLayerCompressed(s, Aspect::Depth, layer, false); + template + void CheckLayerCompressed(const SubresourceStorage& s, + Aspect aspect, + uint32_t layer, + bool expected) { + ASSERT(HasOneBit(aspect)); + + uint32_t levelCount = s.GetMipLevelCountForTesting(); + + bool seen = false; + s.Iterate([&](const SubresourceRange& range, const T&) { + if (range.aspects == aspect && range.layerCount == 1 && + range.levelCount == levelCount && range.baseArrayLayer == layer && + range.baseMipLevel == 0) { + seen = true; + } + }); + + ASSERT_EQ(seen, expected); + ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected); } - // Update completely with a single value. Recompression should happen! - { - SubresourceRange fullRange = - SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 31; }); + struct SmallData { + uint32_t value = 0xF00; + }; + + bool operator==(const SmallData& a, const SmallData& b) { + return a.value == b.value; } - CheckAspectCompressed(s, Aspect::Depth, true); - CheckAspectCompressed(s, Aspect::Stencil, true); -} + // Test that the default value is correctly set. + TEST(SubresourceStorageTest, DefaultValue) { + // Test setting no default value for a primitive type. + { + SubresourceStorage s(Aspect::Color, 3, 5); + EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0); -// Test updating as a crossing band pattern: -// - The first band is full layers [2, 3] on both aspects -// - The second band is full mips [5, 6] on one aspect. -// Then updating completely. 
-TEST(SubresourceStorageTest, UpdateTwoBand) { - const uint32_t kLayers = 5; - const uint32_t kLevels = 9; - SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + FakeStorage f(Aspect::Color, 3, 5); + f.CheckSameAs(s); + } - // Update the two bands - { - SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels}); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; }); + // Test setting a default value for a primitive type. + { + SubresourceStorage s(Aspect::Color, 3, 5, 42); + EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42); + + FakeStorage f(Aspect::Color, 3, 5, 42); + f.CheckSameAs(s); + } + + // Test setting no default value for a type with a default constructor. + { + SubresourceStorage s(Aspect::Color, 3, 5); + EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u); + + FakeStorage f(Aspect::Color, 3, 5); + f.CheckSameAs(s); + } + // Test setting a default value for a type with a default constructor. + { + SubresourceStorage s(Aspect::Color, 3, 5, {007u}); + EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u); + + FakeStorage f(Aspect::Color, 3, 5, {007u}); + f.CheckSameAs(s); + } } - // The layers were fully updated so they should stay compressed. - CheckLayerCompressed(s, Aspect::Depth, 2, true); - CheckLayerCompressed(s, Aspect::Depth, 3, true); - CheckLayerCompressed(s, Aspect::Stencil, 2, true); - CheckLayerCompressed(s, Aspect::Stencil, 3, true); + // The tests for Update() all follow the same pattern of setting up a real and a fake storage + // then performing one or multiple Update()s on them and checking: + // - They have the same content. + // - The Update() range was correct. + // - The aspects and layers have the expected "compressed" status. 
- { - SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2}); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; }); + // Calls Update both on the read storage and the fake storage but intercepts the call to + // updateFunc done by the real storage to check their ranges argument aggregate to exactly the + // update range. + template + void CallUpdateOnBoth(SubresourceStorage* s, + FakeStorage* f, + const SubresourceRange& range, + F&& updateFunc) { + RangeTracker tracker(*s); + + s->Update(range, [&](const SubresourceRange& range, T* data) { + tracker.Track(range); + updateFunc(range, data); + }); + f->Update(range, updateFunc); + + tracker.CheckTrackedExactly(range); + f->CheckSameAs(*s); } - // The layers had to be decompressed in depth - CheckLayerCompressed(s, Aspect::Depth, 2, false); - CheckLayerCompressed(s, Aspect::Depth, 3, false); - CheckLayerCompressed(s, Aspect::Stencil, 2, true); - CheckLayerCompressed(s, Aspect::Stencil, 3, true); + // Test updating a single subresource on a single-aspect storage. + TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) { + SubresourceStorage s(Aspect::Color, 5, 7); + FakeStorage f(Aspect::Color, 5, 7); - // Update completely. Without a single value recompression shouldn't happen. - { - SubresourceRange fullRange = - SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - CallUpdateOnBoth(&s, &f, fullRange, - [](const SubresourceRange&, int* data) { *data += 12; }); - } - - CheckAspectCompressed(s, Aspect::Depth, false); - CheckAspectCompressed(s, Aspect::Stencil, false); -} - -// Test updating with extremal subresources -// - Then half of the array layers in full. -// - Then updating completely. 
-TEST(SubresourceStorageTest, UpdateExtremas) { - const uint32_t kLayers = 6; - const uint32_t kLevels = 4; - SubresourceStorage s(Aspect::Color, kLayers, kLevels); - FakeStorage f(Aspect::Color, kLayers, kLevels); - - // Update the two extrema - { - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; }); - } - { - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; }); - } - - CheckLayerCompressed(s, Aspect::Color, 0, false); - CheckLayerCompressed(s, Aspect::Color, 1, true); - CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true); - CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false); - - // Update half of the layers in full with constant values. Some recompression should happen. - { - SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels}); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 123; }); - } - - CheckLayerCompressed(s, Aspect::Color, 0, true); - CheckLayerCompressed(s, Aspect::Color, 1, true); - CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false); - - // Update completely. Recompression should happen! - { - SubresourceRange fullRange = SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels); - CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 35; }); - } - - CheckAspectCompressed(s, Aspect::Color, true); -} - -// A regression test for an issue found while reworking the implementation where RecompressAspect -// didn't correctly check that each each layer was compressed but only that their 0th value was -// the same. 
-TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) { - SubresourceStorage s(Aspect::Color, 2, 2); - FakeStorage f(Aspect::Color, 2, 2); - - // Update 0th mip levels to some value, it should decompress the aspect and both layers. - { - SubresourceRange range(Aspect::Color, {0, 2}, {0, 1}); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; }); - } - - CheckAspectCompressed(s, Aspect::Color, false); - CheckLayerCompressed(s, Aspect::Color, 0, false); - CheckLayerCompressed(s, Aspect::Color, 1, false); - - // Update the whole resource by doing +1. The aspects and layers should stay decompressed. - { - SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2); + // Update a single subresource. + SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2); CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; }); + + CheckAspectCompressed(s, Aspect::Color, false); + CheckLayerCompressed(s, Aspect::Color, 2, true); + CheckLayerCompressed(s, Aspect::Color, 3, false); + CheckLayerCompressed(s, Aspect::Color, 4, true); } - CheckAspectCompressed(s, Aspect::Color, false); - CheckLayerCompressed(s, Aspect::Color, 0, false); - CheckLayerCompressed(s, Aspect::Color, 1, false); -} + // Test updating a single subresource on a multi-aspect storage. + TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) { + SubresourceStorage s(Aspect::Depth | Aspect::Stencil, 5, 3); + FakeStorage f(Aspect::Depth | Aspect::Stencil, 5, 3); -// The tests for Merge() all follow the same as the Update() tests except that they use Update() -// to set up the test storages. 
+ SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; }); -// Similar to CallUpdateOnBoth but for Merge -template -void CallMergeOnBoth(SubresourceStorage* s, - FakeStorage* f, - const SubresourceStorage& other, - F&& mergeFunc) { - RangeTracker tracker(*s); - - s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) { - tracker.Track(range); - mergeFunc(range, data, otherData); - }); - f->Merge(other, mergeFunc); - - tracker.CheckTrackedExactly( - SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount)); - f->CheckSameAs(*s); -} - -// Test merging two fully compressed single-aspect resources. -TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) { - SubresourceStorage s(Aspect::Color, 4, 6); - FakeStorage f(Aspect::Color, 4, 6); - - // Merge the whole resource in a single call. - SubresourceStorage other(Aspect::Color, 4, 6, true); - CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) { - if (other) { - *data = 13; - } - }); - - CheckAspectCompressed(s, Aspect::Color, true); -} - -// Test merging two fully compressed multi-aspect resources. -TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) { - SubresourceStorage s(Aspect::Depth | Aspect::Stencil, 6, 7); - FakeStorage f(Aspect::Depth | Aspect::Stencil, 6, 7); - - // Merge the whole resource in a single call. - SubresourceStorage other(Aspect::Depth | Aspect::Stencil, 6, 7, true); - CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) { - if (other) { - *data = 13; - } - }); - - CheckAspectCompressed(s, Aspect::Depth, true); - CheckAspectCompressed(s, Aspect::Stencil, true); -} - -// Test merging a fully compressed resource in a resource with the "cross band" pattern. -// - The first band is full layers [2, 3] on both aspects -// - The second band is full mips [5, 6] on one aspect. 
-// This provides coverage of using a single piece of data from `other` to update all of `s` -TEST(SubresourceStorageTest, MergeFullInTwoBand) { - const uint32_t kLayers = 5; - const uint32_t kLevels = 9; - SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - - // Update the two bands - { - SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels}); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; }); - } - { - SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2}); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; }); + CheckAspectCompressed(s, Aspect::Depth, true); + CheckAspectCompressed(s, Aspect::Stencil, false); + CheckLayerCompressed(s, Aspect::Stencil, 0, true); + CheckLayerCompressed(s, Aspect::Stencil, 1, false); + CheckLayerCompressed(s, Aspect::Stencil, 2, true); } - // Merge the fully compressed resource. - SubresourceStorage other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17); - CallMergeOnBoth(&s, &f, other, - [](const SubresourceRange&, int* data, int other) { *data += other; }); + // Test updating as a stipple pattern on one of two aspects then updating it completely. + TEST(SubresourceStorageTest, UpdateStipple) { + const uint32_t kLayers = 10; + const uint32_t kLevels = 7; + SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - // The layers traversed by the mip band are still uncompressed. - CheckLayerCompressed(s, Aspect::Depth, 1, false); - CheckLayerCompressed(s, Aspect::Depth, 2, false); - CheckLayerCompressed(s, Aspect::Depth, 3, false); - CheckLayerCompressed(s, Aspect::Depth, 4, false); - - // Stencil is decompressed but all its layers are still compressed because there wasn't the mip - // band. 
- CheckAspectCompressed(s, Aspect::Stencil, false); - CheckLayerCompressed(s, Aspect::Stencil, 1, true); - CheckLayerCompressed(s, Aspect::Stencil, 2, true); - CheckLayerCompressed(s, Aspect::Stencil, 3, true); - CheckLayerCompressed(s, Aspect::Stencil, 4, true); -} -// Test the reverse, mergign two-bands in a full resource. This provides coverage for decompressing -// aspects / and partilly layers to match the compression of `other` -TEST(SubresourceStorageTest, MergeTwoBandInFull) { - const uint32_t kLayers = 5; - const uint32_t kLevels = 9; - SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75); - FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75); - - // Update the two bands - SubresourceStorage other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - { - SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels}); - other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; }); - } - { - SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2}); - other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; }); - } - - // Merge the fully compressed resource. - CallMergeOnBoth(&s, &f, other, - [](const SubresourceRange&, int* data, int other) { *data += other; }); - - // The layers traversed by the mip band are still uncompressed. - CheckLayerCompressed(s, Aspect::Depth, 1, false); - CheckLayerCompressed(s, Aspect::Depth, 2, false); - CheckLayerCompressed(s, Aspect::Depth, 3, false); - CheckLayerCompressed(s, Aspect::Depth, 4, false); - - // Stencil is decompressed but all its layers are still compressed because there wasn't the mip - // band. 
- CheckAspectCompressed(s, Aspect::Stencil, false); - CheckLayerCompressed(s, Aspect::Stencil, 1, true); - CheckLayerCompressed(s, Aspect::Stencil, 2, true); - CheckLayerCompressed(s, Aspect::Stencil, 3, true); - CheckLayerCompressed(s, Aspect::Stencil, 4, true); -} - -// Test merging storage with a layer band in a stipple patterned storage. This provide coverage -// for the code path that uses the same layer data for other multiple times. -TEST(SubresourceStorageTest, MergeLayerBandInStipple) { - const uint32_t kLayers = 3; - const uint32_t kLevels = 5; - - SubresourceStorage s(Aspect::Color, kLayers, kLevels); - FakeStorage f(Aspect::Color, kLayers, kLevels); - SubresourceStorage other(Aspect::Color, kLayers, kLevels); - - for (uint32_t layer = 0; layer < kLayers; layer++) { - for (uint32_t level = 0; level < kLevels; level++) { - if ((layer + level) % 2 == 0) { - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, layer, level); - CallUpdateOnBoth(&s, &f, range, - [](const SubresourceRange&, int* data) { *data += 17; }); + // Update with a stipple. + for (uint32_t layer = 0; layer < kLayers; layer++) { + for (uint32_t level = 0; level < kLevels; level++) { + if ((layer + level) % 2 == 0) { + SubresourceRange range = + SubresourceRange::MakeSingle(Aspect::Depth, layer, level); + CallUpdateOnBoth(&s, &f, range, + [](const SubresourceRange&, int* data) { *data += 17; }); + } } } - if (layer % 2 == 0) { - other.Update({Aspect::Color, {layer, 1}, {0, kLevels}}, - [](const SubresourceRange&, int* data) { *data += 8; }); + + // The depth should be fully uncompressed while the stencil stayed compressed. + CheckAspectCompressed(s, Aspect::Stencil, true); + CheckAspectCompressed(s, Aspect::Depth, false); + for (uint32_t layer = 0; layer < kLayers; layer++) { + CheckLayerCompressed(s, Aspect::Depth, layer, false); } + + // Update completely with a single value. Recompression should happen! 
+ { + SubresourceRange fullRange = + SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + CallUpdateOnBoth(&s, &f, fullRange, + [](const SubresourceRange&, int* data) { *data = 31; }); + } + + CheckAspectCompressed(s, Aspect::Depth, true); + CheckAspectCompressed(s, Aspect::Stencil, true); } - // Merge the band in the stipple. - CallMergeOnBoth(&s, &f, other, - [](const SubresourceRange&, int* data, int other) { *data += other; }); + // Test updating as a crossing band pattern: + // - The first band is full layers [2, 3] on both aspects + // - The second band is full mips [5, 6] on one aspect. + // Then updating completely. + TEST(SubresourceStorageTest, UpdateTwoBand) { + const uint32_t kLayers = 5; + const uint32_t kLevels = 9; + SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); - // None of the resulting layers are compressed. - CheckLayerCompressed(s, Aspect::Color, 0, false); - CheckLayerCompressed(s, Aspect::Color, 1, false); - CheckLayerCompressed(s, Aspect::Color, 2, false); -} + // Update the two bands + { + SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels}); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; }); + } -// Regression test for a missing check that layer 0 is compressed when recompressing. -TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) { - const uint32_t kLayers = 2; - const uint32_t kLevels = 2; - SubresourceStorage s(Aspect::Color, kLayers, kLevels); - FakeStorage f(Aspect::Color, kLayers, kLevels); + // The layers were fully updated so they should stay compressed. + CheckLayerCompressed(s, Aspect::Depth, 2, true); + CheckLayerCompressed(s, Aspect::Depth, 3, true); + CheckLayerCompressed(s, Aspect::Stencil, 2, true); + CheckLayerCompressed(s, Aspect::Stencil, 3, true); - // Set up s with zeros except (0, 1) which is garbage. 
- { - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; }); + { + SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2}); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; }); + } + + // The layers had to be decompressed in depth + CheckLayerCompressed(s, Aspect::Depth, 2, false); + CheckLayerCompressed(s, Aspect::Depth, 3, false); + CheckLayerCompressed(s, Aspect::Stencil, 2, true); + CheckLayerCompressed(s, Aspect::Stencil, 3, true); + + // Update completely. Without a single value recompression shouldn't happen. + { + SubresourceRange fullRange = + SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + CallUpdateOnBoth(&s, &f, fullRange, + [](const SubresourceRange&, int* data) { *data += 12; }); + } + + CheckAspectCompressed(s, Aspect::Depth, false); + CheckAspectCompressed(s, Aspect::Stencil, false); } - // Other is 2x2 of zeroes - SubresourceStorage other(Aspect::Color, kLayers, kLevels); + // Test updating with extremal subresources + // - Then half of the array layers in full. + // - Then updating completely. + TEST(SubresourceStorageTest, UpdateExtremas) { + const uint32_t kLayers = 6; + const uint32_t kLevels = 4; + SubresourceStorage s(Aspect::Color, kLayers, kLevels); + FakeStorage f(Aspect::Color, kLayers, kLevels); - // Fake updating F with other which is fully compressed and will trigger recompression. 
- CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {}); + // Update the two extrema + { + SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; }); + } + { + SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; }); + } - // The Color aspect should not have been recompressed. - CheckAspectCompressed(s, Aspect::Color, false); - CheckLayerCompressed(s, Aspect::Color, 0, false); -} + CheckLayerCompressed(s, Aspect::Color, 0, false); + CheckLayerCompressed(s, Aspect::Color, 1, true); + CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true); + CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false); -// Regression test for aspect decompression not copying to layer 0 -TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) { - const uint32_t kLayers = 2; - const uint32_t kLevels = 2; - SubresourceStorage s(Aspect::Color, kLayers, kLevels, 3); - FakeStorage f(Aspect::Color, kLayers, kLevels, 3); + // Update half of the layers in full with constant values. Some recompression should happen. + { + SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels}); + CallUpdateOnBoth(&s, &f, range, + [](const SubresourceRange&, int* data) { *data = 123; }); + } - // Cause decompression by writing to a single subresource. - { - SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1); - CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; }); + CheckLayerCompressed(s, Aspect::Color, 0, true); + CheckLayerCompressed(s, Aspect::Color, 1, true); + CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false); + + // Update completely. Recompression should happen! 
+ { + SubresourceRange fullRange = + SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels); + CallUpdateOnBoth(&s, &f, fullRange, + [](const SubresourceRange&, int* data) { *data = 35; }); + } + + CheckAspectCompressed(s, Aspect::Color, true); } - // Check that the aspect's value of 3 was correctly decompressed in layer 0. - CheckLayerCompressed(s, Aspect::Color, 0, true); - EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0)); - EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1)); -} + // A regression test for an issue found while reworking the implementation where + // RecompressAspect didn't correctly check that each each layer was compressed but only that + // their 0th value was the same. + TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) { + SubresourceStorage s(Aspect::Color, 2, 2); + FakeStorage f(Aspect::Color, 2, 2); -// Bugs found while testing: -// - mLayersCompressed not initialized to true. -// - DecompressLayer setting Compressed to true instead of false. -// - Get() checking for !compressed instead of compressed for the early exit. -// - ASSERT in RecompressLayers was inverted. -// - Two != being converted to == during a rework. -// - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed. -// - Missing decompression of layer 0 after introducing mInlineAspectData. + // Update 0th mip levels to some value, it should decompress the aspect and both layers. + { + SubresourceRange range(Aspect::Color, {0, 2}, {0, 1}); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; }); + } + + CheckAspectCompressed(s, Aspect::Color, false); + CheckLayerCompressed(s, Aspect::Color, 0, false); + CheckLayerCompressed(s, Aspect::Color, 1, false); + + // Update the whole resource by doing +1. The aspects and layers should stay decompressed. 
+ { + SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; }); + } + + CheckAspectCompressed(s, Aspect::Color, false); + CheckLayerCompressed(s, Aspect::Color, 0, false); + CheckLayerCompressed(s, Aspect::Color, 1, false); + } + + // The tests for Merge() all follow the same as the Update() tests except that they use Update() + // to set up the test storages. + + // Similar to CallUpdateOnBoth but for Merge + template + void CallMergeOnBoth(SubresourceStorage* s, + FakeStorage* f, + const SubresourceStorage& other, + F&& mergeFunc) { + RangeTracker tracker(*s); + + s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) { + tracker.Track(range); + mergeFunc(range, data, otherData); + }); + f->Merge(other, mergeFunc); + + tracker.CheckTrackedExactly( + SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount)); + f->CheckSameAs(*s); + } + + // Test merging two fully compressed single-aspect resources. + TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) { + SubresourceStorage s(Aspect::Color, 4, 6); + FakeStorage f(Aspect::Color, 4, 6); + + // Merge the whole resource in a single call. + SubresourceStorage other(Aspect::Color, 4, 6, true); + CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) { + if (other) { + *data = 13; + } + }); + + CheckAspectCompressed(s, Aspect::Color, true); + } + + // Test merging two fully compressed multi-aspect resources. + TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) { + SubresourceStorage s(Aspect::Depth | Aspect::Stencil, 6, 7); + FakeStorage f(Aspect::Depth | Aspect::Stencil, 6, 7); + + // Merge the whole resource in a single call. 
+ SubresourceStorage other(Aspect::Depth | Aspect::Stencil, 6, 7, true); + CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) { + if (other) { + *data = 13; + } + }); + + CheckAspectCompressed(s, Aspect::Depth, true); + CheckAspectCompressed(s, Aspect::Stencil, true); + } + + // Test merging a fully compressed resource in a resource with the "cross band" pattern. + // - The first band is full layers [2, 3] on both aspects + // - The second band is full mips [5, 6] on one aspect. + // This provides coverage of using a single piece of data from `other` to update all of `s` + TEST(SubresourceStorageTest, MergeFullInTwoBand) { + const uint32_t kLayers = 5; + const uint32_t kLevels = 9; + SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + + // Update the two bands + { + SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels}); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; }); + } + { + SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2}); + CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; }); + } + + // Merge the fully compressed resource. + SubresourceStorage other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17); + CallMergeOnBoth(&s, &f, other, + [](const SubresourceRange&, int* data, int other) { *data += other; }); + + // The layers traversed by the mip band are still uncompressed. + CheckLayerCompressed(s, Aspect::Depth, 1, false); + CheckLayerCompressed(s, Aspect::Depth, 2, false); + CheckLayerCompressed(s, Aspect::Depth, 3, false); + CheckLayerCompressed(s, Aspect::Depth, 4, false); + + // Stencil is decompressed but all its layers are still compressed because there wasn't the + // mip band. 
+ CheckAspectCompressed(s, Aspect::Stencil, false); + CheckLayerCompressed(s, Aspect::Stencil, 1, true); + CheckLayerCompressed(s, Aspect::Stencil, 2, true); + CheckLayerCompressed(s, Aspect::Stencil, 3, true); + CheckLayerCompressed(s, Aspect::Stencil, 4, true); + } + // Test the reverse, mergign two-bands in a full resource. This provides coverage for + // decompressing aspects / and partilly layers to match the compression of `other` + TEST(SubresourceStorageTest, MergeTwoBandInFull) { + const uint32_t kLayers = 5; + const uint32_t kLevels = 9; + SubresourceStorage s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75); + FakeStorage f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75); + + // Update the two bands + SubresourceStorage other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels); + { + SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels}); + other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; }); + } + { + SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2}); + other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; }); + } + + // Merge the fully compressed resource. + CallMergeOnBoth(&s, &f, other, + [](const SubresourceRange&, int* data, int other) { *data += other; }); + + // The layers traversed by the mip band are still uncompressed. + CheckLayerCompressed(s, Aspect::Depth, 1, false); + CheckLayerCompressed(s, Aspect::Depth, 2, false); + CheckLayerCompressed(s, Aspect::Depth, 3, false); + CheckLayerCompressed(s, Aspect::Depth, 4, false); + + // Stencil is decompressed but all its layers are still compressed because there wasn't the + // mip band. 
+ CheckAspectCompressed(s, Aspect::Stencil, false); + CheckLayerCompressed(s, Aspect::Stencil, 1, true); + CheckLayerCompressed(s, Aspect::Stencil, 2, true); + CheckLayerCompressed(s, Aspect::Stencil, 3, true); + CheckLayerCompressed(s, Aspect::Stencil, 4, true); + } + + // Test merging storage with a layer band in a stipple patterned storage. This provide coverage + // for the code path that uses the same layer data for other multiple times. + TEST(SubresourceStorageTest, MergeLayerBandInStipple) { + const uint32_t kLayers = 3; + const uint32_t kLevels = 5; + + SubresourceStorage s(Aspect::Color, kLayers, kLevels); + FakeStorage f(Aspect::Color, kLayers, kLevels); + SubresourceStorage other(Aspect::Color, kLayers, kLevels); + + for (uint32_t layer = 0; layer < kLayers; layer++) { + for (uint32_t level = 0; level < kLevels; level++) { + if ((layer + level) % 2 == 0) { + SubresourceRange range = + SubresourceRange::MakeSingle(Aspect::Color, layer, level); + CallUpdateOnBoth(&s, &f, range, + [](const SubresourceRange&, int* data) { *data += 17; }); + } + } + if (layer % 2 == 0) { + other.Update({Aspect::Color, {layer, 1}, {0, kLevels}}, + [](const SubresourceRange&, int* data) { *data += 8; }); + } + } + + // Merge the band in the stipple. + CallMergeOnBoth(&s, &f, other, + [](const SubresourceRange&, int* data, int other) { *data += other; }); + + // None of the resulting layers are compressed. + CheckLayerCompressed(s, Aspect::Color, 0, false); + CheckLayerCompressed(s, Aspect::Color, 1, false); + CheckLayerCompressed(s, Aspect::Color, 2, false); + } + + // Regression test for a missing check that layer 0 is compressed when recompressing. + TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) { + const uint32_t kLayers = 2; + const uint32_t kLevels = 2; + SubresourceStorage s(Aspect::Color, kLayers, kLevels); + FakeStorage f(Aspect::Color, kLayers, kLevels); + + // Set up s with zeros except (0, 1) which is garbage. 
+ { + SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1); + CallUpdateOnBoth(&s, &f, range, + [](const SubresourceRange&, int* data) { *data += 0xABC; }); + } + + // Other is 2x2 of zeroes + SubresourceStorage other(Aspect::Color, kLayers, kLevels); + + // Fake updating F with other which is fully compressed and will trigger recompression. + CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {}); + + // The Color aspect should not have been recompressed. + CheckAspectCompressed(s, Aspect::Color, false); + CheckLayerCompressed(s, Aspect::Color, 0, false); + } + + // Regression test for aspect decompression not copying to layer 0 + TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) { + const uint32_t kLayers = 2; + const uint32_t kLevels = 2; + SubresourceStorage s(Aspect::Color, kLayers, kLevels, 3); + FakeStorage f(Aspect::Color, kLayers, kLevels, 3); + + // Cause decompression by writing to a single subresource. + { + SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1); + CallUpdateOnBoth(&s, &f, range, + [](const SubresourceRange&, int* data) { *data += 0xABC; }); + } + + // Check that the aspect's value of 3 was correctly decompressed in layer 0. + CheckLayerCompressed(s, Aspect::Color, 0, true); + EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0)); + EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1)); + } + + // Bugs found while testing: + // - mLayersCompressed not initialized to true. + // - DecompressLayer setting Compressed to true instead of false. + // - Get() checking for !compressed instead of compressed for the early exit. + // - ASSERT in RecompressLayers was inverted. + // - Two != being converted to == during a rework. + // - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed. + // - Missing decompression of layer 0 after introducing mInlineAspectData. 
+ +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/ToBackendTests.cpp b/src/dawn/tests/unittests/ToBackendTests.cpp index 398a9e0fdf..2ee3a75328 100644 --- a/src/dawn/tests/unittests/ToBackendTests.cpp +++ b/src/dawn/tests/unittests/ToBackendTests.cpp @@ -22,9 +22,6 @@ // Make our own Base - Backend object pair, reusing the AdapterBase name namespace dawn::native { class AdapterBase : public RefCounted {}; -} // namespace dawn::native - -using namespace dawn::native; class MyAdapter : public AdapterBase {}; @@ -85,3 +82,5 @@ TEST(ToBackend, Ref) { adapter->Release(); } } + +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp index 6f5439caf2..1f94b986fc 100644 --- a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp +++ b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp @@ -24,507 +24,516 @@ #include "dawn/webgpu_cpp_print.h" #include "gtest/gtest.h" -using namespace dawn::native::d3d12; +namespace dawn::native::d3d12 { + namespace { -namespace { - - struct TextureSpec { - uint32_t x; - uint32_t y; - uint32_t z; - uint32_t width; - uint32_t height; - uint32_t depthOrArrayLayers; - uint32_t texelBlockSizeInBytes; - uint32_t blockWidth = 1; - uint32_t blockHeight = 1; - }; - - struct BufferSpec { - uint64_t offset; - uint32_t bytesPerRow; - uint32_t rowsPerImage; - }; - - // Check that each copy region fits inside the buffer footprint - void ValidateFootprints(const TextureSpec& textureSpec, - const BufferSpec& bufferSpec, - const TextureCopySubresource& copySplit, - wgpu::TextureDimension dimension) { - for (uint32_t i = 0; i < copySplit.count; ++i) { - const auto& copy = copySplit.copies[i]; - ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width); - ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height); - ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers, - copy.bufferSize.depthOrArrayLayers); - - 
// If there are multiple layers, 2D texture splitter actually splits each layer - // independently. See the details in Compute2DTextureCopySplits(). As a result, - // if we simply expand a copy region generated by 2D texture splitter to all - // layers, the copy region might be OOB. But that is not the approach that the current - // 2D texture splitter is doing, although Compute2DTextureCopySubresource forwards - // "copySize.depthOrArrayLayers" to the copy region it generated. So skip the test - // below for 2D textures with multiple layers. - if (textureSpec.depthOrArrayLayers <= 1 || dimension == wgpu::TextureDimension::e3D) { - uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth; - uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight; - uint64_t minimumRequiredBufferSize = - bufferSpec.offset + - utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, bufferSpec.rowsPerImage, - widthInBlocks, heightInBlocks, - textureSpec.depthOrArrayLayers, - textureSpec.texelBlockSizeInBytes); - - // The last pixel (buffer footprint) of each copy region depends on its bufferOffset - // and copySize. It is not the last pixel where the bufferSize ends. 
- ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u); - ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u); - uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width; - ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u); - uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth; - - ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u); - ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u); - uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height; - ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u); - uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight; - - uint64_t bufferSizeForFootprint = - copy.alignedOffset + - utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height, - footprintWidthInBlocks, footprintHeightInBlocks, - copy.bufferSize.depthOrArrayLayers, - textureSpec.texelBlockSizeInBytes); - - // The buffer footprint of each copy region should not exceed the minimum required - // buffer size. Otherwise, pixels accessed by copy may be OOB. 
- ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize); - } - } - } - - // Check that the offset is aligned - void ValidateOffset(const TextureCopySubresource& copySplit) { - for (uint32_t i = 0; i < copySplit.count; ++i) { - ASSERT_TRUE( - Align(copySplit.copies[i].alignedOffset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) == - copySplit.copies[i].alignedOffset); - } - } - - bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) { - return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB); - } - - // Check that no pair of copy regions intersect each other - void ValidateDisjoint(const TextureCopySubresource& copySplit) { - for (uint32_t i = 0; i < copySplit.count; ++i) { - const auto& a = copySplit.copies[i]; - for (uint32_t j = i + 1; j < copySplit.count; ++j) { - const auto& b = copySplit.copies[j]; - // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and - // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be - // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are - // included. 
- bool overlapX = InclusiveRangesOverlap( - a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1, b.textureOffset.x, - b.textureOffset.x + b.copySize.width - 1); - bool overlapY = InclusiveRangesOverlap( - a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, b.textureOffset.y, - b.textureOffset.y + b.copySize.height - 1); - bool overlapZ = InclusiveRangesOverlap( - a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1, - b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1); - ASSERT_TRUE(!overlapX || !overlapY || !overlapZ); - } - } - } - - // Check that the union of the copy regions exactly covers the texture region - void ValidateTextureBounds(const TextureSpec& textureSpec, - const TextureCopySubresource& copySplit) { - ASSERT_GT(copySplit.count, 0u); - - uint32_t minX = copySplit.copies[0].textureOffset.x; - uint32_t minY = copySplit.copies[0].textureOffset.y; - uint32_t minZ = copySplit.copies[0].textureOffset.z; - uint32_t maxX = copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width; - uint32_t maxY = copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height; - uint32_t maxZ = - copySplit.copies[0].textureOffset.z + copySplit.copies[0].copySize.depthOrArrayLayers; - - for (uint32_t i = 1; i < copySplit.count; ++i) { - const auto& copy = copySplit.copies[i]; - minX = std::min(minX, copy.textureOffset.x); - minY = std::min(minY, copy.textureOffset.y); - minZ = std::min(minZ, copy.textureOffset.z); - maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width); - maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height); - maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers); - } - - ASSERT_EQ(minX, textureSpec.x); - ASSERT_EQ(minY, textureSpec.y); - ASSERT_EQ(minZ, textureSpec.z); - ASSERT_EQ(maxX, textureSpec.x + textureSpec.width); - ASSERT_EQ(maxY, textureSpec.y + textureSpec.height); - ASSERT_EQ(maxZ, textureSpec.z + 
textureSpec.depthOrArrayLayers); - } - - // Validate that the number of pixels copied is exactly equal to the number of pixels in the - // texture region - void ValidatePixelCount(const TextureSpec& textureSpec, - const TextureCopySubresource& copySplit) { - uint32_t count = 0; - for (uint32_t i = 0; i < copySplit.count; ++i) { - const auto& copy = copySplit.copies[i]; - uint32_t copiedPixels = - copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers; - ASSERT_GT(copiedPixels, 0u); - count += copiedPixels; - } - ASSERT_EQ(count, textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers); - } - - // Check that every buffer offset is at the correct pixel location - void ValidateBufferOffset(const TextureSpec& textureSpec, - const BufferSpec& bufferSpec, - const TextureCopySubresource& copySplit, - wgpu::TextureDimension dimension) { - ASSERT_GT(copySplit.count, 0u); - - uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight; - for (uint32_t i = 0; i < copySplit.count; ++i) { - const auto& copy = copySplit.copies[i]; - - uint32_t bytesPerRowInTexels = - bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock; - uint32_t slicePitchInTexels = - bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight); - uint32_t absoluteTexelOffset = - copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock + - copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock + - copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels; - - // There is one empty row at most in a 2D copy region. However, it is not true for - // a 3D texture copy region when we are copying the last row of each slice. We may - // need to offset a lot rows and copy.bufferOffset.y may be big. 
- if (dimension == wgpu::TextureDimension::e2D) { - ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight); - } - ASSERT_EQ(copy.bufferOffset.z, 0u); - - ASSERT_GE(absoluteTexelOffset, - bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock); - uint32_t relativeTexelOffset = - absoluteTexelOffset - - bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock; - - uint32_t z = relativeTexelOffset / slicePitchInTexels; - uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels; - uint32_t x = relativeTexelOffset % bytesPerRowInTexels; - - ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x); - ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y); - ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z); - } - } - - void ValidateCopySplit(const TextureSpec& textureSpec, - const BufferSpec& bufferSpec, - const TextureCopySubresource& copySplit, - wgpu::TextureDimension dimension) { - ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension); - ValidateOffset(copySplit); - ValidateDisjoint(copySplit); - ValidateTextureBounds(textureSpec, copySplit); - ValidatePixelCount(textureSpec, copySplit); - ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension); - } - - std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) { - os << "TextureSpec(" - << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), (" - << textureSpec.width << ", " << textureSpec.height << ", " - << textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes << ")"; - return os; - } - - std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) { - os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", " - << bufferSpec.rowsPerImage << ")"; - return os; - } - - std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) { - os << "CopySplit" << std::endl; - for (uint32_t i = 0; i < copySplit.count; ++i) { - const auto& 
copy = copySplit.copies[i]; - os << " " << i << ": Texture at (" << copy.textureOffset.x << ", " - << copy.textureOffset.y << ", " << copy.textureOffset.z << "), size (" - << copy.copySize.width << ", " << copy.copySize.height << ", " - << copy.copySize.depthOrArrayLayers << ")" << std::endl; - os << " " << i << ": Buffer at (" << copy.bufferOffset.x << ", " << copy.bufferOffset.y - << ", " << copy.bufferOffset.z << "), footprint (" << copy.bufferSize.width << ", " - << copy.bufferSize.height << ", " << copy.bufferSize.depthOrArrayLayers << ")" - << std::endl; - } - return os; - } - - // Define base texture sizes and offsets to test with: some aligned, some unaligned - constexpr TextureSpec kBaseTextureSpecs[] = { - {0, 0, 0, 1, 1, 1, 4}, - {0, 0, 0, 64, 1, 1, 4}, - {0, 0, 0, 128, 1, 1, 4}, - {0, 0, 0, 192, 1, 1, 4}, - {31, 16, 0, 1, 1, 1, 4}, - {64, 16, 0, 1, 1, 1, 4}, - {64, 16, 8, 1, 1, 1, 4}, - - {0, 0, 0, 64, 2, 1, 4}, - {0, 0, 0, 64, 1, 2, 4}, - {0, 0, 0, 64, 2, 2, 4}, - {0, 0, 0, 128, 2, 1, 4}, - {0, 0, 0, 128, 1, 2, 4}, - {0, 0, 0, 128, 2, 2, 4}, - {0, 0, 0, 192, 2, 1, 4}, - {0, 0, 0, 192, 1, 2, 4}, - {0, 0, 0, 192, 2, 2, 4}, - - {0, 0, 0, 1024, 1024, 1, 4}, - {256, 512, 0, 1024, 1024, 1, 4}, - {64, 48, 0, 1024, 1024, 1, 4}, - {64, 48, 16, 1024, 1024, 1024, 4}, - - {0, 0, 0, 257, 31, 1, 4}, - {0, 0, 0, 17, 93, 1, 4}, - {59, 13, 0, 257, 31, 1, 4}, - {17, 73, 0, 17, 93, 1, 4}, - {17, 73, 59, 17, 93, 99, 4}, - - {0, 0, 0, 4, 4, 1, 8, 4, 4}, - {64, 16, 0, 4, 4, 1, 8, 4, 4}, - {64, 16, 8, 4, 4, 1, 8, 4, 4}, - {0, 0, 0, 4, 4, 1, 16, 4, 4}, - {64, 16, 0, 4, 4, 1, 16, 4, 4}, - {64, 16, 8, 4, 4, 1, 16, 4, 4}, - - {0, 0, 0, 1024, 1024, 1, 8, 4, 4}, - {256, 512, 0, 1024, 1024, 1, 8, 4, 4}, - {64, 48, 0, 1024, 1024, 1, 8, 4, 4}, - {64, 48, 16, 1024, 1024, 1, 8, 4, 4}, - {0, 0, 0, 1024, 1024, 1, 16, 4, 4}, - {256, 512, 0, 1024, 1024, 1, 16, 4, 4}, - {64, 48, 0, 1024, 1024, 1, 4, 16, 4}, - {64, 48, 16, 1024, 1024, 1, 16, 4, 4}, - }; - - // Define base buffer sizes to 
work with: some offsets aligned, some unaligned. bytesPerRow is - // the minimum required - std::array BaseBufferSpecs(const TextureSpec& textureSpec) { - uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width, - kTextureBytesPerRowAlignment); - - auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t { - return value == 0 ? 0 : ((value - 1) / size + 1) * size; + struct TextureSpec { + uint32_t x; + uint32_t y; + uint32_t z; + uint32_t width; + uint32_t height; + uint32_t depthOrArrayLayers; + uint32_t texelBlockSizeInBytes; + uint32_t blockWidth = 1; + uint32_t blockHeight = 1; }; - return { - BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height * 2}, - - BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height * 2}, - - BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - 
BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height}, - BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow, - textureSpec.height * 2}, + struct BufferSpec { + uint64_t offset; + uint32_t bytesPerRow; + uint32_t rowsPerImage; }; - } - // Define a list of values to set properties in the spec structs - constexpr uint32_t kCheckValues[] = {1, 2, 3, 4, 5, 6, 7, 8, // small values - 16, 32, 64, 128, 256, 512, 1024, 2048, // powers of 2 - 15, 31, 63, 127, 257, 511, 1023, 2047, // misalignments - 17, 33, 65, 129, 257, 513, 1025, 2049}; + // Check that each copy region fits inside the buffer footprint + void ValidateFootprints(const TextureSpec& textureSpec, + const BufferSpec& bufferSpec, + const TextureCopySubresource& copySplit, + wgpu::TextureDimension dimension) { + for (uint32_t i = 0; i < copySplit.count; ++i) { + const auto& copy = copySplit.copies[i]; + ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width); + ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height); + ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers, + copy.bufferSize.depthOrArrayLayers); -} // namespace + // If there are multiple layers, 2D texture splitter actually splits each layer + // independently. See the details in Compute2DTextureCopySplits(). As a result, + // if we simply expand a copy region generated by 2D texture splitter to all + // layers, the copy region might be OOB. But that is not the approach that the + // current 2D texture splitter is doing, although Compute2DTextureCopySubresource + // forwards "copySize.depthOrArrayLayers" to the copy region it generated. So skip + // the test below for 2D textures with multiple layers. 
+ if (textureSpec.depthOrArrayLayers <= 1 || + dimension == wgpu::TextureDimension::e3D) { + uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth; + uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight; + uint64_t minimumRequiredBufferSize = + bufferSpec.offset + + utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, bufferSpec.rowsPerImage, + widthInBlocks, heightInBlocks, + textureSpec.depthOrArrayLayers, + textureSpec.texelBlockSizeInBytes); -class CopySplitTest : public testing::TestWithParam { - protected: - void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) { - ASSERT(textureSpec.width % textureSpec.blockWidth == 0 && - textureSpec.height % textureSpec.blockHeight == 0); + // The last pixel (buffer footprint) of each copy region depends on its + // bufferOffset and copySize. It is not the last pixel where the bufferSize + // ends. + ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u); + ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u); + uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width; + ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u); + uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth; - wgpu::TextureDimension dimension = GetParam(); - TextureCopySubresource copySplit; - switch (dimension) { - case wgpu::TextureDimension::e2D: { - copySplit = Compute2DTextureCopySubresource( - {textureSpec.x, textureSpec.y, textureSpec.z}, - {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers}, - {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth, - textureSpec.blockHeight}, - bufferSpec.offset, bufferSpec.bytesPerRow); - break; + ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u); + ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u); + uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height; + ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u); + uint32_t footprintHeightInBlocks = 
footprintHeight / textureSpec.blockHeight; + + uint64_t bufferSizeForFootprint = + copy.alignedOffset + + utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height, + footprintWidthInBlocks, footprintHeightInBlocks, + copy.bufferSize.depthOrArrayLayers, + textureSpec.texelBlockSizeInBytes); + + // The buffer footprint of each copy region should not exceed the minimum + // required buffer size. Otherwise, pixels accessed by copy may be OOB. + ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize); + } } - case wgpu::TextureDimension::e3D: { - copySplit = Compute3DTextureCopySplits( - {textureSpec.x, textureSpec.y, textureSpec.z}, - {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers}, - {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth, - textureSpec.blockHeight}, - bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage); - break; - } - default: - UNREACHABLE(); - break; } - ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension); - - if (HasFatalFailure()) { - std::ostringstream message; - message << "Failed generating splits: " << textureSpec << ", " << bufferSpec - << std::endl - << dimension << " " << copySplit << std::endl; - FAIL() << message.str(); - } - } -}; - -TEST_P(CopySplitTest, General) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - DoTest(textureSpec, bufferSpec); - } - } -} - -TEST_P(CopySplitTest, TextureWidth) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (uint32_t val : kCheckValues) { - if (val % textureSpec.blockWidth != 0) { - continue; + // Check that the offset is aligned + void ValidateOffset(const TextureCopySubresource& copySplit) { + for (uint32_t i = 0; i < copySplit.count; ++i) { + ASSERT_TRUE(Align(copySplit.copies[i].alignedOffset, + D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) == + copySplit.copies[i].alignedOffset); } - textureSpec.width = val; + } + + bool 
InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) { + return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB); + } + + // Check that no pair of copy regions intersect each other + void ValidateDisjoint(const TextureCopySubresource& copySplit) { + for (uint32_t i = 0; i < copySplit.count; ++i) { + const auto& a = copySplit.copies[i]; + for (uint32_t j = i + 1; j < copySplit.count; ++j) { + const auto& b = copySplit.copies[j]; + // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and + // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be + // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are + // included. + bool overlapX = InclusiveRangesOverlap( + a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1, + b.textureOffset.x, b.textureOffset.x + b.copySize.width - 1); + bool overlapY = InclusiveRangesOverlap( + a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, + b.textureOffset.y, b.textureOffset.y + b.copySize.height - 1); + bool overlapZ = InclusiveRangesOverlap( + a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1, + b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1); + ASSERT_TRUE(!overlapX || !overlapY || !overlapZ); + } + } + } + + // Check that the union of the copy regions exactly covers the texture region + void ValidateTextureBounds(const TextureSpec& textureSpec, + const TextureCopySubresource& copySplit) { + ASSERT_GT(copySplit.count, 0u); + + uint32_t minX = copySplit.copies[0].textureOffset.x; + uint32_t minY = copySplit.copies[0].textureOffset.y; + uint32_t minZ = copySplit.copies[0].textureOffset.z; + uint32_t maxX = + copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width; + uint32_t maxY = + copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height; + uint32_t maxZ = copySplit.copies[0].textureOffset.z + + 
copySplit.copies[0].copySize.depthOrArrayLayers; + + for (uint32_t i = 1; i < copySplit.count; ++i) { + const auto& copy = copySplit.copies[i]; + minX = std::min(minX, copy.textureOffset.x); + minY = std::min(minY, copy.textureOffset.y); + minZ = std::min(minZ, copy.textureOffset.z); + maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width); + maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height); + maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers); + } + + ASSERT_EQ(minX, textureSpec.x); + ASSERT_EQ(minY, textureSpec.y); + ASSERT_EQ(minZ, textureSpec.z); + ASSERT_EQ(maxX, textureSpec.x + textureSpec.width); + ASSERT_EQ(maxY, textureSpec.y + textureSpec.height); + ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers); + } + + // Validate that the number of pixels copied is exactly equal to the number of pixels in the + // texture region + void ValidatePixelCount(const TextureSpec& textureSpec, + const TextureCopySubresource& copySplit) { + uint32_t count = 0; + for (uint32_t i = 0; i < copySplit.count; ++i) { + const auto& copy = copySplit.copies[i]; + uint32_t copiedPixels = + copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers; + ASSERT_GT(copiedPixels, 0u); + count += copiedPixels; + } + ASSERT_EQ(count, + textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers); + } + + // Check that every buffer offset is at the correct pixel location + void ValidateBufferOffset(const TextureSpec& textureSpec, + const BufferSpec& bufferSpec, + const TextureCopySubresource& copySplit, + wgpu::TextureDimension dimension) { + ASSERT_GT(copySplit.count, 0u); + + uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight; + for (uint32_t i = 0; i < copySplit.count; ++i) { + const auto& copy = copySplit.copies[i]; + + uint32_t bytesPerRowInTexels = + bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock; + uint32_t slicePitchInTexels = + 
bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight); + uint32_t absoluteTexelOffset = + copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock + + copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock + + copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels; + + // There is one empty row at most in a 2D copy region. However, it is not true for + // a 3D texture copy region when we are copying the last row of each slice. We may + // need to offset a lot rows and copy.bufferOffset.y may be big. + if (dimension == wgpu::TextureDimension::e2D) { + ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight); + } + ASSERT_EQ(copy.bufferOffset.z, 0u); + + ASSERT_GE(absoluteTexelOffset, + bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock); + uint32_t relativeTexelOffset = + absoluteTexelOffset - + bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock; + + uint32_t z = relativeTexelOffset / slicePitchInTexels; + uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels; + uint32_t x = relativeTexelOffset % bytesPerRowInTexels; + + ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x); + ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y); + ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z); + } + } + + void ValidateCopySplit(const TextureSpec& textureSpec, + const BufferSpec& bufferSpec, + const TextureCopySubresource& copySplit, + wgpu::TextureDimension dimension) { + ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension); + ValidateOffset(copySplit); + ValidateDisjoint(copySplit); + ValidateTextureBounds(textureSpec, copySplit); + ValidatePixelCount(textureSpec, copySplit); + ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension); + } + + std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) { + os << "TextureSpec(" + << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), (" + << 
textureSpec.width << ", " << textureSpec.height << ", " + << textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes + << ")"; + return os; + } + + std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) { + os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", " + << bufferSpec.rowsPerImage << ")"; + return os; + } + + std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) { + os << "CopySplit" << std::endl; + for (uint32_t i = 0; i < copySplit.count; ++i) { + const auto& copy = copySplit.copies[i]; + os << " " << i << ": Texture at (" << copy.textureOffset.x << ", " + << copy.textureOffset.y << ", " << copy.textureOffset.z << "), size (" + << copy.copySize.width << ", " << copy.copySize.height << ", " + << copy.copySize.depthOrArrayLayers << ")" << std::endl; + os << " " << i << ": Buffer at (" << copy.bufferOffset.x << ", " + << copy.bufferOffset.y << ", " << copy.bufferOffset.z << "), footprint (" + << copy.bufferSize.width << ", " << copy.bufferSize.height << ", " + << copy.bufferSize.depthOrArrayLayers << ")" << std::endl; + } + return os; + } + + // Define base texture sizes and offsets to test with: some aligned, some unaligned + constexpr TextureSpec kBaseTextureSpecs[] = { + {0, 0, 0, 1, 1, 1, 4}, + {0, 0, 0, 64, 1, 1, 4}, + {0, 0, 0, 128, 1, 1, 4}, + {0, 0, 0, 192, 1, 1, 4}, + {31, 16, 0, 1, 1, 1, 4}, + {64, 16, 0, 1, 1, 1, 4}, + {64, 16, 8, 1, 1, 1, 4}, + + {0, 0, 0, 64, 2, 1, 4}, + {0, 0, 0, 64, 1, 2, 4}, + {0, 0, 0, 64, 2, 2, 4}, + {0, 0, 0, 128, 2, 1, 4}, + {0, 0, 0, 128, 1, 2, 4}, + {0, 0, 0, 128, 2, 2, 4}, + {0, 0, 0, 192, 2, 1, 4}, + {0, 0, 0, 192, 1, 2, 4}, + {0, 0, 0, 192, 2, 2, 4}, + + {0, 0, 0, 1024, 1024, 1, 4}, + {256, 512, 0, 1024, 1024, 1, 4}, + {64, 48, 0, 1024, 1024, 1, 4}, + {64, 48, 16, 1024, 1024, 1024, 4}, + + {0, 0, 0, 257, 31, 1, 4}, + {0, 0, 0, 17, 93, 1, 4}, + {59, 13, 0, 257, 31, 1, 4}, + {17, 73, 0, 17, 93, 1, 4}, + {17, 73, 59, 17, 
93, 99, 4}, + + {0, 0, 0, 4, 4, 1, 8, 4, 4}, + {64, 16, 0, 4, 4, 1, 8, 4, 4}, + {64, 16, 8, 4, 4, 1, 8, 4, 4}, + {0, 0, 0, 4, 4, 1, 16, 4, 4}, + {64, 16, 0, 4, 4, 1, 16, 4, 4}, + {64, 16, 8, 4, 4, 1, 16, 4, 4}, + + {0, 0, 0, 1024, 1024, 1, 8, 4, 4}, + {256, 512, 0, 1024, 1024, 1, 8, 4, 4}, + {64, 48, 0, 1024, 1024, 1, 8, 4, 4}, + {64, 48, 16, 1024, 1024, 1, 8, 4, 4}, + {0, 0, 0, 1024, 1024, 1, 16, 4, 4}, + {256, 512, 0, 1024, 1024, 1, 16, 4, 4}, + {64, 48, 0, 1024, 1024, 1, 4, 16, 4}, + {64, 48, 16, 1024, 1024, 1, 16, 4, 4}, + }; + + // Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow + // is the minimum required + std::array BaseBufferSpecs(const TextureSpec& textureSpec) { + uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width, + kTextureBytesPerRowAlignment); + + auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t { + return value == 0 ? 0 : ((value - 1) / size + 1) * size; + }; + + return { + BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height * 2}, + + BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height * 2}, + + BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), 
bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height}, + BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow, + textureSpec.height * 2}, + }; + } + + // Define a list of values to set properties in the spec structs + constexpr uint32_t kCheckValues[] = { + 1, 2, 3, 4, 5, 6, 7, 8, // small values + 16, 32, 64, 128, 256, 512, 1024, 2048, // powers of 2 + 15, 31, 63, 127, 257, 511, 1023, 2047, // misalignments + 17, 33, 65, 129, 257, 513, 1025, 2049}; + + } // namespace + + class CopySplitTest : public testing::TestWithParam { + protected: + void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) { + ASSERT(textureSpec.width % textureSpec.blockWidth == 0 && + textureSpec.height % textureSpec.blockHeight == 0); + + wgpu::TextureDimension dimension = GetParam(); + TextureCopySubresource copySplit; + switch (dimension) { + case wgpu::TextureDimension::e2D: { + copySplit = Compute2DTextureCopySubresource( + {textureSpec.x, textureSpec.y, textureSpec.z}, + {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers}, + {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth, + textureSpec.blockHeight}, + bufferSpec.offset, bufferSpec.bytesPerRow); + break; + } + case wgpu::TextureDimension::e3D: { + copySplit = Compute3DTextureCopySplits( + {textureSpec.x, textureSpec.y, textureSpec.z}, + {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers}, + {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth, + textureSpec.blockHeight}, + bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage); + break; + 
} + default: + UNREACHABLE(); + break; + } + + ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension); + + if (HasFatalFailure()) { + std::ostringstream message; + message << "Failed generating splits: " << textureSpec << ", " << bufferSpec + << std::endl + << dimension << " " << copySplit << std::endl; + FAIL() << message.str(); + } + } + }; + + TEST_P(CopySplitTest, General) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { DoTest(textureSpec, bufferSpec); } } } -} -TEST_P(CopySplitTest, TextureHeight) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (uint32_t val : kCheckValues) { - if (val % textureSpec.blockHeight != 0) { - continue; - } - textureSpec.height = val; - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - DoTest(textureSpec, bufferSpec); - } - } - } -} - -TEST_P(CopySplitTest, TextureX) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (uint32_t val : kCheckValues) { - textureSpec.x = val; - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - DoTest(textureSpec, bufferSpec); - } - } - } -} - -TEST_P(CopySplitTest, TextureY) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (uint32_t val : kCheckValues) { - textureSpec.y = val; - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - DoTest(textureSpec, bufferSpec); - } - } - } -} - -TEST_P(CopySplitTest, TexelSize) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (uint32_t texelSize : {4, 8, 16, 32, 64}) { - textureSpec.texelBlockSizeInBytes = texelSize; - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - DoTest(textureSpec, bufferSpec); - } - } - } -} - -TEST_P(CopySplitTest, BufferOffset) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + TEST_P(CopySplitTest, TextureWidth) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { for (uint32_t val : 
kCheckValues) { - bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val; - - DoTest(textureSpec, bufferSpec); + if (val % textureSpec.blockWidth != 0) { + continue; + } + textureSpec.width = val; + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + DoTest(textureSpec, bufferSpec); + } } } } -} -TEST_P(CopySplitTest, RowPitch) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - uint32_t baseRowPitch = bufferSpec.bytesPerRow; - for (uint32_t i = 0; i < 5; ++i) { - bufferSpec.bytesPerRow = baseRowPitch + i * 256; - - DoTest(textureSpec, bufferSpec); + TEST_P(CopySplitTest, TextureHeight) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (uint32_t val : kCheckValues) { + if (val % textureSpec.blockHeight != 0) { + continue; + } + textureSpec.height = val; + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + DoTest(textureSpec, bufferSpec); + } } } } -} -TEST_P(CopySplitTest, ImageHeight) { - for (TextureSpec textureSpec : kBaseTextureSpecs) { - for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { - uint32_t baseImageHeight = bufferSpec.rowsPerImage; - for (uint32_t i = 0; i < 5; ++i) { - bufferSpec.rowsPerImage = baseImageHeight + i * 256; - - DoTest(textureSpec, bufferSpec); + TEST_P(CopySplitTest, TextureX) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (uint32_t val : kCheckValues) { + textureSpec.x = val; + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + DoTest(textureSpec, bufferSpec); + } } } } -} -INSTANTIATE_TEST_SUITE_P(, - CopySplitTest, - testing::Values(wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D)); + TEST_P(CopySplitTest, TextureY) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (uint32_t val : kCheckValues) { + textureSpec.y = val; + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + DoTest(textureSpec, bufferSpec); + } + } + } + } + + TEST_P(CopySplitTest, TexelSize) { 
+ for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (uint32_t texelSize : {4, 8, 16, 32, 64}) { + textureSpec.texelBlockSizeInBytes = texelSize; + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + DoTest(textureSpec, bufferSpec); + } + } + } + } + + TEST_P(CopySplitTest, BufferOffset) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + for (uint32_t val : kCheckValues) { + bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val; + + DoTest(textureSpec, bufferSpec); + } + } + } + } + + TEST_P(CopySplitTest, RowPitch) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + uint32_t baseRowPitch = bufferSpec.bytesPerRow; + for (uint32_t i = 0; i < 5; ++i) { + bufferSpec.bytesPerRow = baseRowPitch + i * 256; + + DoTest(textureSpec, bufferSpec); + } + } + } + } + + TEST_P(CopySplitTest, ImageHeight) { + for (TextureSpec textureSpec : kBaseTextureSpecs) { + for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) { + uint32_t baseImageHeight = bufferSpec.rowsPerImage; + for (uint32_t i = 0; i < 5; ++i) { + bufferSpec.rowsPerImage = baseImageHeight + i * 256; + + DoTest(textureSpec, bufferSpec); + } + } + } + } + + INSTANTIATE_TEST_SUITE_P(, + CopySplitTest, + testing::Values(wgpu::TextureDimension::e2D, + wgpu::TextureDimension::e3D)); + +} // namespace dawn::native::d3d12 diff --git a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp index bb6d5ead72..ab6b152bef 100644 --- a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp +++ b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp @@ -21,292 +21,295 @@ #include "dawn/tests/DawnNativeTest.h" #include "dawn/utils/WGPUHelpers.h" -class CommandBufferEncodingTests : public DawnNativeTest { - protected: - void ExpectCommands(dawn::native::CommandIterator* commands, - 
std::vector>> - expectedCommands) { - dawn::native::Command commandId; - for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) { - ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command"; - ASSERT_EQ(commandId, expectedCommands[commandIndex].first) - << "at command " << commandIndex; - expectedCommands[commandIndex].second(commands); +namespace dawn::native { + + class CommandBufferEncodingTests : public DawnNativeTest { + protected: + void ExpectCommands( + dawn::native::CommandIterator* commands, + std::vector>> + expectedCommands) { + dawn::native::Command commandId; + for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) { + ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command"; + ASSERT_EQ(commandId, expectedCommands[commandIndex].first) + << "at command " << commandIndex; + expectedCommands[commandIndex].second(commands); + } } - } -}; + }; -// Indirect dispatch validation changes the bind groups in the middle -// of a pass. Test that bindings are restored after the validation runs. -TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) { - using namespace dawn::native; + // Indirect dispatch validation changes the bind groups in the middle + // of a pass. Test that bindings are restored after the validation runs. 
+ TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) { + wgpu::BindGroupLayout staticLayout = + utils::MakeBindGroupLayout(device, {{ + 0, + wgpu::ShaderStage::Compute, + wgpu::BufferBindingType::Uniform, + }}); - wgpu::BindGroupLayout staticLayout = - utils::MakeBindGroupLayout(device, {{ - 0, - wgpu::ShaderStage::Compute, - wgpu::BufferBindingType::Uniform, - }}); + wgpu::BindGroupLayout dynamicLayout = + utils::MakeBindGroupLayout(device, {{ + 0, + wgpu::ShaderStage::Compute, + wgpu::BufferBindingType::Uniform, + true, + }}); - wgpu::BindGroupLayout dynamicLayout = - utils::MakeBindGroupLayout(device, {{ - 0, - wgpu::ShaderStage::Compute, - wgpu::BufferBindingType::Uniform, - true, - }}); - - // Create a simple pipeline - wgpu::ComputePipelineDescriptor csDesc; - csDesc.compute.module = utils::CreateShaderModule(device, R"( + // Create a simple pipeline + wgpu::ComputePipelineDescriptor csDesc; + csDesc.compute.module = utils::CreateShaderModule(device, R"( @stage(compute) @workgroup_size(1, 1, 1) fn main() { })"); - csDesc.compute.entryPoint = "main"; + csDesc.compute.entryPoint = "main"; - wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout}); - csDesc.layout = pl0; - wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc); + wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout}); + csDesc.layout = pl0; + wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc); - wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout}); - csDesc.layout = pl1; - wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc); + wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout}); + csDesc.layout = pl1; + wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc); - // Create buffers to use for both the indirect buffer and the bind groups. 
- wgpu::Buffer indirectBuffer = - utils::CreateBufferFromData(device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4}); + // Create buffers to use for both the indirect buffer and the bind groups. + wgpu::Buffer indirectBuffer = utils::CreateBufferFromData( + device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4}); - wgpu::BufferDescriptor uniformBufferDesc = {}; - uniformBufferDesc.size = 512; - uniformBufferDesc.usage = wgpu::BufferUsage::Uniform; - wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc); + wgpu::BufferDescriptor uniformBufferDesc = {}; + uniformBufferDesc.size = 512; + uniformBufferDesc.usage = wgpu::BufferUsage::Uniform; + wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc); - wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}}); + wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}}); - wgpu::BindGroup dynamicBG = - utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}}); + wgpu::BindGroup dynamicBG = + utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}}); - uint32_t dynamicOffset = 256; - std::vector emptyDynamicOffsets = {}; - std::vector singleDynamicOffset = {dynamicOffset}; + uint32_t dynamicOffset = 256; + std::vector emptyDynamicOffsets = {}; + std::vector singleDynamicOffset = {dynamicOffset}; - // Begin encoding commands. - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - wgpu::ComputePassEncoder pass = encoder.BeginComputePass(); + // Begin encoding commands. + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + wgpu::ComputePassEncoder pass = encoder.BeginComputePass(); - CommandBufferStateTracker* stateTracker = - FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting(); + CommandBufferStateTracker* stateTracker = + FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting(); - // Perform a dispatch indirect which will be preceded by a validation dispatch. 
- pass.SetPipeline(pipeline0); - pass.SetBindGroup(0, staticBG); - pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset); - EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); + // Perform a dispatch indirect which will be preceded by a validation dispatch. + pass.SetPipeline(pipeline0); + pass.SetBindGroup(0, staticBG); + pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset); + EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); - pass.DispatchIndirect(indirectBuffer, 0); + pass.DispatchIndirect(indirectBuffer, 0); - // Expect restored state. - EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get()); - EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets); - EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get()); - EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset); + // Expect restored state. + EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get()); + EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets); + EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get()); + EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset); - // Dispatch again to check that the restored state can be used. - // Also pass an indirect offset which should get replaced with the offset - // into the scratch indirect buffer (0). - pass.DispatchIndirect(indirectBuffer, 4); + // Dispatch again to check that the restored state can be used. + // Also pass an indirect offset which should get replaced with the offset + // into the scratch indirect buffer (0). 
+ pass.DispatchIndirect(indirectBuffer, 4); - // Expect restored state. - EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get()); - EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets); - EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get()); - EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset); + // Expect restored state. + EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get()); + EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets); + EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get()); + EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset); - // Change the pipeline - pass.SetPipeline(pipeline1); - pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset); - pass.SetBindGroup(1, staticBG); - EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get()); + // Change the pipeline + pass.SetPipeline(pipeline1); + pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset); + pass.SetBindGroup(1, staticBG); + EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get()); - pass.DispatchIndirect(indirectBuffer, 0); + pass.DispatchIndirect(indirectBuffer, 0); - // Expect restored state. 
- EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get()); - EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get()); - EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset); - EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get()); - EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets); + // Expect restored state. + EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get()); + EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get()); + EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset); + EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get()); + EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets); - pass.End(); + pass.End(); - wgpu::CommandBuffer commandBuffer = encoder.Finish(); + wgpu::CommandBuffer commandBuffer = encoder.Finish(); - auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) { - return [pipeline](CommandIterator* commands) { + auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) { + return [pipeline](CommandIterator* commands) { + auto* cmd = commands->NextCommand(); + EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get()); + }; + }; + + auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg, + std::vector offsets = {}) { + return [index, bg, offsets](CommandIterator* commands) { + auto* cmd = commands->NextCommand(); + uint32_t* dynamicOffsets = nullptr; + if (cmd->dynamicOffsetCount > 0) { + dynamicOffsets = commands->NextData(cmd->dynamicOffsetCount); + } + + ASSERT_EQ(cmd->index, BindGroupIndex(index)); + ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get()); + ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size()); + for (uint32_t i = 0; i < cmd->dynamicOffsetCount; 
++i) { + ASSERT_EQ(dynamicOffsets[i], offsets[i]); + } + }; + }; + + // Initialize as null. Once we know the pointer, we'll check + // that it's the same buffer every time. + WGPUBuffer indirectScratchBuffer = nullptr; + auto ExpectDispatchIndirect = [&](CommandIterator* commands) { + auto* cmd = commands->NextCommand(); + if (indirectScratchBuffer == nullptr) { + indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get()); + } + ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer); + ASSERT_EQ(cmd->indirectOffset, uint64_t(0)); + }; + + // Initialize as null. Once we know the pointer, we'll check + // that it's the same pipeline every time. + WGPUComputePipeline validationPipeline = nullptr; + auto ExpectSetValidationPipeline = [&](CommandIterator* commands) { auto* cmd = commands->NextCommand(); - EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get()); + WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get()); + if (validationPipeline != nullptr) { + EXPECT_EQ(pipeline, validationPipeline); + } else { + EXPECT_NE(pipeline, nullptr); + validationPipeline = pipeline; + } }; - }; - auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg, - std::vector offsets = {}) { - return [index, bg, offsets](CommandIterator* commands) { + auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) { auto* cmd = commands->NextCommand(); - uint32_t* dynamicOffsets = nullptr; - if (cmd->dynamicOffsetCount > 0) { - dynamicOffsets = commands->NextData(cmd->dynamicOffsetCount); - } - - ASSERT_EQ(cmd->index, BindGroupIndex(index)); - ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get()); - ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size()); - for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) { - ASSERT_EQ(dynamicOffsets[i], offsets[i]); - } + ASSERT_EQ(cmd->index, BindGroupIndex(0)); + ASSERT_NE(cmd->group.Get(), nullptr); + ASSERT_EQ(cmd->dynamicOffsetCount, 0u); }; - }; - // Initialize as null. 
Once we know the pointer, we'll check - // that it's the same buffer every time. - WGPUBuffer indirectScratchBuffer = nullptr; - auto ExpectDispatchIndirect = [&](CommandIterator* commands) { - auto* cmd = commands->NextCommand(); - if (indirectScratchBuffer == nullptr) { - indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get()); - } - ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer); - ASSERT_EQ(cmd->indirectOffset, uint64_t(0)); - }; + auto ExpectSetValidationDispatch = [&](CommandIterator* commands) { + auto* cmd = commands->NextCommand(); + ASSERT_EQ(cmd->x, 1u); + ASSERT_EQ(cmd->y, 1u); + ASSERT_EQ(cmd->z, 1u); + }; - // Initialize as null. Once we know the pointer, we'll check - // that it's the same pipeline every time. - WGPUComputePipeline validationPipeline = nullptr; - auto ExpectSetValidationPipeline = [&](CommandIterator* commands) { - auto* cmd = commands->NextCommand(); - WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get()); - if (validationPipeline != nullptr) { - EXPECT_EQ(pipeline, validationPipeline); - } else { - EXPECT_NE(pipeline, nullptr); - validationPipeline = pipeline; - } - }; + ExpectCommands( + FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(), + { + {Command::BeginComputePass, + [&](CommandIterator* commands) { + SkipCommand(commands, Command::BeginComputePass); + }}, + // Expect the state to be set. + {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, + {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, + {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, - auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) { - auto* cmd = commands->NextCommand(); - ASSERT_EQ(cmd->index, BindGroupIndex(0)); - ASSERT_NE(cmd->group.Get(), nullptr); - ASSERT_EQ(cmd->dynamicOffsetCount, 0u); - }; + // Expect the validation. 
+ {Command::SetComputePipeline, ExpectSetValidationPipeline}, + {Command::SetBindGroup, ExpectSetValidationBindGroup}, + {Command::Dispatch, ExpectSetValidationDispatch}, - auto ExpectSetValidationDispatch = [&](CommandIterator* commands) { - auto* cmd = commands->NextCommand(); - ASSERT_EQ(cmd->x, 1u); - ASSERT_EQ(cmd->y, 1u); - ASSERT_EQ(cmd->z, 1u); - }; + // Expect the state to be restored. + {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, + {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, + {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, - ExpectCommands( - FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(), - { - {Command::BeginComputePass, - [&](CommandIterator* commands) { SkipCommand(commands, Command::BeginComputePass); }}, - // Expect the state to be set. - {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, - {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, - {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, + // Expect the dispatchIndirect. + {Command::DispatchIndirect, ExpectDispatchIndirect}, - // Expect the validation. - {Command::SetComputePipeline, ExpectSetValidationPipeline}, - {Command::SetBindGroup, ExpectSetValidationBindGroup}, - {Command::Dispatch, ExpectSetValidationDispatch}, + // Expect the validation. + {Command::SetComputePipeline, ExpectSetValidationPipeline}, + {Command::SetBindGroup, ExpectSetValidationBindGroup}, + {Command::Dispatch, ExpectSetValidationDispatch}, - // Expect the state to be restored. - {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, - {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, - {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, + // Expect the state to be restored. 
+ {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, + {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, + {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, - // Expect the dispatchIndirect. - {Command::DispatchIndirect, ExpectDispatchIndirect}, + // Expect the dispatchIndirect. + {Command::DispatchIndirect, ExpectDispatchIndirect}, - // Expect the validation. - {Command::SetComputePipeline, ExpectSetValidationPipeline}, - {Command::SetBindGroup, ExpectSetValidationBindGroup}, - {Command::Dispatch, ExpectSetValidationDispatch}, + // Expect the state to be set (new pipeline). + {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)}, + {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})}, + {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)}, - // Expect the state to be restored. - {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, - {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, - {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, + // Expect the validation. + {Command::SetComputePipeline, ExpectSetValidationPipeline}, + {Command::SetBindGroup, ExpectSetValidationBindGroup}, + {Command::Dispatch, ExpectSetValidationDispatch}, - // Expect the dispatchIndirect. - {Command::DispatchIndirect, ExpectDispatchIndirect}, + // Expect the state to be restored. + {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)}, + {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})}, + {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)}, - // Expect the state to be set (new pipeline). - {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)}, - {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})}, - {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)}, + // Expect the dispatchIndirect. + {Command::DispatchIndirect, ExpectDispatchIndirect}, - // Expect the validation. 
- {Command::SetComputePipeline, ExpectSetValidationPipeline}, - {Command::SetBindGroup, ExpectSetValidationBindGroup}, - {Command::Dispatch, ExpectSetValidationDispatch}, + {Command::EndComputePass, + [&](CommandIterator* commands) { commands->NextCommand(); }}, + }); + } - // Expect the state to be restored. - {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)}, - {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})}, - {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)}, + // Test that after restoring state, it is fully applied to the state tracker + // and does not leak state changes that occured between a snapshot and the + // state restoration. + TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) { + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + wgpu::ComputePassEncoder pass = encoder.BeginComputePass(); - // Expect the dispatchIndirect. - {Command::DispatchIndirect, ExpectDispatchIndirect}, + CommandBufferStateTracker* stateTracker = + FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting(); - {Command::EndComputePass, - [&](CommandIterator* commands) { commands->NextCommand(); }}, - }); -} + // Snapshot the state. + CommandBufferStateTracker snapshot = *stateTracker; + // Expect no pipeline in the snapshot + EXPECT_FALSE(snapshot.HasPipeline()); -// Test that after restoring state, it is fully applied to the state tracker -// and does not leak state changes that occured between a snapshot and the -// state restoration. -TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) { - using namespace dawn::native; - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - wgpu::ComputePassEncoder pass = encoder.BeginComputePass(); - - CommandBufferStateTracker* stateTracker = - FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting(); - - // Snapshot the state. 
- CommandBufferStateTracker snapshot = *stateTracker; - // Expect no pipeline in the snapshot - EXPECT_FALSE(snapshot.HasPipeline()); - - // Create a simple pipeline - wgpu::ComputePipelineDescriptor csDesc; - csDesc.compute.module = utils::CreateShaderModule(device, R"( + // Create a simple pipeline + wgpu::ComputePipelineDescriptor csDesc; + csDesc.compute.module = utils::CreateShaderModule(device, R"( @stage(compute) @workgroup_size(1, 1, 1) fn main() { })"); - csDesc.compute.entryPoint = "main"; - wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc); + csDesc.compute.entryPoint = "main"; + wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc); - // Set the pipeline. - pass.SetPipeline(pipeline); + // Set the pipeline. + pass.SetPipeline(pipeline); - // Expect the pipeline to be set. - EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get()); + // Expect the pipeline to be set. + EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get()); - // Restore the state. - FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot)); + // Restore the state. 
+ FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot)); - // Expect no pipeline - EXPECT_FALSE(stateTracker->HasPipeline()); -} + // Expect no pipeline + EXPECT_FALSE(stateTracker->HasPipeline()); + } + +} // namespace dawn::native diff --git a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp index e6bccedf51..f7553ecffe 100644 --- a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp +++ b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp @@ -25,9 +25,13 @@ namespace { - using namespace testing; + using testing::Contains; + using testing::MockCallback; + using testing::NotNull; + using testing::SaveArg; + using testing::StrEq; - class DeviceCreationTest : public Test { + class DeviceCreationTest : public testing::Test { protected: void SetUp() override { dawnProcSetProcs(&dawn::native::GetProcs()); @@ -83,7 +87,7 @@ namespace { EXPECT_NE(device, nullptr); auto toggles = dawn::native::GetTogglesUsed(device.Get()); - EXPECT_THAT(toggles, testing::Contains(testing::StrEq(toggle))); + EXPECT_THAT(toggles, Contains(StrEq(toggle))); } TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) { diff --git a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp index 5f0b492b8e..147e45ecc3 100644 --- a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp +++ b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp @@ -18,7 +18,8 @@ #include "gmock/gmock.h" #include "dawn/tests/unittests/validation/ValidationTest.h" -using namespace testing; +using testing::_; +using testing::InvokeWithoutArgs; class MockBufferMapAsyncCallback { public: diff --git a/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp b/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp index 6d6e32b035..7c60803b65 100644 --- a/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp 
+++ b/src/dawn/tests/unittests/validation/ErrorScopeValidationTests.cpp @@ -18,7 +18,9 @@ #include "dawn/tests/unittests/validation/ValidationTest.h" #include "gmock/gmock.h" -using namespace testing; +using testing::_; +using testing::MockCallback; +using testing::Sequence; class MockDevicePopErrorScopeCallback { public: @@ -170,7 +172,7 @@ TEST_F(ErrorScopeValidationTest, EnclosedQueueSubmitNested) { queue.Submit(0, nullptr); queue.OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this); - testing::Sequence seq; + Sequence seq; MockCallback errorScopeCallback2; EXPECT_CALL(errorScopeCallback2, Call(WGPUErrorType_NoError, _, this + 1)).InSequence(seq); diff --git a/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp b/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp index f84748f6ad..2f1c515e4f 100644 --- a/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp +++ b/src/dawn/tests/unittests/validation/MultipleDeviceTests.cpp @@ -16,7 +16,12 @@ #include "dawn/tests/MockCallback.h" -using namespace testing; +using testing::_; +using testing::Invoke; +using testing::MockCallback; +using testing::NotNull; +using testing::StrictMock; +using testing::WithArg; class MultipleDeviceTest : public ValidationTest {}; diff --git a/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp index f5507e8418..0cbf9933fc 100644 --- a/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp +++ b/src/dawn/tests/unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp @@ -17,8 +17,6 @@ #include "dawn/tests/unittests/validation/ValidationTest.h" #include "gmock/gmock.h" -using namespace testing; - class MockQueueWorkDoneCallback { public: MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata)); diff --git a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp index 
438c6e7a32..72bda9f822 100644 --- a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp +++ b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp @@ -23,10 +23,17 @@ #include "webgpu/webgpu_cpp.h" -namespace { +namespace dawn::wire { namespace { - using namespace testing; - using namespace dawn::wire; + using testing::_; + using testing::Invoke; + using testing::InvokeWithoutArgs; + using testing::MockCallback; + using testing::NotNull; + using testing::Return; + using testing::SaveArg; + using testing::StrEq; + using testing::WithArg; class WireAdapterTests : public WireTest { protected: @@ -328,4 +335,6 @@ namespace { GetWireClient()->Disconnect(); } -} // anonymous namespace + // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented. + // NOLINTNEXTLINE(readability/namespace) +}} // namespace dawn::wire:: diff --git a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp index b928a53294..c208085c17 100644 --- a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp +++ b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp @@ -18,239 +18,253 @@ #include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/common/Constants.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireArgumentTests : public WireTest { - public: - WireArgumentTests() { - } - ~WireArgumentTests() override = default; -}; + using testing::_; + using testing::Return; + using testing::Sequence; -// Test that the wire is able to send numerical values -TEST_F(WireArgumentTests, ValueArgument) { - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr); - wgpuComputePassEncoderDispatch(pass, 1, 2, 3); + class WireArgumentTests : public WireTest { + public: + WireArgumentTests() { + } + ~WireArgumentTests() override = default; + }; - WGPUCommandEncoder apiEncoder = 
api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder)); + // Test that the wire is able to send numerical values + TEST_F(WireArgumentTests, ValueArgument) { + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr); + wgpuComputePassEncoderDispatch(pass, 1, 2, 3); - WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder(); - EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass)); - - EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1); - - FlushClient(); -} - -// Test that the wire is able to send arrays of numerical values -TEST_F(WireArgumentTests, ValueArrayArgument) { - // Create a bindgroup. - WGPUBindGroupLayoutDescriptor bglDescriptor = {}; - bglDescriptor.entryCount = 0; - bglDescriptor.entries = nullptr; - - WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); - WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); - EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl)); - - WGPUBindGroupDescriptor bindGroupDescriptor = {}; - bindGroupDescriptor.layout = bgl; - bindGroupDescriptor.entryCount = 0; - bindGroupDescriptor.entries = nullptr; - - WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor); - WGPUBindGroup apiBindGroup = api.GetNewBindGroup(); - EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup)); - - // Use the bindgroup in SetBindGroup that takes an array of value offsets. 
- WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr); - - std::array testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu}; - wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), testOffsets.data()); - - WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder)); - - WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder(); - EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass)); - - EXPECT_CALL(api, ComputePassEncoderSetBindGroup( - apiPass, 0, apiBindGroup, testOffsets.size(), - MatchesLambda([testOffsets](const uint32_t* offsets) -> bool { - for (size_t i = 0; i < testOffsets.size(); i++) { - if (offsets[i] != testOffsets[i]) { - return false; - } - } - return true; - }))); - - FlushClient(); -} - -// Test that the wire is able to send C strings -TEST_F(WireArgumentTests, CStringArgument) { - // Create shader module - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiVsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - - // Create the color state descriptor - WGPUBlendComponent blendComponent = {}; - blendComponent.operation = WGPUBlendOperation_Add; - blendComponent.srcFactor = WGPUBlendFactor_One; - blendComponent.dstFactor = WGPUBlendFactor_One; - WGPUBlendState blendState = {}; - blendState.alpha = blendComponent; - blendState.color = blendComponent; - WGPUColorTargetState colorTargetState = {}; - colorTargetState.format = WGPUTextureFormat_RGBA8Unorm; - colorTargetState.blend = &blendState; - colorTargetState.writeMask = WGPUColorWriteMask_All; - - // Create the depth-stencil state - WGPUStencilFaceState 
stencilFace = {}; - stencilFace.compare = WGPUCompareFunction_Always; - stencilFace.failOp = WGPUStencilOperation_Keep; - stencilFace.depthFailOp = WGPUStencilOperation_Keep; - stencilFace.passOp = WGPUStencilOperation_Keep; - - WGPUDepthStencilState depthStencilState = {}; - depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8; - depthStencilState.depthWriteEnabled = false; - depthStencilState.depthCompare = WGPUCompareFunction_Always; - depthStencilState.stencilBack = stencilFace; - depthStencilState.stencilFront = stencilFace; - depthStencilState.stencilReadMask = 0xff; - depthStencilState.stencilWriteMask = 0xff; - depthStencilState.depthBias = 0; - depthStencilState.depthBiasSlopeScale = 0.0; - depthStencilState.depthBiasClamp = 0.0; - - // Create the pipeline layout - WGPUPipelineLayoutDescriptor layoutDescriptor = {}; - layoutDescriptor.bindGroupLayoutCount = 0; - layoutDescriptor.bindGroupLayouts = nullptr; - WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor); - WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout(); - EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout)); - - // Create pipeline - WGPURenderPipelineDescriptor pipelineDescriptor = {}; - - pipelineDescriptor.vertex.module = vsModule; - pipelineDescriptor.vertex.entryPoint = "main"; - pipelineDescriptor.vertex.bufferCount = 0; - pipelineDescriptor.vertex.buffers = nullptr; - - WGPUFragmentState fragment = {}; - fragment.module = vsModule; - fragment.entryPoint = "main"; - fragment.targetCount = 1; - fragment.targets = &colorTargetState; - pipelineDescriptor.fragment = &fragment; - - pipelineDescriptor.multisample.count = 1; - pipelineDescriptor.multisample.mask = 0xFFFFFFFF; - pipelineDescriptor.multisample.alphaToCoverageEnabled = false; - pipelineDescriptor.layout = layout; - pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList; - pipelineDescriptor.primitive.frontFace = 
WGPUFrontFace_CCW; - pipelineDescriptor.primitive.cullMode = WGPUCullMode_None; - pipelineDescriptor.depthStencil = &depthStencilState; - - wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); - - WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline(); - EXPECT_CALL(api, - DeviceCreateRenderPipeline( - apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { - return desc->vertex.entryPoint == std::string("main"); - }))) - .WillOnce(Return(apiPlaceholderPipeline)); - - FlushClient(); -} - -// Test that the wire is able to send objects as value arguments -TEST_F(WireArgumentTests, ObjectAsValueArgument) { - WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder)); - - WGPUBufferDescriptor descriptor = {}; - descriptor.size = 8; - descriptor.usage = - static_cast(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst); - - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); - WGPUBuffer apiBuffer = api.GetNewBuffer(); - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)) - .WillOnce(Return(apiBuffer)) - .RetiresOnSaturation(); - - wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4); - EXPECT_CALL(api, CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4)); - - FlushClient(); -} - -// Test that the wire is able to send array of objects -TEST_F(WireArgumentTests, ObjectsAsPointerArgument) { - WGPUCommandBuffer cmdBufs[2]; - WGPUCommandBuffer apiCmdBufs[2]; - - // Create two command buffers we need to use a GMock sequence otherwise the order of the - // CreateCommandEncoder might be swapped since they are equivalent in term of matchers - Sequence s; - for (int i = 0; i < 2; ++i) { - WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - cmdBufs[i] = 
wgpuCommandEncoderFinish(cmdBufEncoder, nullptr); - - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .InSequence(s) - .WillOnce(Return(apiCmdBufEncoder)); + .WillOnce(Return(apiEncoder)); - apiCmdBufs[i] = api.GetNewCommandBuffer(); - EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)) - .WillOnce(Return(apiCmdBufs[i])); + WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder(); + EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)) + .WillOnce(Return(apiPass)); + + EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1); + + FlushClient(); } - // Submit command buffer and check we got a call with both API-side command buffers - wgpuQueueSubmit(queue, 2, cmdBufs); + // Test that the wire is able to send arrays of numerical values + TEST_F(WireArgumentTests, ValueArrayArgument) { + // Create a bindgroup. 
+ WGPUBindGroupLayoutDescriptor bglDescriptor = {}; + bglDescriptor.entryCount = 0; + bglDescriptor.entries = nullptr; - EXPECT_CALL( - api, QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool { - return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1]; - }))); + WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); + WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); + EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl)); - FlushClient(); -} + WGPUBindGroupDescriptor bindGroupDescriptor = {}; + bindGroupDescriptor.layout = bgl; + bindGroupDescriptor.entryCount = 0; + bindGroupDescriptor.entries = nullptr; -// Test that the wire is able to send structures that contain pure values (non-objects) -TEST_F(WireArgumentTests, StructureOfValuesArgument) { - WGPUSamplerDescriptor descriptor = {}; - descriptor.magFilter = WGPUFilterMode_Linear; - descriptor.minFilter = WGPUFilterMode_Nearest; - descriptor.mipmapFilter = WGPUFilterMode_Linear; - descriptor.addressModeU = WGPUAddressMode_ClampToEdge; - descriptor.addressModeV = WGPUAddressMode_Repeat; - descriptor.addressModeW = WGPUAddressMode_MirrorRepeat; - descriptor.lodMinClamp = kLodMin; - descriptor.lodMaxClamp = kLodMax; - descriptor.compare = WGPUCompareFunction_Never; + WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor); + WGPUBindGroup apiBindGroup = api.GetNewBindGroup(); + EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup)); - wgpuDeviceCreateSampler(device, &descriptor); + // Use the bindgroup in SetBindGroup that takes an array of value offsets. 
+ WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr); - WGPUSampler apiPlaceholderSampler = api.GetNewSampler(); - EXPECT_CALL(api, DeviceCreateSampler( - apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool { + std::array testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu}; + wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), + testOffsets.data()); + + WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiEncoder)); + + WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder(); + EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)) + .WillOnce(Return(apiPass)); + + EXPECT_CALL(api, ComputePassEncoderSetBindGroup( + apiPass, 0, apiBindGroup, testOffsets.size(), + MatchesLambda([testOffsets](const uint32_t* offsets) -> bool { + for (size_t i = 0; i < testOffsets.size(); i++) { + if (offsets[i] != testOffsets[i]) { + return false; + } + } + return true; + }))); + + FlushClient(); + } + + // Test that the wire is able to send C strings + TEST_F(WireArgumentTests, CStringArgument) { + // Create shader module + WGPUShaderModuleDescriptor vertexDescriptor = {}; + WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiVsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); + + // Create the color state descriptor + WGPUBlendComponent blendComponent = {}; + blendComponent.operation = WGPUBlendOperation_Add; + blendComponent.srcFactor = WGPUBlendFactor_One; + blendComponent.dstFactor = WGPUBlendFactor_One; + WGPUBlendState blendState = {}; + blendState.alpha = blendComponent; + blendState.color = blendComponent; + WGPUColorTargetState colorTargetState = {}; + colorTargetState.format = 
WGPUTextureFormat_RGBA8Unorm; + colorTargetState.blend = &blendState; + colorTargetState.writeMask = WGPUColorWriteMask_All; + + // Create the depth-stencil state + WGPUStencilFaceState stencilFace = {}; + stencilFace.compare = WGPUCompareFunction_Always; + stencilFace.failOp = WGPUStencilOperation_Keep; + stencilFace.depthFailOp = WGPUStencilOperation_Keep; + stencilFace.passOp = WGPUStencilOperation_Keep; + + WGPUDepthStencilState depthStencilState = {}; + depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8; + depthStencilState.depthWriteEnabled = false; + depthStencilState.depthCompare = WGPUCompareFunction_Always; + depthStencilState.stencilBack = stencilFace; + depthStencilState.stencilFront = stencilFace; + depthStencilState.stencilReadMask = 0xff; + depthStencilState.stencilWriteMask = 0xff; + depthStencilState.depthBias = 0; + depthStencilState.depthBiasSlopeScale = 0.0; + depthStencilState.depthBiasClamp = 0.0; + + // Create the pipeline layout + WGPUPipelineLayoutDescriptor layoutDescriptor = {}; + layoutDescriptor.bindGroupLayoutCount = 0; + layoutDescriptor.bindGroupLayouts = nullptr; + WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor); + WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout(); + EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout)); + + // Create pipeline + WGPURenderPipelineDescriptor pipelineDescriptor = {}; + + pipelineDescriptor.vertex.module = vsModule; + pipelineDescriptor.vertex.entryPoint = "main"; + pipelineDescriptor.vertex.bufferCount = 0; + pipelineDescriptor.vertex.buffers = nullptr; + + WGPUFragmentState fragment = {}; + fragment.module = vsModule; + fragment.entryPoint = "main"; + fragment.targetCount = 1; + fragment.targets = &colorTargetState; + pipelineDescriptor.fragment = &fragment; + + pipelineDescriptor.multisample.count = 1; + pipelineDescriptor.multisample.mask = 0xFFFFFFFF; + pipelineDescriptor.multisample.alphaToCoverageEnabled 
= false; + pipelineDescriptor.layout = layout; + pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList; + pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW; + pipelineDescriptor.primitive.cullMode = WGPUCullMode_None; + pipelineDescriptor.depthStencil = &depthStencilState; + + wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); + + WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline(); + EXPECT_CALL( + api, DeviceCreateRenderPipeline( + apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { + return desc->vertex.entryPoint == std::string("main"); + }))) + .WillOnce(Return(apiPlaceholderPipeline)); + + FlushClient(); + } + + // Test that the wire is able to send objects as value arguments + TEST_F(WireArgumentTests, ObjectAsValueArgument) { + WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiEncoder)); + + WGPUBufferDescriptor descriptor = {}; + descriptor.size = 8; + descriptor.usage = + static_cast(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst); + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + WGPUBuffer apiBuffer = api.GetNewBuffer(); + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)) + .WillOnce(Return(apiBuffer)) + .RetiresOnSaturation(); + + wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4); + EXPECT_CALL(api, + CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4)); + + FlushClient(); + } + + // Test that the wire is able to send array of objects + TEST_F(WireArgumentTests, ObjectsAsPointerArgument) { + WGPUCommandBuffer cmdBufs[2]; + WGPUCommandBuffer apiCmdBufs[2]; + + // Create two command buffers we need to use a GMock sequence otherwise the order of the + // CreateCommandEncoder might be swapped since they are 
equivalent in term of matchers + Sequence s; + for (int i = 0; i < 2; ++i) { + WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr); + + WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .InSequence(s) + .WillOnce(Return(apiCmdBufEncoder)); + + apiCmdBufs[i] = api.GetNewCommandBuffer(); + EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)) + .WillOnce(Return(apiCmdBufs[i])); + } + + // Submit command buffer and check we got a call with both API-side command buffers + wgpuQueueSubmit(queue, 2, cmdBufs); + + EXPECT_CALL( + api, + QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool { + return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1]; + }))); + + FlushClient(); + } + + // Test that the wire is able to send structures that contain pure values (non-objects) + TEST_F(WireArgumentTests, StructureOfValuesArgument) { + WGPUSamplerDescriptor descriptor = {}; + descriptor.magFilter = WGPUFilterMode_Linear; + descriptor.minFilter = WGPUFilterMode_Nearest; + descriptor.mipmapFilter = WGPUFilterMode_Linear; + descriptor.addressModeU = WGPUAddressMode_ClampToEdge; + descriptor.addressModeV = WGPUAddressMode_Repeat; + descriptor.addressModeW = WGPUAddressMode_MirrorRepeat; + descriptor.lodMinClamp = kLodMin; + descriptor.lodMaxClamp = kLodMax; + descriptor.compare = WGPUCompareFunction_Never; + + wgpuDeviceCreateSampler(device, &descriptor); + + WGPUSampler apiPlaceholderSampler = api.GetNewSampler(); + EXPECT_CALL( + api, DeviceCreateSampler( + apiDevice, + MatchesLambda( + [](const WGPUSamplerDescriptor* desc) -> bool { return desc->nextInChain == nullptr && desc->magFilter == WGPUFilterMode_Linear && desc->minFilter == WGPUFilterMode_Nearest && @@ -261,108 +275,111 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) { desc->compare == 
WGPUCompareFunction_Never && desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax; }))) - .WillOnce(Return(apiPlaceholderSampler)); + .WillOnce(Return(apiPlaceholderSampler)); - FlushClient(); -} + FlushClient(); + } -// Test that the wire is able to send structures that contain objects -TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) { - WGPUBindGroupLayoutDescriptor bglDescriptor = {}; - bglDescriptor.entryCount = 0; - bglDescriptor.entries = nullptr; + // Test that the wire is able to send structures that contain objects + TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) { + WGPUBindGroupLayoutDescriptor bglDescriptor = {}; + bglDescriptor.entryCount = 0; + bglDescriptor.entries = nullptr; - WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); - WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); - EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl)); + WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); + WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); + EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl)); - WGPUPipelineLayoutDescriptor descriptor = {}; - descriptor.bindGroupLayoutCount = 1; - descriptor.bindGroupLayouts = &bgl; + WGPUPipelineLayoutDescriptor descriptor = {}; + descriptor.bindGroupLayoutCount = 1; + descriptor.bindGroupLayouts = &bgl; - wgpuDeviceCreatePipelineLayout(device, &descriptor); + wgpuDeviceCreatePipelineLayout(device, &descriptor); - WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout(); - EXPECT_CALL(api, DeviceCreatePipelineLayout( - apiDevice, - MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool { - return desc->nextInChain == nullptr && - desc->bindGroupLayoutCount == 1 && - desc->bindGroupLayouts[0] == apiBgl; - }))) - .WillOnce(Return(apiPlaceholderLayout)); + WGPUPipelineLayout apiPlaceholderLayout = 
api.GetNewPipelineLayout(); + EXPECT_CALL( + api, DeviceCreatePipelineLayout( + apiDevice, + MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool { + return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 && + desc->bindGroupLayouts[0] == apiBgl; + }))) + .WillOnce(Return(apiPlaceholderLayout)); - FlushClient(); -} + FlushClient(); + } -// Test that the wire is able to send structures that contain objects -TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) { - static constexpr int NUM_BINDINGS = 3; - WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{ - {nullptr, - 0, - WGPUShaderStage_Vertex, - {}, - {nullptr, WGPUSamplerBindingType_Filtering}, - {}, - {}}, - {nullptr, - 1, - WGPUShaderStage_Vertex, - {}, - {}, - {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false}, - {}}, - {nullptr, - 2, - static_cast(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment), - {nullptr, WGPUBufferBindingType_Uniform, false, 0}, - {}, - {}, - {}}, - }; - WGPUBindGroupLayoutDescriptor bglDescriptor = {}; - bglDescriptor.entryCount = NUM_BINDINGS; - bglDescriptor.entries = entries; + // Test that the wire is able to send structures that contain objects + TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) { + static constexpr int NUM_BINDINGS = 3; + WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{ + {nullptr, + 0, + WGPUShaderStage_Vertex, + {}, + {nullptr, WGPUSamplerBindingType_Filtering}, + {}, + {}}, + {nullptr, + 1, + WGPUShaderStage_Vertex, + {}, + {}, + {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false}, + {}}, + {nullptr, + 2, + static_cast(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment), + {nullptr, WGPUBufferBindingType_Uniform, false, 0}, + {}, + {}, + {}}, + }; + WGPUBindGroupLayoutDescriptor bglDescriptor = {}; + bglDescriptor.entryCount = NUM_BINDINGS; + bglDescriptor.entries = entries; - wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); - WGPUBindGroupLayout apiBgl = 
api.GetNewBindGroupLayout(); - EXPECT_CALL( - api, - DeviceCreateBindGroupLayout( - apiDevice, MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool { - for (int i = 0; i < NUM_BINDINGS; ++i) { - const auto& a = desc->entries[i]; - const auto& b = entries[i]; - if (a.binding != b.binding || a.visibility != b.visibility || - a.buffer.type != b.buffer.type || a.sampler.type != b.sampler.type || - a.texture.sampleType != b.texture.sampleType) { - return false; - } - } - return desc->nextInChain == nullptr && desc->entryCount == 3; - }))) - .WillOnce(Return(apiBgl)); + wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); + WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); + EXPECT_CALL(api, + DeviceCreateBindGroupLayout( + apiDevice, + MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool { + for (int i = 0; i < NUM_BINDINGS; ++i) { + const auto& a = desc->entries[i]; + const auto& b = entries[i]; + if (a.binding != b.binding || a.visibility != b.visibility || + a.buffer.type != b.buffer.type || + a.sampler.type != b.sampler.type || + a.texture.sampleType != b.texture.sampleType) { + return false; + } + } + return desc->nextInChain == nullptr && desc->entryCount == 3; + }))) + .WillOnce(Return(apiBgl)); - FlushClient(); -} + FlushClient(); + } -// Test passing nullptr instead of objects - array of objects version -TEST_F(WireArgumentTests, DISABLED_NullptrInArray) { - WGPUBindGroupLayout nullBGL = nullptr; + // Test passing nullptr instead of objects - array of objects version + TEST_F(WireArgumentTests, DISABLED_NullptrInArray) { + WGPUBindGroupLayout nullBGL = nullptr; - WGPUPipelineLayoutDescriptor descriptor = {}; - descriptor.bindGroupLayoutCount = 1; - descriptor.bindGroupLayouts = &nullBGL; + WGPUPipelineLayoutDescriptor descriptor = {}; + descriptor.bindGroupLayoutCount = 1; + descriptor.bindGroupLayouts = &nullBGL; - wgpuDeviceCreatePipelineLayout(device, &descriptor); - EXPECT_CALL(api, - 
DeviceCreatePipelineLayout( - apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool { - return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 && - desc->bindGroupLayouts[0] == nullptr; - }))) - .WillOnce(Return(nullptr)); + wgpuDeviceCreatePipelineLayout(device, &descriptor); + EXPECT_CALL( + api, DeviceCreatePipelineLayout( + apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool { + return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 && + desc->bindGroupLayouts[0] == nullptr; + }))) + .WillOnce(Return(nullptr)); - FlushClient(); -} + FlushClient(); + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireBasicTests.cpp b/src/dawn/tests/unittests/wire/WireBasicTests.cpp index abf5d9d6cc..9a05acfeef 100644 --- a/src/dawn/tests/unittests/wire/WireBasicTests.cpp +++ b/src/dawn/tests/unittests/wire/WireBasicTests.cpp @@ -14,67 +14,71 @@ #include "dawn/tests/unittests/wire/WireTest.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireBasicTests : public WireTest { - public: - WireBasicTests() { + using testing::Return; + + class WireBasicTests : public WireTest { + public: + WireBasicTests() { + } + ~WireBasicTests() override = default; + }; + + // One call gets forwarded correctly. + TEST_F(WireBasicTests, CallForwarded) { + wgpuDeviceCreateCommandEncoder(device, nullptr); + + WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCmdBufEncoder)); + + FlushClient(); } - ~WireBasicTests() override = default; -}; -// One call gets forwarded correctly. -TEST_F(WireBasicTests, CallForwarded) { - wgpuDeviceCreateCommandEncoder(device, nullptr); + // Test that calling methods on a new object works as expected. 
+ TEST_F(WireBasicTests, CreateThenCall) { + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + wgpuCommandEncoderFinish(encoder, nullptr); - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCmdBufEncoder)); + WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCmdBufEncoder)); - FlushClient(); -} + WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer(); + EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)) + .WillOnce(Return(apiCmdBuf)); -// Test that calling methods on a new object works as expected. -TEST_F(WireBasicTests, CreateThenCall) { - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - wgpuCommandEncoderFinish(encoder, nullptr); + FlushClient(); + } - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCmdBufEncoder)); + // Test that client reference/release do not call the backend API. + TEST_F(WireBasicTests, RefCountKeptInClient) { + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer(); - EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)).WillOnce(Return(apiCmdBuf)); + wgpuCommandEncoderReference(encoder); + wgpuCommandEncoderRelease(encoder); - FlushClient(); -} + WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCmdBufEncoder)); -// Test that client reference/release do not call the backend API. 
-TEST_F(WireBasicTests, RefCountKeptInClient) { - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + FlushClient(); + } - wgpuCommandEncoderReference(encoder); - wgpuCommandEncoderRelease(encoder); + // Test that client reference/release do not call the backend API. + TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) { + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCmdBufEncoder)); + wgpuCommandEncoderRelease(encoder); - FlushClient(); -} + WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCmdBufEncoder)); -// Test that client reference/release do not call the backend API. -TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) { - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder)); - wgpuCommandEncoderRelease(encoder); + FlushClient(); + } - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCmdBufEncoder)); - - EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder)); - - FlushClient(); -} +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp index c8c1b69be8..c4fb948515 100644 --- a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp +++ b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp @@ -18,797 +18,842 @@ #include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/wire/WireClient.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -namespace { + using testing::_; + using testing::InvokeWithoutArgs; + using 
testing::Mock; + using testing::Return; + using testing::StrictMock; - // Mock class to add expectations on the wire calling callbacks - class MockBufferMapCallback { + namespace { + + // Mock class to add expectations on the wire calling callbacks + class MockBufferMapCallback { + public: + MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata)); + }; + + std::unique_ptr> mockBufferMapCallback; + void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) { + mockBufferMapCallback->Call(status, userdata); + } + + } // anonymous namespace + + class WireBufferMappingTests : public WireTest { public: - MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata)); + WireBufferMappingTests() { + } + ~WireBufferMappingTests() override = default; + + void SetUp() override { + WireTest::SetUp(); + + mockBufferMapCallback = std::make_unique>(); + apiBuffer = api.GetNewBuffer(); + } + + void TearDown() override { + WireTest::TearDown(); + + // Delete mock so that expectations are checked + mockBufferMapCallback = nullptr; + } + + void FlushClient() { + WireTest::FlushClient(); + Mock::VerifyAndClearExpectations(&mockBufferMapCallback); + } + + void FlushServer() { + WireTest::FlushServer(); + Mock::VerifyAndClearExpectations(&mockBufferMapCallback); + } + + void SetupBuffer(WGPUBufferUsageFlags usage) { + WGPUBufferDescriptor descriptor = {}; + descriptor.size = kBufferSize; + descriptor.usage = usage; + + buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)) + .WillOnce(Return(apiBuffer)) + .RetiresOnSaturation(); + FlushClient(); + } + + protected: + static constexpr uint64_t kBufferSize = sizeof(uint32_t); + // A successfully created buffer + WGPUBuffer buffer; + WGPUBuffer apiBuffer; }; - std::unique_ptr> mockBufferMapCallback; - void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) { - mockBufferMapCallback->Call(status, userdata); - } + // 
Tests specific to mapping for reading + class WireBufferMappingReadTests : public WireBufferMappingTests { + public: + WireBufferMappingReadTests() { + } + ~WireBufferMappingReadTests() override = default; -} // anonymous namespace + void SetUp() override { + WireBufferMappingTests::SetUp(); -class WireBufferMappingTests : public WireTest { - public: - WireBufferMappingTests() { - } - ~WireBufferMappingTests() override = default; + SetupBuffer(WGPUBufferUsage_MapRead); + } + }; - void SetUp() override { - WireTest::SetUp(); + // Check mapping for reading a succesfully created buffer + TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - mockBufferMapCallback = std::make_unique>(); - apiBuffer = api.GetNewBuffer(); - } + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - void TearDown() override { - WireTest::TearDown(); + FlushClient(); - // Delete mock so that expectations are checked - mockBufferMapCallback = nullptr; - } + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - void FlushClient() { - WireTest::FlushClient(); - Mock::VerifyAndClearExpectations(&mockBufferMapCallback); - } + FlushServer(); - void FlushServer() { - WireTest::FlushServer(); - Mock::VerifyAndClearExpectations(&mockBufferMapCallback); - } + EXPECT_EQ(bufferContent, *static_cast( + wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); - void SetupBuffer(WGPUBufferUsageFlags usage) { - WGPUBufferDescriptor descriptor = {}; - descriptor.size = kBufferSize; - descriptor.usage = usage; + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, 
BufferUnmap(apiBuffer)).Times(1); - buffer = wgpuDeviceCreateBuffer(device, &descriptor); - - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)) - .WillOnce(Return(apiBuffer)) - .RetiresOnSaturation(); FlushClient(); } - protected: - static constexpr uint64_t kBufferSize = sizeof(uint32_t); - // A successfully created buffer - WGPUBuffer buffer; - WGPUBuffer apiBuffer; -}; + // Check that things work correctly when a validation error happens when mapping the buffer for + // reading + TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); -// Tests specific to mapping for reading -class WireBufferMappingReadTests : public WireBufferMappingTests { - public: - WireBufferMappingReadTests() { + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + + FlushServer(); + + EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)); } - ~WireBufferMappingReadTests() override = default; - void SetUp() override { - WireBufferMappingTests::SetUp(); + // Check that the map read callback is called with UNKNOWN when the buffer is destroyed before + // the request is finished + TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - SetupBuffer(WGPUBufferUsage_MapRead); + // Return success + uint32_t bufferContent = 0; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, 
kBufferSize)) + .WillOnce(Return(&bufferContent)); + + // Destroy before the client gets the success, so the callback is called with + // DestroyedBeforeCallback. + EXPECT_CALL(*mockBufferMapCallback, + Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _)) + .Times(1); + wgpuBufferRelease(buffer); + EXPECT_CALL(api, BufferRelease(apiBuffer)); + + FlushClient(); + FlushServer(); } -}; -// Check mapping for reading a succesfully created buffer -TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - FlushServer(); - - EXPECT_EQ(bufferContent, - *static_cast(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); - - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); -} - -// Check that things work correctly when a validation error happens when mapping the buffer for -// reading -TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); - - EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, 
kBufferSize)); -} - -// Check that the map read callback is called with UNKNOWN when the buffer is destroyed before the -// request is finished -TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // Return success - uint32_t bufferContent = 0; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - // Destroy before the client gets the success, so the callback is called with - // DestroyedBeforeCallback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _)) - .Times(1); - wgpuBufferRelease(buffer); - EXPECT_CALL(api, BufferRelease(apiBuffer)); - - FlushClient(); - FlushServer(); -} - -// Check the map read callback is called with "UnmappedBeforeCallback" when the map request would -// have worked, but Unmap was called -TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - // Oh no! We are calling Unmap too early! However the callback gets fired only after we get - // an answer from the server. 
- wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)); - - FlushClient(); - - // The callback shouldn't get called with success, even when the request succeeded on the - // server side - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _)) - .Times(1); - - FlushServer(); -} - -// Check that even if Unmap() was called early client-side, we correctly surface server-side -// validation errors. -TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - // Oh no! We are calling Unmap too early! However the callback gets fired only after we get - // an answer from the server that the mapAsync call was an error. - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)); - - FlushClient(); - - // The callback should be called with the server-side error and not the UnmappedBeforeCallback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); -} - -// Check the map read callback is called with "DestroyedBeforeCallback" when the map request would -// have worked, but Destroy was called -TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - // Oh no! 
We are calling Unmap too early! However the callback gets fired only after we get - // an answer from the server. - wgpuBufferDestroy(buffer); - EXPECT_CALL(api, BufferDestroy(apiBuffer)); - - FlushClient(); - - // The callback shouldn't get called with success, even when the request succeeded on the - // server side - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _)) - .Times(1); - - FlushServer(); -} - -// Check that even if Destroy() was called early client-side, we correctly surface server-side -// validation errors. -TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - // Oh no! We are calling Destroy too early! However the callback gets fired only after we get - // an answer from the server that the mapAsync call was an error. - wgpuBufferDestroy(buffer); - EXPECT_CALL(api, BufferDestroy(apiBuffer)); - - FlushClient(); - - // The callback should be called with the server-side error and not the DestroyedBeforCallback.. 
- EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); -} - -// Check that an error map read while a buffer is already mapped won't changed the result of get -// mapped range -TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) { - // Successful map - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - FlushServer(); - - // Map failure while the buffer is already mapped - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); - - EXPECT_EQ(bufferContent, - *static_cast(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); -} - -// Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback -TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, 
WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) - .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); })); - - FlushServer(); - - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); -} - -// Test that the MapReadCallback isn't fired twice the buffer external refcount reaches 0 in the -// callback -TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) - .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); })); - - FlushServer(); - - EXPECT_CALL(api, BufferRelease(apiBuffer)); - - FlushClient(); -} - -// Tests specific to mapping for writing -class WireBufferMappingWriteTests : public WireBufferMappingTests { - public: - WireBufferMappingWriteTests() { + // Check the map read callback is called with "UnmappedBeforeCallback" when the map request + // would have worked, but Unmap was called + TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, 
WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); + + // Oh no! We are calling Unmap too early! However the callback gets fired only after we get + // an answer from the server. + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)); + + FlushClient(); + + // The callback shouldn't get called with success, even when the request succeeded on the + // server side + EXPECT_CALL(*mockBufferMapCallback, + Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _)) + .Times(1); + + FlushServer(); } - ~WireBufferMappingWriteTests() override = default; - void SetUp() override { - WireBufferMappingTests::SetUp(); + // Check that even if Unmap() was called early client-side, we correctly surface server-side + // validation errors. + TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - SetupBuffer(WGPUBufferUsage_MapWrite); + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); + + // Oh no! We are calling Unmap too early! However the callback gets fired only after we get + // an answer from the server that the mapAsync call was an error. + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)); + + FlushClient(); + + // The callback should be called with the server-side error and not the + // UnmappedBeforeCallback. 
+ EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + + FlushServer(); } -}; -// Check mapping for writing a succesfully created buffer -TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + // Check the map read callback is called with "DestroyedBeforeCallback" when the map request + // would have worked, but Destroy was called + TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - uint32_t serverBufferContent = 31337; - uint32_t updatedContent = 4242; + // Oh no! We are calling Unmap too early! However the callback gets fired only after we get + // an answer from the server. + wgpuBufferDestroy(buffer); + EXPECT_CALL(api, BufferDestroy(apiBuffer)); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&serverBufferContent)); - - FlushClient(); - - // The map write callback always gets a buffer full of zeroes. 
- EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + FlushClient(); - FlushServer(); + // The callback shouldn't get called with success, even when the request succeeded on the + // server side + EXPECT_CALL(*mockBufferMapCallback, + Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _)) + .Times(1); + + FlushServer(); + } + + // Check that even if Destroy() was called early client-side, we correctly surface server-side + // validation errors. + TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); - uint32_t* lastMapWritePointer = - static_cast(wgpuBufferGetMappedRange(buffer, 0, kBufferSize)); - ASSERT_EQ(0u, *lastMapWritePointer); + // Oh no! We are calling Destroy too early! However the callback gets fired only after we + // get an answer from the server that the mapAsync call was an error. + wgpuBufferDestroy(buffer); + EXPECT_CALL(api, BufferDestroy(apiBuffer)); - // Write something to the mapped pointer - *lastMapWritePointer = updatedContent; + FlushClient(); - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + // The callback should be called with the server-side error and not the + // DestroyedBeforCallback.. 
+ EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - FlushClient(); + FlushServer(); + } - // After the buffer is unmapped, the content of the buffer is updated on the server - ASSERT_EQ(serverBufferContent, updatedContent); -} + // Check that an error map read while a buffer is already mapped won't changed the result of get + // mapped range + TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) { + // Successful map + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); -// Check that things work correctly when a validation error happens when mapping the buffer for -// writing -TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - FlushClient(); + FlushServer(); - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + // Map failure while the buffer is already mapped + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, 
WGPUBufferMapAsyncStatus_Error); + })); - FlushServer(); + FlushClient(); - EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize)); -} + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); -// Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is -// destroyed before the request is finished -TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + FlushServer(); - // Return success - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); + EXPECT_EQ(bufferContent, *static_cast( + wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); + } - // Destroy before the client gets the success, so the callback is called with - // DestroyedBeforeCallback. 
- EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _)) - .Times(1); - wgpuBufferRelease(buffer); - EXPECT_CALL(api, BufferRelease(apiBuffer)); + // Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback + TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - FlushClient(); - FlushServer(); -} + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); -// Check the map write callback is called with "UnmappedBeforeCallback" when the map request would -// have worked, but Unmap was called -TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + FlushClient(); - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) + .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); })); - FlushClient(); + FlushServer(); - // Oh no! We are calling Unmap too early! 
- EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _)) - .Times(1); - wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - // The callback shouldn't get called, even when the request succeeded on the server side - FlushServer(); -} + FlushClient(); + } -// Check that an error map write while a buffer is already mapped -TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) { - // Successful map - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + // Test that the MapReadCallback isn't fired twice the buffer external refcount reaches 0 in the + // callback + TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - FlushClient(); + FlushClient(); - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) + .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); })); - FlushServer(); + FlushServer(); - // Map failure while the buffer is already mapped - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, 
kBufferSize, ToMockBufferMapCallback, nullptr); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); + EXPECT_CALL(api, BufferRelease(apiBuffer)); - FlushClient(); + FlushClient(); + } - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + // Tests specific to mapping for writing + class WireBufferMappingWriteTests : public WireBufferMappingTests { + public: + WireBufferMappingWriteTests() { + } + ~WireBufferMappingWriteTests() override = default; - FlushServer(); + void SetUp() override { + WireBufferMappingTests::SetUp(); - EXPECT_NE(nullptr, - static_cast(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); -} + SetupBuffer(WGPUBufferUsage_MapWrite); + } + }; -// Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback -TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + // Check mapping for writing a succesfully created buffer + TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); + uint32_t serverBufferContent = 31337; + uint32_t updatedContent = 4242; - FlushClient(); + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, 
WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&serverBufferContent)); - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) - .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); })); + FlushClient(); - FlushServer(); + // The map write callback always gets a buffer full of zeroes. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + FlushServer(); + + uint32_t* lastMapWritePointer = + static_cast(wgpuBufferGetMappedRange(buffer, 0, kBufferSize)); + ASSERT_EQ(0u, *lastMapWritePointer); + + // Write something to the mapped pointer + *lastMapWritePointer = updatedContent; - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + + // After the buffer is unmapped, the content of the buffer is updated on the server + ASSERT_EQ(serverBufferContent, updatedContent); + } + + // Check that things work correctly when a validation error happens when mapping the buffer for + // writing + TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - FlushClient(); -} + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); -// Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in the -// callback -TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) { - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + FlushClient(); - uint32_t bufferContent = 31337; - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() 
{ - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&bufferContent)); + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - FlushClient(); + FlushServer(); - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) - .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); })); + EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize)); + } + + // Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is + // destroyed before the request is finished + TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // Return success + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - FlushServer(); + // Destroy before the client gets the success, so the callback is called with + // DestroyedBeforeCallback. 
+ EXPECT_CALL(*mockBufferMapCallback, + Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _)) + .Times(1); + wgpuBufferRelease(buffer); + EXPECT_CALL(api, BufferRelease(apiBuffer)); - EXPECT_CALL(api, BufferRelease(apiBuffer)); + FlushClient(); + FlushServer(); + } + + // Check the map write callback is called with "UnmappedBeforeCallback" when the map request + // would have worked, but Unmap was called + TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - FlushClient(); -} + FlushClient(); -// Test successful buffer creation with mappedAtCreation=true -TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) { - WGPUBufferDescriptor descriptor = {}; - descriptor.size = 4; - descriptor.mappedAtCreation = true; + // Oh no! We are calling Unmap too early! 
+ EXPECT_CALL(*mockBufferMapCallback, + Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _)) + .Times(1); + wgpuBufferUnmap(buffer); - WGPUBuffer apiBuffer = api.GetNewBuffer(); - uint32_t apiBufferData = 1234; + // The callback shouldn't get called, even when the request succeeded on the server side + FlushServer(); + } - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + // Check that an error map write while a buffer is already mapped + TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) { + // Successful map + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - FlushClient(); + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + FlushServer(); + + // Map failure while the buffer is already mapped + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + + FlushServer(); + + EXPECT_NE(nullptr, static_cast( + wgpuBufferGetConstMappedRange(buffer, 0, 
kBufferSize))); + } - FlushClient(); -} + // Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback + TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) + .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); })); + + FlushServer(); + + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + } + + // Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in + // the callback + TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) { + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); -// Test that releasing a buffer mapped at creation does not call Unmap -TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) { - WGPUBufferDescriptor descriptor = {}; - descriptor.size = 4; - descriptor.mappedAtCreation = true; + uint32_t bufferContent = 31337; + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&bufferContent)); - WGPUBuffer apiBuffer = api.GetNewBuffer(); - uint32_t apiBufferData = 1234; + FlushClient(); - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + 
EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)) + .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); })); - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); + FlushServer(); - FlushClient(); + EXPECT_CALL(api, BufferRelease(apiBuffer)); - wgpuBufferRelease(buffer); - EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1); + FlushClient(); + } - FlushClient(); -} - -// Test that it is valid to map a buffer after it is mapped at creation and unmapped -TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) { - WGPUBufferDescriptor descriptor = {}; - descriptor.size = 4; - descriptor.usage = WGPUMapMode_Write; - descriptor.mappedAtCreation = true; - - WGPUBuffer apiBuffer = api.GetNewBuffer(); - uint32_t apiBufferData = 1234; - - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); - - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); - - FlushClient(); - - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&apiBufferData)); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - FlushServer(); -} - -// Test that it is invalid to map a buffer after mappedAtCreation but before Unmap -TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) { - WGPUBufferDescriptor 
descriptor = {}; - descriptor.size = 4; - descriptor.mappedAtCreation = true; - - WGPUBuffer apiBuffer = api.GetNewBuffer(); - uint32_t apiBufferData = 1234; - - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); - - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); - - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - FlushClient(); - - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); - - EXPECT_NE(nullptr, - static_cast(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); - - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); -} - -// Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client and -// never gets to the server-side. -TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) { - size_t kOOMSize = std::numeric_limits::max(); - WGPUBuffer apiBuffer = api.GetNewBuffer(); - - // Check for CreateBufferMapped. 
- { + // Test successful buffer creation with mappedAtCreation=true + TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) { WGPUBufferDescriptor descriptor = {}; - descriptor.usage = WGPUBufferUsage_CopySrc; - descriptor.size = kOOMSize; + descriptor.size = 4; descriptor.mappedAtCreation = true; - wgpuDeviceCreateBuffer(device, &descriptor); - EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _)); - EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer)); + WGPUBuffer apiBuffer = api.GetNewBuffer(); + uint32_t apiBufferData = 1234; + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); + + FlushClient(); + + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + FlushClient(); } - // Check for MapRead usage. - { + // Test that releasing a buffer mapped at creation does not call Unmap + TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) { WGPUBufferDescriptor descriptor = {}; - descriptor.usage = WGPUBufferUsage_MapRead; - descriptor.size = kOOMSize; + descriptor.size = 4; + descriptor.mappedAtCreation = true; + + WGPUBuffer apiBuffer = api.GetNewBuffer(); + uint32_t apiBufferData = 1234; + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); + + FlushClient(); + + wgpuBufferRelease(buffer); + EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1); - wgpuDeviceCreateBuffer(device, &descriptor); - EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _)); - EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer)); FlushClient(); } - // Check for MapWrite usage. 
- { + // Test that it is valid to map a buffer after it is mapped at creation and unmapped + TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) { WGPUBufferDescriptor descriptor = {}; - descriptor.usage = WGPUBufferUsage_MapWrite; - descriptor.size = kOOMSize; + descriptor.size = 4; + descriptor.usage = WGPUMapMode_Write; + descriptor.mappedAtCreation = true; + + WGPUBuffer apiBuffer = api.GetNewBuffer(); + uint32_t apiBufferData = 1234; + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); + + FlushClient(); + + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&apiBufferData)); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + FlushServer(); + } + + // Test that it is invalid to map a buffer after mappedAtCreation but before Unmap + TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) { + WGPUBufferDescriptor descriptor = {}; + descriptor.size = 4; + descriptor.mappedAtCreation = true; + + WGPUBuffer apiBuffer = api.GetNewBuffer(); + uint32_t apiBufferData = 1234; + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData)); + + FlushClient(); + + 
wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + + FlushServer(); + + EXPECT_NE(nullptr, static_cast( + wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize))); + + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - wgpuDeviceCreateBuffer(device, &descriptor); - EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _)); - EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer)); FlushClient(); } -} -// Test that registering a callback then wire disconnect calls the callback with -// DeviceLost. -TEST_F(WireBufferMappingTests, MapThenDisconnect) { - SetupBuffer(WGPUMapMode_Write); - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, this); + // Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client + // and never gets to the server-side. + TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) { + size_t kOOMSize = std::numeric_limits::max(); + WGPUBuffer apiBuffer = api.GetNewBuffer(); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1); + // Check for CreateBufferMapped. 
+ { + WGPUBufferDescriptor descriptor = {}; + descriptor.usage = WGPUBufferUsage_CopySrc; + descriptor.size = kOOMSize; + descriptor.mappedAtCreation = true; - FlushClient(); + wgpuDeviceCreateBuffer(device, &descriptor); + EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _)); + EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer)); + FlushClient(); + } - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1); - GetWireClient()->Disconnect(); -} + // Check for MapRead usage. + { + WGPUBufferDescriptor descriptor = {}; + descriptor.usage = WGPUBufferUsage_MapRead; + descriptor.size = kOOMSize; -// Test that registering a callback after wire disconnect calls the callback with -// DeviceLost. -TEST_F(WireBufferMappingTests, MapAfterDisconnect) { - SetupBuffer(WGPUMapMode_Read); + wgpuDeviceCreateBuffer(device, &descriptor); + EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _)); + EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer)); + FlushClient(); + } - GetWireClient()->Disconnect(); + // Check for MapWrite usage. 
+ { + WGPUBufferDescriptor descriptor = {}; + descriptor.usage = WGPUBufferUsage_MapWrite; + descriptor.size = kOOMSize; - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1); - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this); -} - -// Hack to pass in test context into user callback -struct TestData { - WireBufferMappingTests* pTest; - WGPUBuffer* pTestBuffer; - size_t numRequests; -}; - -static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status, - void* userdata) { - TestData* testData = reinterpret_cast(userdata); - // Mimic the user callback is sending new requests - ASSERT_NE(testData, nullptr); - ASSERT_NE(testData->pTest, nullptr); - ASSERT_NE(testData->pTestBuffer, nullptr); - - mockBufferMapCallback->Call(status, testData->pTest); - - // Send the requests a number of times - for (size_t i = 0; i < testData->numRequests; i++) { - wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t), - ToMockBufferMapCallback, testData->pTest); + wgpuDeviceCreateBuffer(device, &descriptor); + EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _)); + EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer)); + FlushClient(); + } } -} -// Test that requests inside user callbacks before disconnect are called -TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) { - SetupBuffer(WGPUMapMode_Write); - TestData testData = {this, &buffer, 10}; - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, - ToMockBufferMapCallbackWithNewRequests, &testData); + // Test that registering a callback then wire disconnect calls the callback with + // DeviceLost. 
+ TEST_F(WireBufferMappingTests, MapThenDisconnect) { + SetupBuffer(WGPUMapMode_Write); + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + this); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1); + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1); - FlushClient(); + FlushClient(); - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)) - .Times(1 + testData.numRequests); - GetWireClient()->Disconnect(); -} + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)) + .Times(1); + GetWireClient()->Disconnect(); + } -// Test that requests inside user callbacks before object destruction are called -TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) { - TestData testData = {this, &buffer, 10}; - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, - ToMockBufferMapCallbackWithNewRequests, &testData); + // Test that registering a callback after wire disconnect calls the callback with + // DeviceLost. 
+ TEST_F(WireBufferMappingTests, MapAfterDisconnect) { + SetupBuffer(WGPUMapMode_Read); - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1); + GetWireClient()->Disconnect(); - FlushClient(); + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)) + .Times(1); + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this); + } - EXPECT_CALL(*mockBufferMapCallback, - Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this)) - .Times(1 + testData.numRequests); - wgpuBufferRelease(buffer); -} + // Hack to pass in test context into user callback + struct TestData { + WireBufferMappingTests* pTest; + WGPUBuffer* pTestBuffer; + size_t numRequests; + }; + + static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status, + void* userdata) { + TestData* testData = reinterpret_cast(userdata); + // Mimic the user callback is sending new requests + ASSERT_NE(testData, nullptr); + ASSERT_NE(testData->pTest, nullptr); + ASSERT_NE(testData->pTestBuffer, nullptr); + + mockBufferMapCallback->Call(status, testData->pTest); + + // Send the requests a number of times + for (size_t i = 0; i < testData->numRequests; i++) { + wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t), + ToMockBufferMapCallback, testData->pTest); + } + } + + // Test that requests inside user callbacks before disconnect are called + TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) { + SetupBuffer(WGPUMapMode_Write); + TestData testData = {this, &buffer, 10}; + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, + ToMockBufferMapCallbackWithNewRequests, &testData); + + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, 
kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)) + .Times(1 + testData.numRequests); + GetWireClient()->Disconnect(); + } + + // Test that requests inside user callbacks before object destruction are called + TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) { + TestData testData = {this, &buffer, 10}; + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, + ToMockBufferMapCallbackWithNewRequests, &testData); + + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1); + + FlushClient(); + + EXPECT_CALL(*mockBufferMapCallback, + Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this)) + .Times(1 + testData.numRequests); + wgpuBufferRelease(buffer); + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp index 7814c2107c..e03189009e 100644 --- a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp +++ b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp @@ -17,361 +17,369 @@ #include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/wire/WireClient.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { + namespace { -namespace { + using testing::_; + using testing::InvokeWithoutArgs; + using testing::Mock; + using testing::Return; + using testing::Sequence; + using testing::StrEq; + using testing::StrictMock; - // Mock class to add expectations on the wire 
calling callbacks - class MockCreateComputePipelineAsyncCallback { + // Mock class to add expectations on the wire calling callbacks + class MockCreateComputePipelineAsyncCallback { + public: + MOCK_METHOD(void, + Call, + (WGPUCreatePipelineAsyncStatus status, + WGPUComputePipeline pipeline, + const char* message, + void* userdata)); + }; + + std::unique_ptr> + mockCreateComputePipelineAsyncCallback; + void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status, + WGPUComputePipeline pipeline, + const char* message, + void* userdata) { + mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata); + } + + class MockCreateRenderPipelineAsyncCallback { + public: + MOCK_METHOD(void, + Call, + (WGPUCreatePipelineAsyncStatus status, + WGPURenderPipeline pipeline, + const char* message, + void* userdata)); + }; + + std::unique_ptr> + mockCreateRenderPipelineAsyncCallback; + void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status, + WGPURenderPipeline pipeline, + const char* message, + void* userdata) { + mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata); + } + + } // anonymous namespace + + class WireCreatePipelineAsyncTest : public WireTest { public: - MOCK_METHOD(void, - Call, - (WGPUCreatePipelineAsyncStatus status, - WGPUComputePipeline pipeline, - const char* message, - void* userdata)); + void SetUp() override { + WireTest::SetUp(); + + mockCreateComputePipelineAsyncCallback = + std::make_unique>(); + mockCreateRenderPipelineAsyncCallback = + std::make_unique>(); + } + + void TearDown() override { + WireTest::TearDown(); + + // Delete mock so that expectations are checked + mockCreateComputePipelineAsyncCallback = nullptr; + mockCreateRenderPipelineAsyncCallback = nullptr; + } + + void FlushClient() { + WireTest::FlushClient(); + Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback); + } + + void FlushServer() { + WireTest::FlushServer(); + 
Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback); + } }; - std::unique_ptr> - mockCreateComputePipelineAsyncCallback; - void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status, - WGPUComputePipeline pipeline, - const char* message, - void* userdata) { - mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata); + // Test when creating a compute pipeline with CreateComputePipelineAsync() successfully. + TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) { + WGPUShaderModuleDescriptor csDescriptor{}; + WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); + WGPUShaderModule apiCsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); + + WGPUComputePipelineDescriptor descriptor{}; + descriptor.compute.module = csModule; + descriptor.compute.entryPoint = "main"; + + wgpuDeviceCreateComputePipelineAsync(device, &descriptor, + ToMockCreateComputePipelineAsyncCallback, this); + + EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallDeviceCreateComputePipelineAsyncCallback( + apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); + })); + + FlushClient(); + + EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this)) + .Times(1); + + FlushServer(); } - class MockCreateRenderPipelineAsyncCallback { - public: - MOCK_METHOD(void, - Call, - (WGPUCreatePipelineAsyncStatus status, - WGPURenderPipeline pipeline, - const char* message, - void* userdata)); - }; + // Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) { + WGPUShaderModuleDescriptor csDescriptor{}; + WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); + WGPUShaderModule apiCsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); - std::unique_ptr> - mockCreateRenderPipelineAsyncCallback; - void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status, - WGPURenderPipeline pipeline, - const char* message, - void* userdata) { - mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata); + WGPUComputePipelineDescriptor descriptor{}; + descriptor.compute.module = csModule; + descriptor.compute.entryPoint = "main"; + + wgpuDeviceCreateComputePipelineAsync(device, &descriptor, + ToMockCreateComputePipelineAsyncCallback, this); + + EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallDeviceCreateComputePipelineAsyncCallback( + apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message"); + })); + + FlushClient(); + + EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this)) + .Times(1); + + FlushServer(); } -} // anonymous namespace + // Test when creating a render pipeline with CreateRenderPipelineAsync() successfully. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) { + WGPUShaderModuleDescriptor vertexDescriptor = {}; + WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiVsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); -class WireCreatePipelineAsyncTest : public WireTest { - public: - void SetUp() override { - WireTest::SetUp(); + WGPURenderPipelineDescriptor pipelineDescriptor{}; + pipelineDescriptor.vertex.module = vsModule; + pipelineDescriptor.vertex.entryPoint = "main"; - mockCreateComputePipelineAsyncCallback = - std::make_unique>(); - mockCreateRenderPipelineAsyncCallback = - std::make_unique>(); + WGPUFragmentState fragment = {}; + fragment.module = vsModule; + fragment.entryPoint = "main"; + pipelineDescriptor.fragment = &fragment; + + wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, + ToMockCreateRenderPipelineAsyncCallback, this); + EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallDeviceCreateRenderPipelineAsyncCallback( + apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); + })); + + FlushClient(); + + EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this)) + .Times(1); + + FlushServer(); } - void TearDown() override { - WireTest::TearDown(); + // Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) { + WGPUShaderModuleDescriptor vertexDescriptor = {}; + WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiVsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - // Delete mock so that expectations are checked - mockCreateComputePipelineAsyncCallback = nullptr; - mockCreateRenderPipelineAsyncCallback = nullptr; + WGPURenderPipelineDescriptor pipelineDescriptor{}; + pipelineDescriptor.vertex.module = vsModule; + pipelineDescriptor.vertex.entryPoint = "main"; + + WGPUFragmentState fragment = {}; + fragment.module = vsModule; + fragment.entryPoint = "main"; + pipelineDescriptor.fragment = &fragment; + + wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, + ToMockCreateRenderPipelineAsyncCallback, this); + EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallDeviceCreateRenderPipelineAsyncCallback( + apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message"); + })); + + FlushClient(); + + EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this)) + .Times(1); + + FlushServer(); } - void FlushClient() { - WireTest::FlushClient(); - Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback); + // Test that registering a callback then wire disconnect calls the callback with + // DeviceLost. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) { + WGPUShaderModuleDescriptor vertexDescriptor = {}; + WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiVsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); + + WGPUFragmentState fragment = {}; + fragment.module = vsModule; + fragment.entryPoint = "main"; + + WGPURenderPipelineDescriptor pipelineDescriptor{}; + pipelineDescriptor.vertex.module = vsModule; + pipelineDescriptor.vertex.entryPoint = "main"; + pipelineDescriptor.fragment = &fragment; + + wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, + ToMockCreateRenderPipelineAsyncCallback, this); + EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallDeviceCreateRenderPipelineAsyncCallback( + apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); + })); + + FlushClient(); + + EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this)) + .Times(1); + GetWireClient()->Disconnect(); } - void FlushServer() { - WireTest::FlushServer(); - Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback); + // Test that registering a callback then wire disconnect calls the callback with + // DeviceLost. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) { + WGPUShaderModuleDescriptor csDescriptor{}; + WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); + WGPUShaderModule apiCsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); + + WGPUComputePipelineDescriptor descriptor{}; + descriptor.compute.module = csModule; + descriptor.compute.entryPoint = "main"; + + wgpuDeviceCreateComputePipelineAsync(device, &descriptor, + ToMockCreateComputePipelineAsyncCallback, this); + EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallDeviceCreateComputePipelineAsyncCallback( + apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); + })); + + FlushClient(); + + EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this)) + .Times(1); + GetWireClient()->Disconnect(); } -}; -// Test when creating a compute pipeline with CreateComputePipelineAsync() successfully. -TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) { - WGPUShaderModuleDescriptor csDescriptor{}; - WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); - WGPUShaderModule apiCsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); + // Test that registering a callback after wire disconnect calls the callback with + // DeviceLost. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) { + WGPUShaderModuleDescriptor vertexDescriptor = {}; + WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiVsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - WGPUComputePipelineDescriptor descriptor{}; - descriptor.compute.module = csModule; - descriptor.compute.entryPoint = "main"; + WGPUFragmentState fragment = {}; + fragment.module = vsModule; + fragment.entryPoint = "main"; - wgpuDeviceCreateComputePipelineAsync(device, &descriptor, - ToMockCreateComputePipelineAsyncCallback, this); + WGPURenderPipelineDescriptor pipelineDescriptor{}; + pipelineDescriptor.vertex.module = vsModule; + pipelineDescriptor.vertex.entryPoint = "main"; + pipelineDescriptor.fragment = &fragment; - EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallDeviceCreateComputePipelineAsyncCallback( - apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); - })); + FlushClient(); - FlushClient(); + GetWireClient()->Disconnect(); - EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this)) - .Times(1); + EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this)) + .Times(1); + wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, + ToMockCreateRenderPipelineAsyncCallback, this); + } - FlushServer(); -} + // Test that registering a callback after wire disconnect calls the callback with + // DeviceLost. 
+ TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) { + WGPUShaderModuleDescriptor csDescriptor{}; + WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); + WGPUShaderModule apiCsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); -// Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error. -TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) { - WGPUShaderModuleDescriptor csDescriptor{}; - WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); - WGPUShaderModule apiCsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); + WGPUComputePipelineDescriptor descriptor{}; + descriptor.compute.module = csModule; + descriptor.compute.entryPoint = "main"; - WGPUComputePipelineDescriptor descriptor{}; - descriptor.compute.module = csModule; - descriptor.compute.entryPoint = "main"; + FlushClient(); - wgpuDeviceCreateComputePipelineAsync(device, &descriptor, - ToMockCreateComputePipelineAsyncCallback, this); + GetWireClient()->Disconnect(); - EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallDeviceCreateComputePipelineAsyncCallback( - apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message"); - })); + EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this)) + .Times(1); - FlushClient(); + wgpuDeviceCreateComputePipelineAsync(device, &descriptor, + ToMockCreateComputePipelineAsyncCallback, this); + } - EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this)) - .Times(1); + TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) { + WGPUShaderModuleDescriptor 
vertexDescriptor = {}; + WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule)); - FlushServer(); -} + WGPURenderPipelineDescriptor pipelineDescriptor{}; + pipelineDescriptor.vertex.module = module; + pipelineDescriptor.vertex.entryPoint = "main"; -// Test when creating a render pipeline with CreateRenderPipelineAsync() successfully. -TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) { - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiVsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); + WGPUFragmentState fragment = {}; + fragment.module = module; + fragment.entryPoint = "main"; + pipelineDescriptor.fragment = &fragment; - WGPURenderPipelineDescriptor pipelineDescriptor{}; - pipelineDescriptor.vertex.module = vsModule; - pipelineDescriptor.vertex.entryPoint = "main"; + wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, + ToMockCreateRenderPipelineAsyncCallback, this); - WGPUFragmentState fragment = {}; - fragment.module = vsModule; - fragment.entryPoint = "main"; - pipelineDescriptor.fragment = &fragment; + EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)); + FlushClient(); - wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, - ToMockCreateRenderPipelineAsyncCallback, this); - EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallDeviceCreateRenderPipelineAsyncCallback( - apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); - })); + EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, + Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this)) + .Times(1); - 
FlushClient(); + wgpuDeviceRelease(device); - EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this)) - .Times(1); + // Expect release on all objects created by the client. + Sequence s1, s2; + EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1); + EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2); - FlushServer(); -} + FlushClient(); + DefaultApiDeviceWasReleased(); + } -// Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error. -TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) { - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiVsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - - WGPURenderPipelineDescriptor pipelineDescriptor{}; - pipelineDescriptor.vertex.module = vsModule; - pipelineDescriptor.vertex.entryPoint = "main"; - - WGPUFragmentState fragment = {}; - fragment.module = vsModule; - fragment.entryPoint = "main"; - pipelineDescriptor.fragment = &fragment; - - wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, - ToMockCreateRenderPipelineAsyncCallback, this); - EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallDeviceCreateRenderPipelineAsyncCallback( - apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error 
message"); - })); - - FlushClient(); - - EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this)) - .Times(1); - - FlushServer(); -} - -// Test that registering a callback then wire disconnect calls the callback with -// DeviceLost. -TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) { - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiVsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - - WGPUFragmentState fragment = {}; - fragment.module = vsModule; - fragment.entryPoint = "main"; - - WGPURenderPipelineDescriptor pipelineDescriptor{}; - pipelineDescriptor.vertex.module = vsModule; - pipelineDescriptor.vertex.entryPoint = "main"; - pipelineDescriptor.fragment = &fragment; - - wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, - ToMockCreateRenderPipelineAsyncCallback, this); - EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallDeviceCreateRenderPipelineAsyncCallback( - apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); - })); - - FlushClient(); - - EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this)) - .Times(1); - GetWireClient()->Disconnect(); -} - -// Test that registering a callback then wire disconnect calls the callback with -// DeviceLost. 
-TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) { - WGPUShaderModuleDescriptor csDescriptor{}; - WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); - WGPUShaderModule apiCsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); - - WGPUComputePipelineDescriptor descriptor{}; - descriptor.compute.module = csModule; - descriptor.compute.entryPoint = "main"; - - wgpuDeviceCreateComputePipelineAsync(device, &descriptor, - ToMockCreateComputePipelineAsyncCallback, this); - EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallDeviceCreateComputePipelineAsyncCallback( - apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, ""); - })); - - FlushClient(); - - EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this)) - .Times(1); - GetWireClient()->Disconnect(); -} - -// Test that registering a callback after wire disconnect calls the callback with -// DeviceLost. 
-TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) { - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiVsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - - WGPUFragmentState fragment = {}; - fragment.module = vsModule; - fragment.entryPoint = "main"; - - WGPURenderPipelineDescriptor pipelineDescriptor{}; - pipelineDescriptor.vertex.module = vsModule; - pipelineDescriptor.vertex.entryPoint = "main"; - pipelineDescriptor.fragment = &fragment; - - FlushClient(); - - GetWireClient()->Disconnect(); - - EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this)) - .Times(1); - wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, - ToMockCreateRenderPipelineAsyncCallback, this); -} - -// Test that registering a callback after wire disconnect calls the callback with -// DeviceLost. 
-TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) { - WGPUShaderModuleDescriptor csDescriptor{}; - WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor); - WGPUShaderModule apiCsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule)); - - WGPUComputePipelineDescriptor descriptor{}; - descriptor.compute.module = csModule; - descriptor.compute.entryPoint = "main"; - - FlushClient(); - - GetWireClient()->Disconnect(); - - EXPECT_CALL(*mockCreateComputePipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this)) - .Times(1); - - wgpuDeviceCreateComputePipelineAsync(device, &descriptor, - ToMockCreateComputePipelineAsyncCallback, this); -} - -TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) { - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule)); - - WGPURenderPipelineDescriptor pipelineDescriptor{}; - pipelineDescriptor.vertex.module = module; - pipelineDescriptor.vertex.entryPoint = "main"; - - WGPUFragmentState fragment = {}; - fragment.module = module; - fragment.entryPoint = "main"; - pipelineDescriptor.fragment = &fragment; - - wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor, - ToMockCreateRenderPipelineAsyncCallback, this); - - EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _)); - FlushClient(); - - EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback, - Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this)) - .Times(1); - - wgpuDeviceRelease(device); - - // Expect release on all objects created by the client. 
- Sequence s1, s2; - EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1); - EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2); - - FlushClient(); - DefaultApiDeviceWasReleased(); -} +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp index 658325ccaf..ad349391bc 100644 --- a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp +++ b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp @@ -15,44 +15,49 @@ #include "dawn/tests/MockCallback.h" #include "dawn/tests/unittests/wire/WireTest.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireDestroyObjectTests : public WireTest {}; + using testing::Return; + using testing::Sequence; -// Test that destroying the device also destroys child objects. -TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) { - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); + class WireDestroyObjectTests : public WireTest {}; - WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder)); + // Test that destroying the device also destroys child objects. 
+ TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) { + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - FlushClient(); + WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiEncoder)); - // Release the device. It should cause the command encoder to be destroyed. - wgpuDeviceRelease(device); + FlushClient(); - Sequence s1, s2; - // The device and child objects should be released. - EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1); - EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2); + // Release the device. It should cause the command encoder to be destroyed. + wgpuDeviceRelease(device); - FlushClient(); + Sequence s1, s2; + // The device and child objects should be released. + EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1); + EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2); - // Signal that we already released and cleared callbacks for |apiDevice| - DefaultApiDeviceWasReleased(); + FlushClient(); - // Using the command encoder should be an error. 
- wgpuCommandEncoderFinish(encoder, nullptr); - FlushClient(false); -} + // Signal that we already released and cleared callbacks for |apiDevice| + DefaultApiDeviceWasReleased(); + + // Using the command encoder should be an error. + wgpuCommandEncoderFinish(encoder, nullptr); + FlushClient(false); + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp index 5d449c6eb8..d8f397c471 100644 --- a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp +++ b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp @@ -18,157 +18,167 @@ #include "dawn/tests/MockCallback.h" #include "dawn/wire/WireClient.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -namespace { + using testing::_; + using testing::Exactly; + using testing::InvokeWithoutArgs; + using testing::MockCallback; + using testing::Return; + using testing::Sequence; + using testing::StrEq; - class WireDisconnectTests : public WireTest {}; + namespace { -} // anonymous namespace + class WireDisconnectTests : public WireTest {}; -// Test that commands are not received if the client disconnects. -TEST_F(WireDisconnectTests, CommandsAfterDisconnect) { - // Check that commands work at all. - wgpuDeviceCreateCommandEncoder(device, nullptr); + } // anonymous namespace - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCmdBufEncoder)); - FlushClient(); + // Test that commands are not received if the client disconnects. + TEST_F(WireDisconnectTests, CommandsAfterDisconnect) { + // Check that commands work at all. + wgpuDeviceCreateCommandEncoder(device, nullptr); - // Disconnect. 
- GetWireClient()->Disconnect(); + WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCmdBufEncoder)); + FlushClient(); - // Command is not received because client disconnected. - wgpuDeviceCreateCommandEncoder(device, nullptr); - EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0)); - FlushClient(); -} + // Disconnect. + GetWireClient()->Disconnect(); -// Test that commands that are serialized before a disconnect but flushed -// after are received. -TEST_F(WireDisconnectTests, FlushAfterDisconnect) { - // Check that commands work at all. - wgpuDeviceCreateCommandEncoder(device, nullptr); + // Command is not received because client disconnected. + wgpuDeviceCreateCommandEncoder(device, nullptr); + EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0)); + FlushClient(); + } - // Disconnect. - GetWireClient()->Disconnect(); + // Test that commands that are serialized before a disconnect but flushed + // after are received. + TEST_F(WireDisconnectTests, FlushAfterDisconnect) { + // Check that commands work at all. + wgpuDeviceCreateCommandEncoder(device, nullptr); - // Already-serialized commmands are still received. - WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCmdBufEncoder)); - FlushClient(); -} + // Disconnect. + GetWireClient()->Disconnect(); -// Check that disconnecting the wire client calls the device lost callback exacty once. -TEST_F(WireDisconnectTests, CallsDeviceLostCallback) { - MockCallback mockDeviceLostCallback; - wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), - mockDeviceLostCallback.MakeUserdata(this)); + // Already-serialized commands are still received. 
+ WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCmdBufEncoder)); + FlushClient(); + } - // Disconnect the wire client. We should receive device lost only once. - EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this)) - .Times(Exactly(1)); - GetWireClient()->Disconnect(); - GetWireClient()->Disconnect(); -} + // Check that disconnecting the wire client calls the device lost callback exactly once. + TEST_F(WireDisconnectTests, CallsDeviceLostCallback) { + MockCallback mockDeviceLostCallback; + wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), + mockDeviceLostCallback.MakeUserdata(this)); -// Check that disconnecting the wire client after a device loss does not trigger the callback again. -TEST_F(WireDisconnectTests, ServerLostThenDisconnect) { - MockCallback mockDeviceLostCallback; - wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), - mockDeviceLostCallback.MakeUserdata(this)); + // Disconnect the wire client. We should receive device lost only once. + EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this)) + .Times(Exactly(1)); + GetWireClient()->Disconnect(); + GetWireClient()->Disconnect(); + } - api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, - "some reason"); + // Check that disconnecting the wire client after a device loss does not trigger the callback + // again. + TEST_F(WireDisconnectTests, ServerLostThenDisconnect) { + MockCallback mockDeviceLostCallback; + wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), + mockDeviceLostCallback.MakeUserdata(this)); - // Flush the device lost return command. 
- EXPECT_CALL(mockDeviceLostCallback, - Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this)) - .Times(Exactly(1)); - FlushServer(); + api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, + "some reason"); - // Disconnect the client. We shouldn't see the lost callback again. - EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0)); - GetWireClient()->Disconnect(); -} + // Flush the device lost return command. + EXPECT_CALL(mockDeviceLostCallback, + Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this)) + .Times(Exactly(1)); + FlushServer(); -// Check that disconnecting the wire client inside the device loss callback does not trigger the -// callback again. -TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) { - MockCallback mockDeviceLostCallback; - wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), - mockDeviceLostCallback.MakeUserdata(this)); + // Disconnect the client. We shouldn't see the lost callback again. + EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0)); + GetWireClient()->Disconnect(); + } - api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, - "lost reason"); + // Check that disconnecting the wire client inside the device loss callback does not trigger the + // callback again. + TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) { + MockCallback mockDeviceLostCallback; + wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), + mockDeviceLostCallback.MakeUserdata(this)); - // Disconnect the client inside the lost callback. We should see the callback - // only once. 
- EXPECT_CALL(mockDeviceLostCallback, - Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this)) - .WillOnce(InvokeWithoutArgs([&]() { - EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0)); - GetWireClient()->Disconnect(); - })); - FlushServer(); -} + api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, + "lost reason"); -// Check that a device loss after a disconnect does not trigger the callback again. -TEST_F(WireDisconnectTests, DisconnectThenServerLost) { - MockCallback mockDeviceLostCallback; - wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), - mockDeviceLostCallback.MakeUserdata(this)); + // Disconnect the client inside the lost callback. We should see the callback + // only once. + EXPECT_CALL(mockDeviceLostCallback, + Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this)) + .WillOnce(InvokeWithoutArgs([&]() { + EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0)); + GetWireClient()->Disconnect(); + })); + FlushServer(); + } - // Disconnect the client. We should see the callback once. - EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this)) - .Times(Exactly(1)); - GetWireClient()->Disconnect(); + // Check that a device loss after a disconnect does not trigger the callback again. + TEST_F(WireDisconnectTests, DisconnectThenServerLost) { + MockCallback mockDeviceLostCallback; + wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(), + mockDeviceLostCallback.MakeUserdata(this)); - // Lose the device on the server. The client callback shouldn't be - // called again. - api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, - "lost reason"); - EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0)); - FlushServer(); -} + // Disconnect the client. We should see the callback once. 
+ EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this)) + .Times(Exactly(1)); + GetWireClient()->Disconnect(); -// Test that client objects are all destroyed if the WireClient is destroyed. -TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) { - WGPUSamplerDescriptor desc = {}; - wgpuDeviceCreateCommandEncoder(device, nullptr); - wgpuDeviceCreateSampler(device, &desc); + // Lose the device on the server. The client callback shouldn't be + // called again. + api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, + "lost reason"); + EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0)); + FlushServer(); + } - WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) - .WillOnce(Return(apiCommandEncoder)); + // Test that client objects are all destroyed if the WireClient is destroyed. + TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) { + WGPUSamplerDescriptor desc = {}; + wgpuDeviceCreateCommandEncoder(device, nullptr); + wgpuDeviceCreateSampler(device, &desc); - WGPUSampler apiSampler = api.GetNewSampler(); - EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler)); + WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) + .WillOnce(Return(apiCommandEncoder)); - FlushClient(); + WGPUSampler apiSampler = api.GetNewSampler(); + EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler)); - DeleteClient(); + FlushClient(); - // Expect release on all objects created by the client. 
- Sequence s1, s2, s3; - EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1); - EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2); - EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)) - .Times(1) - .InSequence(s1, s2); - EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3); - FlushClient(); + DeleteClient(); - // Signal that we already released and cleared callbacks for |apiDevice| - DefaultApiDeviceWasReleased(); -} + // Expect release on all objects created by the client. + Sequence s1, s2, s3; + EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1); + EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2); + EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)) + .Times(1) + .InSequence(s1, s2); + EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3); + FlushClient(); + + // Signal that we already released and cleared callbacks for |apiDevice| + DefaultApiDeviceWasReleased(); + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp index fc409ccd4b..045125d535 100644 --- a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp +++ b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp @@ 
-17,290 +17,306 @@ #include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/wire/WireClient.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -namespace { + using testing::_; + using testing::DoAll; + using testing::Mock; + using testing::Return; + using testing::SaveArg; + using testing::StrEq; + using testing::StrictMock; - // Mock classes to add expectations on the wire calling callbacks - class MockDeviceErrorCallback { + namespace { + + // Mock classes to add expectations on the wire calling callbacks + class MockDeviceErrorCallback { + public: + MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata)); + }; + + std::unique_ptr> mockDeviceErrorCallback; + void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) { + mockDeviceErrorCallback->Call(type, message, userdata); + } + + class MockDevicePopErrorScopeCallback { + public: + MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata)); + }; + + std::unique_ptr> + mockDevicePopErrorScopeCallback; + void ToMockDevicePopErrorScopeCallback(WGPUErrorType type, + const char* message, + void* userdata) { + mockDevicePopErrorScopeCallback->Call(type, message, userdata); + } + + class MockDeviceLoggingCallback { + public: + MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata)); + }; + + std::unique_ptr> mockDeviceLoggingCallback; + void ToMockDeviceLoggingCallback(WGPULoggingType type, + const char* message, + void* userdata) { + mockDeviceLoggingCallback->Call(type, message, userdata); + } + + class MockDeviceLostCallback { + public: + MOCK_METHOD(void, + Call, + (WGPUDeviceLostReason reason, const char* message, void* userdata)); + }; + + std::unique_ptr> mockDeviceLostCallback; + void ToMockDeviceLostCallback(WGPUDeviceLostReason reason, + const char* message, + void* userdata) { + mockDeviceLostCallback->Call(reason, message, userdata); + } + + } // anonymous 
namespace + + class WireErrorCallbackTests : public WireTest { public: - MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata)); + WireErrorCallbackTests() { + } + ~WireErrorCallbackTests() override = default; + + void SetUp() override { + WireTest::SetUp(); + + mockDeviceErrorCallback = std::make_unique>(); + mockDeviceLoggingCallback = std::make_unique>(); + mockDevicePopErrorScopeCallback = + std::make_unique>(); + mockDeviceLostCallback = std::make_unique>(); + } + + void TearDown() override { + WireTest::TearDown(); + + mockDeviceErrorCallback = nullptr; + mockDeviceLoggingCallback = nullptr; + mockDevicePopErrorScopeCallback = nullptr; + mockDeviceLostCallback = nullptr; + } + + void FlushServer() { + WireTest::FlushServer(); + + Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback); + Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback); + } }; - std::unique_ptr> mockDeviceErrorCallback; - void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) { - mockDeviceErrorCallback->Call(type, message, userdata); + // Test the return wire for device error callbacks + TEST_F(WireErrorCallbackTests, DeviceErrorCallback) { + wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this); + + // Setting the error callback should stay on the client side and do nothing + FlushClient(); + + // Calling the callback on the server side will result in the callback being called on the + // client side + api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation, + "Some error message"); + + EXPECT_CALL(*mockDeviceErrorCallback, + Call(WGPUErrorType_Validation, StrEq("Some error message"), this)) + .Times(1); + + FlushServer(); } - class MockDevicePopErrorScopeCallback { - public: - MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata)); - }; + // Test the return wire for device user warning callbacks + TEST_F(WireErrorCallbackTests, 
DeviceLoggingCallback) { + wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this); - std::unique_ptr> mockDevicePopErrorScopeCallback; - void ToMockDevicePopErrorScopeCallback(WGPUErrorType type, - const char* message, - void* userdata) { - mockDevicePopErrorScopeCallback->Call(type, message, userdata); + // Setting the injected warning callback should stay on the client side and do nothing + FlushClient(); + + // Calling the callback on the server side will result in the callback being called on the + // client side + api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message"); + + EXPECT_CALL(*mockDeviceLoggingCallback, + Call(WGPULoggingType_Info, StrEq("Some message"), this)) + .Times(1); + + FlushServer(); } - class MockDeviceLoggingCallback { - public: - MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata)); - }; - - std::unique_ptr> mockDeviceLoggingCallback; - void ToMockDeviceLoggingCallback(WGPULoggingType type, const char* message, void* userdata) { - mockDeviceLoggingCallback->Call(type, message, userdata); - } - - class MockDeviceLostCallback { - public: - MOCK_METHOD(void, Call, (WGPUDeviceLostReason reason, const char* message, void* userdata)); - }; - - std::unique_ptr> mockDeviceLostCallback; - void ToMockDeviceLostCallback(WGPUDeviceLostReason reason, - const char* message, - void* userdata) { - mockDeviceLostCallback->Call(reason, message, userdata); - } - -} // anonymous namespace - -class WireErrorCallbackTests : public WireTest { - public: - WireErrorCallbackTests() { - } - ~WireErrorCallbackTests() override = default; - - void SetUp() override { - WireTest::SetUp(); - - mockDeviceErrorCallback = std::make_unique>(); - mockDeviceLoggingCallback = std::make_unique>(); - mockDevicePopErrorScopeCallback = - std::make_unique>(); - mockDeviceLostCallback = std::make_unique>(); - } - - void TearDown() override { - WireTest::TearDown(); - - mockDeviceErrorCallback = 
nullptr; - mockDeviceLoggingCallback = nullptr; - mockDevicePopErrorScopeCallback = nullptr; - mockDeviceLostCallback = nullptr; - } - - void FlushServer() { - WireTest::FlushServer(); - - Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback); - Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback); - } -}; - -// Test the return wire for device error callbacks -TEST_F(WireErrorCallbackTests, DeviceErrorCallback) { - wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this); - - // Setting the error callback should stay on the client side and do nothing - FlushClient(); - - // Calling the callback on the server side will result in the callback being called on the - // client side - api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation, - "Some error message"); - - EXPECT_CALL(*mockDeviceErrorCallback, - Call(WGPUErrorType_Validation, StrEq("Some error message"), this)) - .Times(1); - - FlushServer(); -} - -// Test the return wire for device user warning callbacks -TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) { - wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this); - - // Setting the injected warning callback should stay on the client side and do nothing - FlushClient(); - - // Calling the callback on the server side will result in the callback being called on the - // client side - api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message"); - - EXPECT_CALL(*mockDeviceLoggingCallback, Call(WGPULoggingType_Info, StrEq("Some message"), this)) - .Times(1); - - FlushServer(); -} - -// Test the return wire for error scopes. 
-TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) { - EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); - wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); - FlushClient(); - - WGPUErrorCallback callback; - void* userdata; - EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)) - .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true))); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); - FlushClient(); - - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Validation, StrEq("Some error message"), this)) - .Times(1); - callback(WGPUErrorType_Validation, "Some error message", userdata); - FlushServer(); -} - -// Test the return wire for error scopes when callbacks return in a various orders. -TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) { - // Two error scopes are popped, and the first one returns first. - { - EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2); - wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + // Test the return wire for error scopes. 
+ TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) { + EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); FlushClient(); - WGPUErrorCallback callback1; - WGPUErrorCallback callback2; - void* userdata1; - void* userdata2; + WGPUErrorCallback callback; + void* userdata; EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)) - .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true))) - .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true))); + .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true))); wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1); FlushClient(); EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Validation, StrEq("First error message"), this)) + Call(WGPUErrorType_Validation, StrEq("Some error message"), this)) .Times(1); - callback1(WGPUErrorType_Validation, "First error message", userdata1); - FlushServer(); - - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1)) - .Times(1); - callback2(WGPUErrorType_Validation, "Second error message", userdata2); + callback(WGPUErrorType_Validation, "Some error message", userdata); FlushServer(); } - // Two error scopes are popped, and the second one returns first. - { - EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2); - wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + // Test the return wire for error scopes when callbacks return in various orders. + TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) { + // Two error scopes are popped, and the first one returns first. 
+ { + EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2); + wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + FlushClient(); + + WGPUErrorCallback callback1; + WGPUErrorCallback callback2; + void* userdata1; + void* userdata2; + EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)) + .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true))) + .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true))); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1); + FlushClient(); + + EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_Validation, StrEq("First error message"), this)) + .Times(1); + callback1(WGPUErrorType_Validation, "First error message", userdata1); + FlushServer(); + + EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1)) + .Times(1); + callback2(WGPUErrorType_Validation, "Second error message", userdata2); + FlushServer(); + } + + // Two error scopes are popped, and the second one returns first. 
+ { + EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2); + wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + FlushClient(); + + WGPUErrorCallback callback1; + WGPUErrorCallback callback2; + void* userdata1; + void* userdata2; + EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)) + .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true))) + .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true))); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1); + FlushClient(); + + EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1)) + .Times(1); + callback2(WGPUErrorType_Validation, "Second error message", userdata2); + FlushServer(); + + EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_Validation, StrEq("First error message"), this)) + .Times(1); + callback1(WGPUErrorType_Validation, "First error message", userdata1); + FlushServer(); + } + } + + // Test the return wire for error scopes in flight when the device is destroyed. + TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) { + EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); FlushClient(); - WGPUErrorCallback callback1; - WGPUErrorCallback callback2; - void* userdata1; - void* userdata2; + EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true)); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); + FlushClient(); + + // Incomplete callback called in Device destructor. This is resolved after the end of this + // test. 
+ EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_Unknown, ValidStringMessage(), this)) + .Times(1); + } + + // Test that registering a callback then wire disconnect calls the callback with + // DeviceLost. + TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) { + EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); + wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + + EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true)); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); + FlushClient(); + + EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this)) + .Times(1); + GetWireClient()->Disconnect(); + } + + // Test that registering a callback after wire disconnect calls the callback with + // DeviceLost. + TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) { + EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); + wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + FlushClient(); + + GetWireClient()->Disconnect(); + + EXPECT_CALL(*mockDevicePopErrorScopeCallback, + Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this)) + .Times(1); + wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); + } + + // Empty stack (We are emulating the errors that would be callback-ed from native). 
+ TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) { + WGPUErrorCallback callback; + void* userdata; EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)) - .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true))) - .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true))); + .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true))); wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1); FlushClient(); EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1)) + Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this)) .Times(1); - callback2(WGPUErrorType_Validation, "Second error message", userdata2); - FlushServer(); - - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Validation, StrEq("First error message"), this)) - .Times(1); - callback1(WGPUErrorType_Validation, "First error message", userdata1); + callback(WGPUErrorType_Validation, "No error scopes to pop", userdata); FlushServer(); } -} -// Test the return wire for error scopes in flight when the device is destroyed. 
-TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) { - EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); - wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); - FlushClient(); + // Test the return wire for device lost callback + TEST_F(WireErrorCallbackTests, DeviceLostCallback) { + wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this); - EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true)); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); - FlushClient(); + // Setting the error callback should stay on the client side and do nothing + FlushClient(); - // Incomplete callback called in Device destructor. This is resolved after the end of this test. - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Unknown, ValidStringMessage(), this)) - .Times(1); -} + // Calling the callback on the server side will result in the callback being called on the + // client side + api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, + "Some error message"); -// Test that registering a callback then wire disconnect calls the callback with -// DeviceLost. 
-TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) { - EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); - wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); + EXPECT_CALL(*mockDeviceLostCallback, + Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this)) + .Times(1); - EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true)); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); - FlushClient(); + FlushServer(); + } - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this)) - .Times(1); - GetWireClient()->Disconnect(); -} - -// Test that registering a callback after wire disconnect calls the callback with -// DeviceLost. -TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) { - EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1); - wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation); - FlushClient(); - - GetWireClient()->Disconnect(); - - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this)) - .Times(1); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); -} - -// Empty stack (We are emulating the errors that would be callback-ed from native). 
-TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) { - WGPUErrorCallback callback; - void* userdata; - EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)) - .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true))); - wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this); - FlushClient(); - - EXPECT_CALL(*mockDevicePopErrorScopeCallback, - Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this)) - .Times(1); - callback(WGPUErrorType_Validation, "No error scopes to pop", userdata); - FlushServer(); -} - -// Test the return wire for device lost callback -TEST_F(WireErrorCallbackTests, DeviceLostCallback) { - wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this); - - // Setting the error callback should stay on the client side and do nothing - FlushClient(); - - // Calling the callback on the server side will result in the callback being called on the - // client side - api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined, - "Some error message"); - - EXPECT_CALL(*mockDeviceLostCallback, - Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this)) - .Times(1); - - FlushServer(); -} +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp index 9e1ac58f0c..63a78c4835 100644 --- a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp +++ b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp @@ -14,76 +14,81 @@ #include "dawn/tests/unittests/wire/WireTest.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireExtensionTests : public WireTest { - public: - WireExtensionTests() { + using testing::_; + using testing::Invoke; + using testing::NotNull; + using testing::Return; + using testing::Unused; + + class WireExtensionTests : public WireTest { + public: + WireExtensionTests() { + } + ~WireExtensionTests() override = 
default;
+ };
+
+ // Serialize/Deserializes a chained struct correctly.
+ TEST_F(WireExtensionTests, ChainedStruct) {
+ WGPUShaderModuleDescriptor shaderModuleDesc = {};
+ WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+ WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+ EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+ FlushClient();
+
+ WGPUPrimitiveDepthClampingState clientExt = {};
+ clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+ clientExt.chain.next = nullptr;
+ clientExt.clampDepth = true;
+
+ WGPURenderPipelineDescriptor renderPipelineDesc = {};
+ renderPipelineDesc.vertex.module = shaderModule;
+ renderPipelineDesc.vertex.entryPoint = "main";
+ renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+
+ wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+ EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+ .WillOnce(Invoke(
+ [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+ const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+ serverDesc->primitive.nextInChain);
+ EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
+ EXPECT_EQ(ext->clampDepth, true);
+ EXPECT_EQ(ext->chain.next, nullptr);
+
+ return api.GetNewRenderPipeline();
+ }));
+ FlushClient();
}
- ~WireExtensionTests() override = default;
-};
-// Serialize/Deserializes a chained struct correctly.
-TEST_F(WireExtensionTests, ChainedStruct) {
- WGPUShaderModuleDescriptor shaderModuleDesc = {};
- WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
- WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
- EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
- FlushClient();
+ // Serialize/Deserializes multiple chained structs correctly.
+ TEST_F(WireExtensionTests, MutlipleChainedStructs) { + WGPUShaderModuleDescriptor shaderModuleDesc = {}; + WGPUShaderModule apiShaderModule = api.GetNewShaderModule(); + WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule)); + FlushClient(); - WGPUPrimitiveDepthClampingState clientExt = {}; - clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState; - clientExt.chain.next = nullptr; - clientExt.clampDepth = true; + WGPUPrimitiveDepthClampingState clientExt2 = {}; + clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState; + clientExt2.chain.next = nullptr; + clientExt2.clampDepth = false; - WGPURenderPipelineDescriptor renderPipelineDesc = {}; - renderPipelineDesc.vertex.module = shaderModule; - renderPipelineDesc.vertex.entryPoint = "main"; - renderPipelineDesc.primitive.nextInChain = &clientExt.chain; + WGPUPrimitiveDepthClampingState clientExt1 = {}; + clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState; + clientExt1.chain.next = &clientExt2.chain; + clientExt1.clampDepth = true; - wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); - EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) - .WillOnce(Invoke( - [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { - const auto* ext = reinterpret_cast( - serverDesc->primitive.nextInChain); - EXPECT_EQ(ext->chain.sType, clientExt.chain.sType); - EXPECT_EQ(ext->clampDepth, true); - EXPECT_EQ(ext->chain.next, nullptr); + WGPURenderPipelineDescriptor renderPipelineDesc = {}; + renderPipelineDesc.vertex.module = shaderModule; + renderPipelineDesc.vertex.entryPoint = "main"; + renderPipelineDesc.primitive.nextInChain = &clientExt1.chain; - return api.GetNewRenderPipeline(); - })); - FlushClient(); -} - -// Serialize/Deserializes multiple chained structs correctly. 
-TEST_F(WireExtensionTests, MutlipleChainedStructs) { - WGPUShaderModuleDescriptor shaderModuleDesc = {}; - WGPUShaderModule apiShaderModule = api.GetNewShaderModule(); - WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule)); - FlushClient(); - - WGPUPrimitiveDepthClampingState clientExt2 = {}; - clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState; - clientExt2.chain.next = nullptr; - clientExt2.clampDepth = false; - - WGPUPrimitiveDepthClampingState clientExt1 = {}; - clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState; - clientExt1.chain.next = &clientExt2.chain; - clientExt1.clampDepth = true; - - WGPURenderPipelineDescriptor renderPipelineDesc = {}; - renderPipelineDesc.vertex.module = shaderModule; - renderPipelineDesc.vertex.entryPoint = "main"; - renderPipelineDesc.primitive.nextInChain = &clientExt1.chain; - - wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); - EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) - .WillOnce(Invoke( - [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { + wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); + EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) + .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc) + -> WGPURenderPipeline { const auto* ext1 = reinterpret_cast( serverDesc->primitive.nextInChain); EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType); @@ -97,17 +102,17 @@ TEST_F(WireExtensionTests, MutlipleChainedStructs) { return api.GetNewRenderPipeline(); })); - FlushClient(); + FlushClient(); - // Swap the order of the chained structs. - renderPipelineDesc.primitive.nextInChain = &clientExt2.chain; - clientExt2.chain.next = &clientExt1.chain; - clientExt1.chain.next = nullptr; + // Swap the order of the chained structs. 
+ renderPipelineDesc.primitive.nextInChain = &clientExt2.chain; + clientExt2.chain.next = &clientExt1.chain; + clientExt1.chain.next = nullptr; - wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); - EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) - .WillOnce(Invoke( - [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { + wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); + EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) + .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc) + -> WGPURenderPipeline { const auto* ext2 = reinterpret_cast( serverDesc->primitive.nextInChain); EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType); @@ -121,121 +126,123 @@ TEST_F(WireExtensionTests, MutlipleChainedStructs) { return api.GetNewRenderPipeline(); })); - FlushClient(); -} + FlushClient(); + } -// Test that a chained struct with Invalid sType passes through as Invalid. -TEST_F(WireExtensionTests, InvalidSType) { - WGPUShaderModuleDescriptor shaderModuleDesc = {}; - WGPUShaderModule apiShaderModule = api.GetNewShaderModule(); - WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule)); - FlushClient(); + // Test that a chained struct with Invalid sType passes through as Invalid. 
+ TEST_F(WireExtensionTests, InvalidSType) { + WGPUShaderModuleDescriptor shaderModuleDesc = {}; + WGPUShaderModule apiShaderModule = api.GetNewShaderModule(); + WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule)); + FlushClient(); - WGPUPrimitiveDepthClampingState clientExt = {}; - clientExt.chain.sType = WGPUSType_Invalid; - clientExt.chain.next = nullptr; + WGPUPrimitiveDepthClampingState clientExt = {}; + clientExt.chain.sType = WGPUSType_Invalid; + clientExt.chain.next = nullptr; - WGPURenderPipelineDescriptor renderPipelineDesc = {}; - renderPipelineDesc.vertex.module = shaderModule; - renderPipelineDesc.vertex.entryPoint = "main"; - renderPipelineDesc.primitive.nextInChain = &clientExt.chain; + WGPURenderPipelineDescriptor renderPipelineDesc = {}; + renderPipelineDesc.vertex.module = shaderModule; + renderPipelineDesc.vertex.entryPoint = "main"; + renderPipelineDesc.primitive.nextInChain = &clientExt.chain; - wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); - EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) - .WillOnce(Invoke( - [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { - EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid); - EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr); - return api.GetNewRenderPipeline(); - })); - FlushClient(); -} + wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); + EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) + .WillOnce(Invoke( + [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { + EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid); + EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr); + return api.GetNewRenderPipeline(); + })); + FlushClient(); + } -// Test that a chained struct with unknown sType passes 
through as Invalid.
-TEST_F(WireExtensionTests, UnknownSType) {
- WGPUShaderModuleDescriptor shaderModuleDesc = {};
- WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
- WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
- EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
- FlushClient();
+ // Test that a chained struct with unknown sType passes through as Invalid.
+ TEST_F(WireExtensionTests, UnknownSType) {
+ WGPUShaderModuleDescriptor shaderModuleDesc = {};
+ WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+ WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+ EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+ FlushClient();
- WGPUPrimitiveDepthClampingState clientExt = {};
- clientExt.chain.sType = static_cast<WGPUSType>(-1);
- clientExt.chain.next = nullptr;
+ WGPUPrimitiveDepthClampingState clientExt = {};
+ clientExt.chain.sType = static_cast<WGPUSType>(-1);
+ clientExt.chain.next = nullptr;
- WGPURenderPipelineDescriptor renderPipelineDesc = {};
- renderPipelineDesc.vertex.module = shaderModule;
- renderPipelineDesc.vertex.entryPoint = "main";
- renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+ WGPURenderPipelineDescriptor renderPipelineDesc = {};
+ renderPipelineDesc.vertex.module = shaderModule;
+ renderPipelineDesc.vertex.entryPoint = "main";
+ renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
- wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
- EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
- .WillOnce(Invoke(
- [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
- EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
- EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
- return api.GetNewRenderPipeline();
- }));
- FlushClient();
-}
+ wgpuDeviceCreateRenderPipeline(device,
&renderPipelineDesc); + EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) + .WillOnce(Invoke( + [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { + EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid); + EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr); + return api.GetNewRenderPipeline(); + })); + FlushClient(); + } -// Test that if both an invalid and valid stype are passed on the chain, only the invalid -// sType passes through as Invalid. -TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) { - WGPUShaderModuleDescriptor shaderModuleDesc = {}; - WGPUShaderModule apiShaderModule = api.GetNewShaderModule(); - WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule)); - FlushClient(); + // Test that if both an invalid and valid stype are passed on the chain, only the invalid + // sType passes through as Invalid. 
+ TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) { + WGPUShaderModuleDescriptor shaderModuleDesc = {}; + WGPUShaderModule apiShaderModule = api.GetNewShaderModule(); + WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule)); + FlushClient(); - WGPUPrimitiveDepthClampingState clientExt2 = {}; - clientExt2.chain.sType = WGPUSType_Invalid; - clientExt2.chain.next = nullptr; + WGPUPrimitiveDepthClampingState clientExt2 = {}; + clientExt2.chain.sType = WGPUSType_Invalid; + clientExt2.chain.next = nullptr; - WGPUPrimitiveDepthClampingState clientExt1 = {}; - clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState; - clientExt1.chain.next = &clientExt2.chain; - clientExt1.clampDepth = true; + WGPUPrimitiveDepthClampingState clientExt1 = {}; + clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState; + clientExt1.chain.next = &clientExt2.chain; + clientExt1.clampDepth = true; - WGPURenderPipelineDescriptor renderPipelineDesc = {}; - renderPipelineDesc.vertex.module = shaderModule; - renderPipelineDesc.vertex.entryPoint = "main"; - renderPipelineDesc.primitive.nextInChain = &clientExt1.chain; + WGPURenderPipelineDescriptor renderPipelineDesc = {}; + renderPipelineDesc.vertex.module = shaderModule; + renderPipelineDesc.vertex.entryPoint = "main"; + renderPipelineDesc.primitive.nextInChain = &clientExt1.chain; - wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); - EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) - .WillOnce(Invoke( - [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { - const auto* ext = reinterpret_cast( - serverDesc->primitive.nextInChain); - EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType); - EXPECT_EQ(ext->clampDepth, true); + wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); + EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, 
NotNull())) + .WillOnce(Invoke( + [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { + const auto* ext = reinterpret_cast( + serverDesc->primitive.nextInChain); + EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType); + EXPECT_EQ(ext->clampDepth, true); - EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid); - EXPECT_EQ(ext->chain.next->next, nullptr); - return api.GetNewRenderPipeline(); - })); - FlushClient(); + EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid); + EXPECT_EQ(ext->chain.next->next, nullptr); + return api.GetNewRenderPipeline(); + })); + FlushClient(); - // Swap the order of the chained structs. - renderPipelineDesc.primitive.nextInChain = &clientExt2.chain; - clientExt2.chain.next = &clientExt1.chain; - clientExt1.chain.next = nullptr; + // Swap the order of the chained structs. + renderPipelineDesc.primitive.nextInChain = &clientExt2.chain; + clientExt2.chain.next = &clientExt1.chain; + clientExt1.chain.next = nullptr; - wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); - EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) - .WillOnce(Invoke( - [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { - EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid); + wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc); + EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull())) + .WillOnce(Invoke( + [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline { + EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid); - const auto* ext = reinterpret_cast( - serverDesc->primitive.nextInChain->next); - EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType); - EXPECT_EQ(ext->clampDepth, true); - EXPECT_EQ(ext->chain.next, nullptr); + const auto* ext = reinterpret_cast( + serverDesc->primitive.nextInChain->next); + EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType); + EXPECT_EQ(ext->clampDepth, true); + 
EXPECT_EQ(ext->chain.next, nullptr); - return api.GetNewRenderPipeline(); - })); - FlushClient(); -} + return api.GetNewRenderPipeline(); + })); + FlushClient(); + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp index fd3b25813d..e12bdd6a95 100644 --- a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp +++ b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp @@ -17,256 +17,273 @@ #include "dawn/wire/WireClient.h" #include "dawn/wire/WireServer.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireInjectDeviceTests : public WireTest { - public: - WireInjectDeviceTests() { - } - ~WireInjectDeviceTests() override = default; -}; + using testing::_; + using testing::Exactly; + using testing::Mock; + using testing::Return; -// Test that reserving and injecting a device makes calls on the client object forward to the -// server object correctly. -TEST_F(WireInjectDeviceTests, CallAfterReserveInject) { - ReservedDevice reservation = GetWireClient()->ReserveDevice(); + class WireInjectDeviceTests : public WireTest { + public: + WireInjectDeviceTests() { + } + ~WireInjectDeviceTests() override = default; + }; - WGPUDevice serverDevice = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); - - WGPUBufferDescriptor bufferDesc = {}; - wgpuDeviceCreateBuffer(reservation.device, &bufferDesc); - WGPUBuffer serverBuffer = api.GetNewBuffer(); - EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer)); - FlushClient(); - - // Called on shutdown. 
- EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) - .Times(Exactly(1)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)) - .Times(Exactly(1)); -} - -// Test that reserve correctly returns different IDs each time. -TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) { - ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); - ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); - - ASSERT_NE(reservation1.id, reservation2.id); - ASSERT_NE(reservation1.device, reservation2.device); -} - -// Test that injecting the same id without a destroy first fails. -TEST_F(WireInjectDeviceTests, InjectExistingID) { - ReservedDevice reservation = GetWireClient()->ReserveDevice(); - - WGPUDevice serverDevice = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); - - // ID already in use, call fails. - ASSERT_FALSE( - GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); - - // Called on shutdown. 
- EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) - .Times(Exactly(1)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)) - .Times(Exactly(1)); -} - -// Test that the server only borrows the device and does a single reference-release -TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) { - ReservedDevice reservation = GetWireClient()->ReserveDevice(); - - // Injecting the device adds a reference - WGPUDevice serverDevice = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); - - // Releasing the device removes a single reference and clears its error callbacks. - wgpuDeviceRelease(reservation.device); - EXPECT_CALL(api, DeviceRelease(serverDevice)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1); - FlushClient(); - - // Deleting the server doesn't release a second reference. - DeleteServer(); - Mock::VerifyAndClearExpectations(&api); -} - -// Test that it is an error to get the primary queue of a device before it has been -// injected on the server. -TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) { - ReservedDevice reservation = GetWireClient()->ReserveDevice(); - - wgpuDeviceGetQueue(reservation.device); - FlushClient(false); -} - -// Test that it is valid to get the primary queue of a device after it has been -// injected on the server. 
-TEST_F(WireInjectDeviceTests, GetQueueAfterInject) { - ReservedDevice reservation = GetWireClient()->ReserveDevice(); - - WGPUDevice serverDevice = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); - - wgpuDeviceGetQueue(reservation.device); - - WGPUQueue apiQueue = api.GetNewQueue(); - EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue)); - FlushClient(); - - // Called on shutdown. - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) - .Times(Exactly(1)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)) - .Times(Exactly(1)); -} - -// Test that the list of live devices can be reflected using GetDevice. -TEST_F(WireInjectDeviceTests, ReflectLiveDevices) { - // Reserve two devices. - ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); - ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); - - // Inject both devices. 
- - WGPUDevice serverDevice1 = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice1)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation)); - - WGPUDevice serverDevice2 = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice2)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation)); - - // Test that both devices can be reflected. - ASSERT_EQ(serverDevice1, GetWireServer()->GetDevice(reservation1.id, reservation1.generation)); - ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation)); - - // Release the first device - wgpuDeviceRelease(reservation1.device); - EXPECT_CALL(api, DeviceRelease(serverDevice1)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1); - FlushClient(); - - // The first device should no longer reflect, but the second should - ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation)); - ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation)); - - // Called on shutdown. 
- EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1); -} - -// This is a regression test where a second device reservation invalidated pointers into the -// KnownObjects std::vector of devices. The fix was to store pointers to heap allocated -// objects instead. -TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) { - // Reserve one device, inject it, and get the primary queue. - ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); - - WGPUDevice serverDevice1 = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice1)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation)); - - WGPUCommandEncoder commandEncoder = - wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr); - - WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder(); - EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _)) - .WillOnce(Return(serverCommandEncoder)); - FlushClient(); - - // Reserve a second device, and inject it. - ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); - - WGPUDevice serverDevice2 = api.GetNewDevice(); - EXPECT_CALL(api, DeviceReference(serverDevice2)); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _)); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _)); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _)); - ASSERT_TRUE( - GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation)); - - // Release the encoder. 
This should work without error because it stores a stable - // pointer to its device's list of child objects. On destruction, it removes itself from the - // list. - wgpuCommandEncoderRelease(commandEncoder); - EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder)); - FlushClient(); - - // Called on shutdown. - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1); - EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1); -} - -// Test that a device reservation can be reclaimed. This is necessary to -// avoid leaking ObjectIDs for reservations that are never injected. -TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) { - // Test that doing a reservation and full release is an error. - { + // Test that reserving and injecting a device makes calls on the client object forward to the + // server object correctly. 
+ TEST_F(WireInjectDeviceTests, CallAfterReserveInject) { ReservedDevice reservation = GetWireClient()->ReserveDevice(); + + WGPUDevice serverDevice = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); + + WGPUBufferDescriptor bufferDesc = {}; + wgpuDeviceCreateBuffer(reservation.device, &bufferDesc); + WGPUBuffer serverBuffer = api.GetNewBuffer(); + EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer)); + FlushClient(); + + // Called on shutdown. + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + } + + // Test that reserve correctly returns different IDs each time. + TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) { + ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); + ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); + + ASSERT_NE(reservation1.id, reservation2.id); + ASSERT_NE(reservation1.device, reservation2.device); + } + + // Test that injecting the same id without a destroy first fails. 
+ TEST_F(WireInjectDeviceTests, InjectExistingID) { + ReservedDevice reservation = GetWireClient()->ReserveDevice(); + + WGPUDevice serverDevice = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); + + // ID already in use, call fails. + ASSERT_FALSE( + GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); + + // Called on shutdown. + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + } + + // Test that the server only borrows the device and does a single reference-release + TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) { + ReservedDevice reservation = GetWireClient()->ReserveDevice(); + + // Injecting the device adds a reference + WGPUDevice serverDevice = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); + + // Releasing the device removes a single reference and clears its error callbacks. 
wgpuDeviceRelease(reservation.device); + EXPECT_CALL(api, DeviceRelease(serverDevice)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) + .Times(1); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1); + FlushClient(); + + // Deleting the server doesn't release a second reference. + DeleteServer(); + Mock::VerifyAndClearExpectations(&api); + } + + // Test that it is an error to get the primary queue of a device before it has been + // injected on the server. + TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) { + ReservedDevice reservation = GetWireClient()->ReserveDevice(); + + wgpuDeviceGetQueue(reservation.device); FlushClient(false); } - // Test that doing a reservation and then reclaiming it recycles the ID. - { - ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); - GetWireClient()->ReclaimDeviceReservation(reservation1); + // Test that it is valid to get the primary queue of a device after it has been + // injected on the server. + TEST_F(WireInjectDeviceTests, GetQueueAfterInject) { + ReservedDevice reservation = GetWireClient()->ReserveDevice(); + WGPUDevice serverDevice = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation)); + + wgpuDeviceGetQueue(reservation.device); + + WGPUQueue apiQueue = api.GetNewQueue(); + EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue)); + FlushClient(); + + // Called on shutdown. 
+ EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)) + .Times(Exactly(1)); + } + + // Test that the list of live devices can be reflected using GetDevice. + TEST_F(WireInjectDeviceTests, ReflectLiveDevices) { + // Reserve two devices. + ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); - // The ID is the same, but the generation is still different. - ASSERT_EQ(reservation1.id, reservation2.id); - ASSERT_NE(reservation1.generation, reservation2.generation); + // Inject both devices. - // No errors should occur. + WGPUDevice serverDevice1 = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice1)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation)); + + WGPUDevice serverDevice2 = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice2)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation)); + + // Test that both devices can be reflected. 
+ ASSERT_EQ(serverDevice1, + GetWireServer()->GetDevice(reservation1.id, reservation1.generation)); + ASSERT_EQ(serverDevice2, + GetWireServer()->GetDevice(reservation2.id, reservation2.generation)); + + // Release the first device + wgpuDeviceRelease(reservation1.device); + EXPECT_CALL(api, DeviceRelease(serverDevice1)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)) + .Times(1); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1); FlushClient(); + + // The first device should no longer reflect, but the second should + ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation)); + ASSERT_EQ(serverDevice2, + GetWireServer()->GetDevice(reservation2.id, reservation2.generation)); + + // Called on shutdown. + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)) + .Times(1); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1); } -} + + // This is a regression test where a second device reservation invalidated pointers into the + // KnownObjects std::vector of devices. The fix was to store pointers to heap allocated + // objects instead. + TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) { + // Reserve one device, inject it, and get the primary queue. 
+ ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); + + WGPUDevice serverDevice1 = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice1)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation)); + + WGPUCommandEncoder commandEncoder = + wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr); + + WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder(); + EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _)) + .WillOnce(Return(serverCommandEncoder)); + FlushClient(); + + // Reserve a second device, and inject it. + ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); + + WGPUDevice serverDevice2 = api.GetNewDevice(); + EXPECT_CALL(api, DeviceReference(serverDevice2)); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _)); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _)); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _)); + ASSERT_TRUE( + GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation)); + + // Release the encoder. This should work without error because it stores a stable + // pointer to its device's list of child objects. On destruction, it removes itself from the + // list. + wgpuCommandEncoderRelease(commandEncoder); + EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder)); + FlushClient(); + + // Called on shutdown. 
+ EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)) + .Times(1); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1); + EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)) + .Times(1); + EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1); + EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1); + } + + // Test that a device reservation can be reclaimed. This is necessary to + // avoid leaking ObjectIDs for reservations that are never injected. + TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) { + // Test that doing a reservation and full release is an error. + { + ReservedDevice reservation = GetWireClient()->ReserveDevice(); + wgpuDeviceRelease(reservation.device); + FlushClient(false); + } + + // Test that doing a reservation and then reclaiming it recycles the ID. + { + ReservedDevice reservation1 = GetWireClient()->ReserveDevice(); + GetWireClient()->ReclaimDeviceReservation(reservation1); + + ReservedDevice reservation2 = GetWireClient()->ReserveDevice(); + + // The ID is the same, but the generation is still different. + ASSERT_EQ(reservation1.id, reservation2.id); + ASSERT_NE(reservation1.generation, reservation2.generation); + + // No errors should occur. 
+ FlushClient(); + } + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp index eef7671bc9..64c8b1b8c7 100644 --- a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp +++ b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp @@ -17,10 +17,11 @@ #include "dawn/wire/WireClient.h" #include "dawn/wire/WireServer.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { namespace { -namespace { + using testing::Mock; + using testing::NotNull; + using testing::Return; class WireInjectInstanceTests : public WireTest { public: @@ -116,4 +117,6 @@ namespace { } } -} // anonymous namespace + // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented. + // NOLINTNEXTLINE(readability/namespace) +}} // namespace dawn::wire:: diff --git a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp index 5bcc8a688c..6ba058b554 100644 --- a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp +++ b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp @@ -17,100 +17,103 @@ #include "dawn/wire/WireClient.h" #include "dawn/wire/WireServer.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireInjectSwapChainTests : public WireTest { - public: - WireInjectSwapChainTests() { - } - ~WireInjectSwapChainTests() override = default; -}; + using testing::Mock; -// Test that reserving and injecting a swapchain makes calls on the client object forward to the -// server object correctly. 
-TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) { - ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); + class WireInjectSwapChainTests : public WireTest { + public: + WireInjectSwapChainTests() { + } + ~WireInjectSwapChainTests() override = default; + }; - WGPUSwapChain apiSwapchain = api.GetNewSwapChain(); - EXPECT_CALL(api, SwapChainReference(apiSwapchain)); - ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, - reservation.generation, reservation.deviceId, - reservation.deviceGeneration)); - - wgpuSwapChainPresent(reservation.swapchain); - EXPECT_CALL(api, SwapChainPresent(apiSwapchain)); - FlushClient(); -} - -// Test that reserve correctly returns different IDs each time. -TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) { - ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device); - ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device); - - ASSERT_NE(reservation1.id, reservation2.id); - ASSERT_NE(reservation1.swapchain, reservation2.swapchain); -} - -// Test that injecting the same id without a destroy first fails. -TEST_F(WireInjectSwapChainTests, InjectExistingID) { - ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); - - WGPUSwapChain apiSwapchain = api.GetNewSwapChain(); - EXPECT_CALL(api, SwapChainReference(apiSwapchain)); - ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, - reservation.generation, reservation.deviceId, - reservation.deviceGeneration)); - - // ID already in use, call fails. 
- ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, - reservation.generation, reservation.deviceId, - reservation.deviceGeneration)); -} - -// Test that the server only borrows the swapchain and does a single reference-release -TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) { - ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); - - // Injecting the swapchain adds a reference - WGPUSwapChain apiSwapchain = api.GetNewSwapChain(); - EXPECT_CALL(api, SwapChainReference(apiSwapchain)); - ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, - reservation.generation, reservation.deviceId, - reservation.deviceGeneration)); - - // Releasing the swapchain removes a single reference. - wgpuSwapChainRelease(reservation.swapchain); - EXPECT_CALL(api, SwapChainRelease(apiSwapchain)); - FlushClient(); - - // Deleting the server doesn't release a second reference. - DeleteServer(); - Mock::VerifyAndClearExpectations(&api); -} - -// Test that a swapchain reservation can be reclaimed. This is necessary to -// avoid leaking ObjectIDs for reservations that are never injected. -TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) { - // Test that doing a reservation and full release is an error. - { + // Test that reserving and injecting a swapchain makes calls on the client object forward to the + // server object correctly. + TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) { ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); - wgpuSwapChainRelease(reservation.swapchain); - FlushClient(false); - } - // Test that doing a reservation and then reclaiming it recycles the ID. 
- { - ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device); - GetWireClient()->ReclaimSwapChainReservation(reservation1); + WGPUSwapChain apiSwapchain = api.GetNewSwapChain(); + EXPECT_CALL(api, SwapChainReference(apiSwapchain)); + ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); - ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device); - - // The ID is the same, but the generation is still different. - ASSERT_EQ(reservation1.id, reservation2.id); - ASSERT_NE(reservation1.generation, reservation2.generation); - - // No errors should occur. + wgpuSwapChainPresent(reservation.swapchain); + EXPECT_CALL(api, SwapChainPresent(apiSwapchain)); FlushClient(); } -} + + // Test that reserve correctly returns different IDs each time. + TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) { + ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device); + ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device); + + ASSERT_NE(reservation1.id, reservation2.id); + ASSERT_NE(reservation1.swapchain, reservation2.swapchain); + } + + // Test that injecting the same id without a destroy first fails. + TEST_F(WireInjectSwapChainTests, InjectExistingID) { + ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); + + WGPUSwapChain apiSwapchain = api.GetNewSwapChain(); + EXPECT_CALL(api, SwapChainReference(apiSwapchain)); + ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); + + // ID already in use, call fails. 
+ ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); + } + + // Test that the server only borrows the swapchain and does a single reference-release + TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) { + ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); + + // Injecting the swapchain adds a reference + WGPUSwapChain apiSwapchain = api.GetNewSwapChain(); + EXPECT_CALL(api, SwapChainReference(apiSwapchain)); + ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); + + // Releasing the swapchain removes a single reference. + wgpuSwapChainRelease(reservation.swapchain); + EXPECT_CALL(api, SwapChainRelease(apiSwapchain)); + FlushClient(); + + // Deleting the server doesn't release a second reference. + DeleteServer(); + Mock::VerifyAndClearExpectations(&api); + } + + // Test that a swapchain reservation can be reclaimed. This is necessary to + // avoid leaking ObjectIDs for reservations that are never injected. + TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) { + // Test that doing a reservation and full release is an error. + { + ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device); + wgpuSwapChainRelease(reservation.swapchain); + FlushClient(false); + } + + // Test that doing a reservation and then reclaiming it recycles the ID. + { + ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device); + GetWireClient()->ReclaimSwapChainReservation(reservation1); + + ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device); + + // The ID is the same, but the generation is still different. + ASSERT_EQ(reservation1.id, reservation2.id); + ASSERT_NE(reservation1.generation, reservation2.generation); + + // No errors should occur. 
+ FlushClient(); + } + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp index b7f06e45ec..a15fd6c42b 100644 --- a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp +++ b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp @@ -17,98 +17,106 @@ #include "dawn/wire/WireClient.h" #include "dawn/wire/WireServer.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireInjectTextureTests : public WireTest { - public: - WireInjectTextureTests() { - } - ~WireInjectTextureTests() override = default; -}; + using testing::Mock; + using testing::Return; -// Test that reserving and injecting a texture makes calls on the client object forward to the -// server object correctly. -TEST_F(WireInjectTextureTests, CallAfterReserveInject) { - ReservedTexture reservation = GetWireClient()->ReserveTexture(device); + class WireInjectTextureTests : public WireTest { + public: + WireInjectTextureTests() { + } + ~WireInjectTextureTests() override = default; + }; - WGPUTexture apiTexture = api.GetNewTexture(); - EXPECT_CALL(api, TextureReference(apiTexture)); - ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation, - reservation.deviceId, reservation.deviceGeneration)); - - wgpuTextureCreateView(reservation.texture, nullptr); - WGPUTextureView apiPlaceholderView = api.GetNewTextureView(); - EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiPlaceholderView)); - FlushClient(); -} - -// Test that reserve correctly returns different IDs each time. 
-TEST_F(WireInjectTextureTests, ReserveDifferentIDs) { - ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device); - ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device); - - ASSERT_NE(reservation1.id, reservation2.id); - ASSERT_NE(reservation1.texture, reservation2.texture); -} - -// Test that injecting the same id without a destroy first fails. -TEST_F(WireInjectTextureTests, InjectExistingID) { - ReservedTexture reservation = GetWireClient()->ReserveTexture(device); - - WGPUTexture apiTexture = api.GetNewTexture(); - EXPECT_CALL(api, TextureReference(apiTexture)); - ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation, - reservation.deviceId, reservation.deviceGeneration)); - - // ID already in use, call fails. - ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation, - reservation.deviceId, - reservation.deviceGeneration)); -} - -// Test that the server only borrows the texture and does a single reference-release -TEST_F(WireInjectTextureTests, InjectedTextureLifetime) { - ReservedTexture reservation = GetWireClient()->ReserveTexture(device); - - // Injecting the texture adds a reference - WGPUTexture apiTexture = api.GetNewTexture(); - EXPECT_CALL(api, TextureReference(apiTexture)); - ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation, - reservation.deviceId, reservation.deviceGeneration)); - - // Releasing the texture removes a single reference. - wgpuTextureRelease(reservation.texture); - EXPECT_CALL(api, TextureRelease(apiTexture)); - FlushClient(); - - // Deleting the server doesn't release a second reference. - DeleteServer(); - Mock::VerifyAndClearExpectations(&api); -} - -// Test that a texture reservation can be reclaimed. This is necessary to -// avoid leaking ObjectIDs for reservations that are never injected. 
-TEST_F(WireInjectTextureTests, ReclaimTextureReservation) { - // Test that doing a reservation and full release is an error. - { + // Test that reserving and injecting a texture makes calls on the client object forward to the + // server object correctly. + TEST_F(WireInjectTextureTests, CallAfterReserveInject) { ReservedTexture reservation = GetWireClient()->ReserveTexture(device); - wgpuTextureRelease(reservation.texture); - FlushClient(false); - } - // Test that doing a reservation and then reclaiming it recycles the ID. - { - ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device); - GetWireClient()->ReclaimTextureReservation(reservation1); + WGPUTexture apiTexture = api.GetNewTexture(); + EXPECT_CALL(api, TextureReference(apiTexture)); + ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); - ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device); - - // The ID is the same, but the generation is still different. - ASSERT_EQ(reservation1.id, reservation2.id); - ASSERT_NE(reservation1.generation, reservation2.generation); - - // No errors should occur. + wgpuTextureCreateView(reservation.texture, nullptr); + WGPUTextureView apiPlaceholderView = api.GetNewTextureView(); + EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)) + .WillOnce(Return(apiPlaceholderView)); FlushClient(); } -} + + // Test that reserve correctly returns different IDs each time. + TEST_F(WireInjectTextureTests, ReserveDifferentIDs) { + ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device); + ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device); + + ASSERT_NE(reservation1.id, reservation2.id); + ASSERT_NE(reservation1.texture, reservation2.texture); + } + + // Test that injecting the same id without a destroy first fails. 
+ TEST_F(WireInjectTextureTests, InjectExistingID) { + ReservedTexture reservation = GetWireClient()->ReserveTexture(device); + + WGPUTexture apiTexture = api.GetNewTexture(); + EXPECT_CALL(api, TextureReference(apiTexture)); + ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); + + // ID already in use, call fails. + ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); + } + + // Test that the server only borrows the texture and does a single reference-release + TEST_F(WireInjectTextureTests, InjectedTextureLifetime) { + ReservedTexture reservation = GetWireClient()->ReserveTexture(device); + + // Injecting the texture adds a reference + WGPUTexture apiTexture = api.GetNewTexture(); + EXPECT_CALL(api, TextureReference(apiTexture)); + ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, + reservation.generation, reservation.deviceId, + reservation.deviceGeneration)); + + // Releasing the texture removes a single reference. + wgpuTextureRelease(reservation.texture); + EXPECT_CALL(api, TextureRelease(apiTexture)); + FlushClient(); + + // Deleting the server doesn't release a second reference. + DeleteServer(); + Mock::VerifyAndClearExpectations(&api); + } + + // Test that a texture reservation can be reclaimed. This is necessary to + // avoid leaking ObjectIDs for reservations that are never injected. + TEST_F(WireInjectTextureTests, ReclaimTextureReservation) { + // Test that doing a reservation and full release is an error. + { + ReservedTexture reservation = GetWireClient()->ReserveTexture(device); + wgpuTextureRelease(reservation.texture); + FlushClient(false); + } + + // Test that doing a reservation and then reclaiming it recycles the ID. 
+ { + ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device); + GetWireClient()->ReclaimTextureReservation(reservation1); + + ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device); + + // The ID is the same, but the generation is still different. + ASSERT_EQ(reservation1.id, reservation2.id); + ASSERT_NE(reservation1.generation, reservation2.generation); + + // No errors should occur. + FlushClient(); + } + } + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp index 7a98c27e6a..9ef1e29cc5 100644 --- a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp +++ b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp @@ -23,10 +23,16 @@ #include "webgpu/webgpu_cpp.h" -namespace { +namespace dawn::wire { namespace { - using namespace testing; - using namespace dawn::wire; + using testing::Invoke; + using testing::InvokeWithoutArgs; + using testing::MockCallback; + using testing::NotNull; + using testing::Return; + using testing::SetArgPointee; + using testing::StrEq; + using testing::WithArg; class WireInstanceBasicTest : public WireTest {}; class WireInstanceTests : public WireTest { @@ -284,4 +290,6 @@ namespace { GetWireClient()->Disconnect(); } -} // anonymous namespace + // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented. 
+ // NOLINTNEXTLINE(readability/namespace) +}} // namespace dawn::wire:: diff --git a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp index 597f4aca9d..484c867d48 100644 --- a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp +++ b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp @@ -19,1036 +19,1063 @@ #include "dawn/wire/client/ClientMemoryTransferService_mock.h" #include "dawn/wire/server/ServerMemoryTransferService_mock.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -namespace { + using testing::_; + using testing::Eq; + using testing::InvokeWithoutArgs; + using testing::Mock; + using testing::Pointee; + using testing::Return; + using testing::StrictMock; + using testing::WithArg; - // Mock class to add expectations on the wire calling callbacks - class MockBufferMapCallback { + namespace { + + // Mock class to add expectations on the wire calling callbacks + class MockBufferMapCallback { + public: + MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata)); + }; + + std::unique_ptr> mockBufferMapCallback; + void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) { + mockBufferMapCallback->Call(status, userdata); + } + + } // anonymous namespace + + // WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping. + // They test the basic success and error cases for buffer mapping, and they test + // mocked failures of each fallible MemoryTransferService method that an embedder + // could implement. + // The test harness defines multiple helpers for expecting operations on Read/Write handles + // and for mocking failures. The helpers are designed such that for a given run of a test, + // a Serialization expection has a corresponding Deserialization expectation for which the + // serialized data must match. 
+ // There are tests which check for Success for every mapping operation which mock an entire + // mapping operation from map to unmap, and add all MemoryTransferService expectations. Tests + // which check for errors perform the same mapping operations but insert mocked failures for + // various mapping or MemoryTransferService operations. + class WireMemoryTransferServiceTests : public WireTest { public: - MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata)); + WireMemoryTransferServiceTests() { + } + ~WireMemoryTransferServiceTests() override = default; + + client::MemoryTransferService* GetClientMemoryTransferService() override { + return &clientMemoryTransferService; + } + + server::MemoryTransferService* GetServerMemoryTransferService() override { + return &serverMemoryTransferService; + } + + void SetUp() override { + WireTest::SetUp(); + + mockBufferMapCallback = std::make_unique>(); + + // TODO(enga): Make this thread-safe. + mBufferContent++; + mMappedBufferContent = 0; + mUpdatedBufferContent++; + mSerializeCreateInfo++; + mReadHandleSerializeDataInfo++; + mWriteHandleSerializeDataInfo++; + } + + void TearDown() override { + WireTest::TearDown(); + + // Delete mock so that expectations are checked + mockBufferMapCallback = nullptr; + } + + void FlushClient(bool success = true) { + WireTest::FlushClient(success); + Mock::VerifyAndClearExpectations(&serverMemoryTransferService); + } + + void FlushServer(bool success = true) { + WireTest::FlushServer(success); + + Mock::VerifyAndClearExpectations(&mockBufferMapCallback); + Mock::VerifyAndClearExpectations(&clientMemoryTransferService); + } + + protected: + using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle; + using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle; + using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle; + using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle; + + std::pair 
CreateBuffer( + WGPUBufferUsage usage = WGPUBufferUsage_None) { + WGPUBufferDescriptor descriptor = {}; + descriptor.size = kBufferSize; + descriptor.usage = usage; + + WGPUBuffer apiBuffer = api.GetNewBuffer(); + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)) + .WillOnce(Return(apiBuffer)) + .RetiresOnSaturation(); + + return std::make_pair(apiBuffer, buffer); + } + + std::pair CreateBufferMapped( + WGPUBufferUsage usage = WGPUBufferUsage_None) { + WGPUBufferDescriptor descriptor = {}; + descriptor.size = sizeof(mBufferContent); + descriptor.mappedAtCreation = true; + descriptor.usage = usage; + + WGPUBuffer apiBuffer = api.GetNewBuffer(); + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent))) + .WillOnce(Return(&mMappedBufferContent)); + + return std::make_pair(apiBuffer, buffer); + } + + ClientReadHandle* ExpectReadHandleCreation() { + // Create the handle first so we can use it in later expectations. 
+ ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle(); + + EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent))) + .WillOnce(InvokeWithoutArgs([=]() { return handle; })); + + return handle; + } + + void MockReadHandleCreationFailure() { + EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent))) + .WillOnce(InvokeWithoutArgs([=]() { return nullptr; })); + } + + void ExpectReadHandleSerialization(ClientReadHandle* handle) { + EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle)) + .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); })); + EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _)) + .WillOnce(WithArg<1>([&](void* serializePointer) { + memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo)); + return sizeof(mSerializeCreateInfo); + })); + } + + ServerReadHandle* ExpectServerReadHandleDeserialize() { + // Create the handle first so we can use it in later expectations. 
+ ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle(); + + EXPECT_CALL(serverMemoryTransferService, + OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)), + sizeof(mSerializeCreateInfo), _)) + .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) { + *readHandle = handle; + return true; + })); + + return handle; + } + + void MockServerReadHandleDeserializeFailure() { + EXPECT_CALL(serverMemoryTransferService, + OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)), + sizeof(mSerializeCreateInfo), _)) + .WillOnce(InvokeWithoutArgs([&]() { return false; })); + } + + void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) { + EXPECT_CALL(serverMemoryTransferService, + OnReadHandleSizeOfSerializeDataUpdate(handle, _, _)) + .WillOnce( + InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); })); + EXPECT_CALL(serverMemoryTransferService, + OnReadHandleSerializeDataUpdate(handle, _, _, _, _)) + .WillOnce(WithArg<4>([&](void* serializePointer) { + memcpy(serializePointer, &mReadHandleSerializeDataInfo, + sizeof(mReadHandleSerializeDataInfo)); + return sizeof(mReadHandleSerializeDataInfo); + })); + } + + void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle, + uint32_t* mappedData) { + EXPECT_CALL( + clientMemoryTransferService, + OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)), + sizeof(mReadHandleSerializeDataInfo), _, _)) + .WillOnce(Return(true)); + } + + void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) { + EXPECT_CALL( + clientMemoryTransferService, + OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)), + sizeof(mReadHandleSerializeDataInfo), _, _)) + .WillOnce(Return(false)); + } + + ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) { + // Create the handle first so we can use it in later expectations. 
+ ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle(); + + EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent))) + .WillOnce(InvokeWithoutArgs([=]() { return handle; })); + if (mappedAtCreation) { + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle)) + .WillOnce(Return(&mBufferContent)); + } + + return handle; + } + + void MockWriteHandleCreationFailure() { + EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent))) + .WillOnce(InvokeWithoutArgs([=]() { return nullptr; })); + } + + void ExpectWriteHandleSerialization(ClientWriteHandle* handle) { + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle)) + .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); })); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _)) + .WillOnce(WithArg<1>([&](void* serializePointer) { + memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo)); + return sizeof(mSerializeCreateInfo); + })); + } + + ServerWriteHandle* ExpectServerWriteHandleDeserialization() { + // Create the handle first so it can be used in later expectations. 
+ ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle(); + + EXPECT_CALL(serverMemoryTransferService, + OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)), + sizeof(mSerializeCreateInfo), _)) + .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) { + *writeHandle = handle; + return true; + })); + + return handle; + } + + void MockServerWriteHandleDeserializeFailure() { + EXPECT_CALL(serverMemoryTransferService, + OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)), + sizeof(mSerializeCreateInfo), _)) + .WillOnce(Return(false)); + } + + void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) { + EXPECT_CALL(clientMemoryTransferService, + OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _)) + .WillOnce( + InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); })); + EXPECT_CALL(clientMemoryTransferService, + OnWriteHandleSerializeDataUpdate(handle, _, _, _)) + .WillOnce(WithArg<1>([&](void* serializePointer) { + memcpy(serializePointer, &mWriteHandleSerializeDataInfo, + sizeof(mWriteHandleSerializeDataInfo)); + return sizeof(mWriteHandleSerializeDataInfo); + })); + } + + void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle, + uint32_t expectedData) { + EXPECT_CALL(serverMemoryTransferService, + OnWriteHandleDeserializeDataUpdate( + handle, Pointee(Eq(mWriteHandleSerializeDataInfo)), + sizeof(mWriteHandleSerializeDataInfo), _, _)) + .WillOnce(Return(true)); + } + + void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) { + EXPECT_CALL(serverMemoryTransferService, + OnWriteHandleDeserializeDataUpdate( + handle, Pointee(Eq(mWriteHandleSerializeDataInfo)), + sizeof(mWriteHandleSerializeDataInfo), _, _)) + .WillOnce(Return(false)); + } + + // Arbitrary values used within tests to check if serialized data is correctly passed + // between the client and server. 
The static data changes between runs of the tests and + // test expectations will check that serialized values are passed to the respective + // deserialization function. + static uint32_t mSerializeCreateInfo; + static uint32_t mReadHandleSerializeDataInfo; + static uint32_t mWriteHandleSerializeDataInfo; + + // Represents the buffer contents for the test. + static uint32_t mBufferContent; + + static constexpr size_t kBufferSize = sizeof(mBufferContent); + + // The client's zero-initialized buffer for writing. + uint32_t mMappedBufferContent = 0; + + // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client + // performs a write. Test expectations should check that |mBufferContent == + // mUpdatedBufferContent| after all writes are flushed. + static uint32_t mUpdatedBufferContent; + + StrictMock serverMemoryTransferService; + StrictMock clientMemoryTransferService; }; - std::unique_ptr> mockBufferMapCallback; - void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) { - mockBufferMapCallback->Call(status, userdata); + uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337; + uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349; + uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242; + uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394; + uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235; + + // Test successful mapping for reading. + TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a ReadHandle on creation. + ClientReadHandle* clientHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); + + // The server should deserialize the read handle from the client and then serialize + // an initialization message. 
+ ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); + + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // The handle serialize data update on mapAsync cmd + ExpectServerReadHandleSerializeDataUpdate(serverHandle); + + // Mock a successful callback + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle)) + .WillOnce(Return(&mBufferContent)); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&mBufferContent)); + + FlushClient(); + + // The client receives a successful callback. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + // The client should receive the handle data update message from the server. + ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent); + + FlushServer(); + + wgpuBufferUnmap(buffer); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); } -} // anonymous namespace + // Test ReadHandle destroy behavior + TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; -// WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping. -// They test the basic success and error cases for buffer mapping, and they test -// mocked failures of each fallible MemoryTransferService method that an embedder -// could implement. 
-// The test harness defines multiple helpers for expecting operations on Read/Write handles -// and for mocking failures. The helpers are designed such that for a given run of a test, -// a Serialization expection has a corresponding Deserialization expectation for which the -// serialized data must match. -// There are tests which check for Success for every mapping operation which mock an entire mapping -// operation from map to unmap, and add all MemoryTransferService expectations. -// Tests which check for errors perform the same mapping operations but insert mocked failures for -// various mapping or MemoryTransferService operations. -class WireMemoryTransferServiceTests : public WireTest { - public: - WireMemoryTransferServiceTests() { - } - ~WireMemoryTransferServiceTests() override = default; + // The client should create and serialize a ReadHandle on creation. + ClientReadHandle* clientHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientHandle); - client::MemoryTransferService* GetClientMemoryTransferService() override { - return &clientMemoryTransferService; + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); + + // The server should deserialize the read handle from the client and then serialize + // an initialization message. + ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); + + FlushClient(); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); + wgpuBufferDestroy(buffer); + EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); + EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); + + FlushClient(); } - server::MemoryTransferService* GetServerMemoryTransferService() override { - return &serverMemoryTransferService; + // Test unsuccessful mapping for reading. 
+ TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a ReadHandle on creation. + ClientReadHandle* clientHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientHandle); + + // The server should deserialize the ReadHandle from the client. + ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // Mock a failed callback. + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); + + FlushClient(); + + // The client receives an error callback. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + + FlushServer(); + + wgpuBufferUnmap(buffer); + + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); } - void SetUp() override { - WireTest::SetUp(); + // Test ReadHandle creation failure. + TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) { + // Mock a ReadHandle creation failure + MockReadHandleCreationFailure(); - mockBufferMapCallback = std::make_unique>(); - - // TODO(enga): Make this thread-safe. 
- mBufferContent++; - mMappedBufferContent = 0; - mUpdatedBufferContent++; - mSerializeCreateInfo++; - mReadHandleSerializeDataInfo++; - mWriteHandleSerializeDataInfo++; - } - - void TearDown() override { - WireTest::TearDown(); - - // Delete mock so that expectations are checked - mockBufferMapCallback = nullptr; - } - - void FlushClient(bool success = true) { - WireTest::FlushClient(success); - Mock::VerifyAndClearExpectations(&serverMemoryTransferService); - } - - void FlushServer(bool success = true) { - WireTest::FlushServer(success); - - Mock::VerifyAndClearExpectations(&mockBufferMapCallback); - Mock::VerifyAndClearExpectations(&clientMemoryTransferService); - } - - protected: - using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle; - using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle; - using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle; - using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle; - - std::pair CreateBuffer(WGPUBufferUsage usage = WGPUBufferUsage_None) { WGPUBufferDescriptor descriptor = {}; descriptor.size = kBufferSize; - descriptor.usage = usage; + descriptor.usage = WGPUBufferUsage_MapRead; - WGPUBuffer apiBuffer = api.GetNewBuffer(); - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); - - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)) - .WillOnce(Return(apiBuffer)) - .RetiresOnSaturation(); - - return std::make_pair(apiBuffer, buffer); + wgpuDeviceCreateBuffer(device, &descriptor); } - std::pair CreateBufferMapped( - WGPUBufferUsage usage = WGPUBufferUsage_None) { + // Test MapRead DeserializeReadHandle failure. + TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a ReadHandle on mapping for reading.. 
+ ClientReadHandle* clientHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); + + // Mock a Deserialization failure. + MockServerReadHandleDeserializeFailure(); + + FlushClient(false); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); + } + + // Test read handle DeserializeDataUpdate failure. + TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a ReadHandle on mapping for reading. + ClientReadHandle* clientHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientHandle); + + // The server should deserialize the read handle from the client and then serialize + // an initialization message. + ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // The handle serialize data update on mapAsync cmd + ExpectServerReadHandleSerializeDataUpdate(serverHandle); + + // Mock a successful callback + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&mBufferContent)); + + FlushClient(); + + // The client should receive the handle data update message from the server. + // Mock a deserialization failure. + MockClientReadHandleDeserializeDataUpdateFailure(clientHandle); + + // Failed deserialization is a fatal failure and the client synchronously receives a + // DEVICE_LOST callback. 
+ EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1); + + FlushServer(false); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); + } + + // Test mapping for reading destroying the buffer before unmapping on the client side. + TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a ReadHandle on mapping for reading.. + ClientReadHandle* clientHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientHandle); + + // The server should deserialize the read handle from the client and then serialize + // an initialization message. + ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // The handle serialize data update on mapAsync cmd + ExpectServerReadHandleSerializeDataUpdate(serverHandle); + + // Mock a successful callback + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle)) + .WillOnce(Return(&mBufferContent)); + EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&mBufferContent)); + + FlushClient(); + + // The client receives a successful callback. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + // The client should receive the handle data update message from the server. 
+ ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent); + + FlushServer(); + + // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping + // immediately, both in the client and server side. + { + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); + wgpuBufferDestroy(buffer); + + EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); + EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); + FlushClient(); + + // The handle is already destroyed so unmap only results in a server unmap call. + wgpuBufferUnmap(buffer); + + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + FlushClient(); + } + } + + // Test successful mapping for writing. + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); + ExpectWriteHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // Mock a successful callback. + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle)) + .WillOnce(Return(&mBufferContent)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&mMappedBufferContent)); + + FlushClient(); + + // The client receives a successful callback. 
+ EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + FlushServer(); + + // The client writes to the handle contents. + mMappedBufferContent = mUpdatedBufferContent; + + // The client will then serialize data update and destroy the handle on Unmap() + ExpectClientWriteHandleSerializeDataUpdate(clientHandle); + + wgpuBufferUnmap(buffer); + + // The server deserializes the data update message. + ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent); + + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + } + + // Test WriteHandle destroy behavior + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); + ExpectWriteHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + + FlushClient(); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + wgpuBufferDestroy(buffer); + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); + + FlushClient(); + } + + // Test unsuccessful MapWrite. + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a WriteHandle on buffer creation with MapWrite + // usage. 
+ ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); + ExpectWriteHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // Mock an error callback. + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); + })); + + FlushClient(); + + // The client receives an error callback. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); + + FlushServer(); + + wgpuBufferUnmap(buffer); + + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + } + + // Test WriteHandle creation failure. + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) { + // Mock a WriteHandle creation failure + MockWriteHandleCreationFailure(); + + WGPUBufferDescriptor descriptor = {}; + descriptor.size = kBufferSize; + descriptor.usage = WGPUBufferUsage_MapWrite; + + wgpuDeviceCreateBuffer(device, &descriptor); + } + + // Test MapWrite DeserializeWriteHandle failure. + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + // The client should create and serialize a WriteHandle on buffer creation with MapWrite + // usage. 
+ ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); + ExpectWriteHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + + // Mock a deserialization failure. + MockServerWriteHandleDeserializeFailure(); + + FlushClient(false); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + } + + // Test MapWrite DeserializeDataUpdate failure. + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); + ExpectWriteHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // Mock a successful callback. + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle)) + .WillOnce(Return(&mBufferContent)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&mMappedBufferContent)); + + FlushClient(); + + // The client receives a success callback. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + FlushServer(); + + // The client writes to the handle contents. 
+ mMappedBufferContent = mUpdatedBufferContent; + + // The client will then serialize data update + ExpectClientWriteHandleSerializeDataUpdate(clientHandle); + + wgpuBufferUnmap(buffer); + + // The server deserializes the data update message. Mock a deserialization failure. + MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle); + + FlushClient(false); + + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + } + + // Test MapWrite destroying the buffer before unmapping on the client side. + TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) { + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); + ExpectWriteHandleSerialization(clientHandle); + + std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + + FlushClient(); + + wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, + nullptr); + + // Mock a successful callback. + EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); + })); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle)) + .WillOnce(Return(&mBufferContent)); + EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) + .WillOnce(Return(&mMappedBufferContent)); + + FlushClient(); + + // The client receives a successful callback. + EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + + FlushServer(); + + // The client writes to the handle contents. 
+ mMappedBufferContent = mUpdatedBufferContent; + + // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping + // immediately, both in the client and server side. + { + // The handle is destroyed once the buffer is destroyed. + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + + wgpuBufferDestroy(buffer); + + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); + FlushClient(); + + // The handle is already destroyed so unmap only results in a server unmap call. + wgpuBufferUnmap(buffer); + + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + FlushClient(); + } + } + + // Test successful buffer creation with mappedAtCreation = true. + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) { + // The client should create and serialize a WriteHandle on createBufferMapped. + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); + ExpectWriteHandleSerialization(clientHandle); + + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + std::tie(apiBuffer, buffer) = CreateBufferMapped(); + FlushClient(); + + // Update the mapped contents. + mMappedBufferContent = mUpdatedBufferContent; + + // When the client Unmaps the buffer, it will serialize data update writes to the handle and + // destroy it. + ExpectClientWriteHandleSerializeDataUpdate(clientHandle); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + + wgpuBufferUnmap(buffer); + + // The server deserializes the data update message. + ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent); + + // After the handle is updated it can be destroyed. 
+ EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + + FlushClient(); + } + + // Test buffer creation with mappedAtCreation WriteHandle creation failure. + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) { + // Mock a WriteHandle creation failure + MockWriteHandleCreationFailure(); + + WGPUBufferDescriptor descriptor = {}; + descriptor.size = sizeof(mBufferContent); + descriptor.mappedAtCreation = true; + + WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent))); + } + + // Test buffer creation with mappedAtCreation DeserializeWriteHandle failure. + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) { + // The client should create and serialize a WriteHandle on createBufferMapped. + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); + ExpectWriteHandleSerialization(clientHandle); + + // The server should then deserialize the WriteHandle from the client. 
+ MockServerWriteHandleDeserializeFailure(); + WGPUBufferDescriptor descriptor = {}; descriptor.size = sizeof(mBufferContent); descriptor.mappedAtCreation = true; - descriptor.usage = usage; WGPUBuffer apiBuffer = api.GetNewBuffer(); - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); + wgpuDeviceCreateBuffer(device, &descriptor); EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent))) - .WillOnce(Return(&mMappedBufferContent)); + // Now bufferGetMappedRange won't be called if deserialize writeHandle fails - return std::make_pair(apiBuffer, buffer); + FlushClient(false); + + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); } - ClientReadHandle* ExpectReadHandleCreation() { - // Create the handle first so we can use it in later expectations. - ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle(); + // Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure. + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) { + // The client should create and serialize a WriteHandle on createBufferMapped. + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); + ExpectWriteHandleSerialization(clientHandle); - EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent))) - .WillOnce(InvokeWithoutArgs([=]() { return handle; })); + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - return handle; + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + std::tie(apiBuffer, buffer) = CreateBufferMapped(); + FlushClient(); + + // Update the mapped contents. + mMappedBufferContent = mUpdatedBufferContent; + + // When the client Unmaps the buffer, it will serialize data update writes to the handle and + // destroy it. 
+ ExpectClientWriteHandleSerializeDataUpdate(clientHandle); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + + wgpuBufferUnmap(buffer); + + // The server deserializes the data update message. Mock a deserialization failure. + MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle); + + FlushClient(false); + + // Failed BufferUpdateMappedData cmd will early return so BufferUnmap is not processed. + // The server side writeHandle is destructed at buffer destruction. + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); } - void MockReadHandleCreationFailure() { - EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent))) - .WillOnce(InvokeWithoutArgs([=]() { return nullptr; })); - } + // Test mappedAtCreation=true destroying the buffer before unmapping on the client side. + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) { + // The client should create and serialize a WriteHandle on createBufferMapped. + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); + ExpectWriteHandleSerialization(clientHandle); - void ExpectReadHandleSerialization(ClientReadHandle* handle) { - EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle)) - .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); })); - EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _)) - .WillOnce(WithArg<1>([&](void* serializePointer) { - memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo)); - return sizeof(mSerializeCreateInfo); - })); - } + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - ServerReadHandle* ExpectServerReadHandleDeserialize() { - // Create the handle first so we can use it in later expectations. 
- ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle(); + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + std::tie(apiBuffer, buffer) = CreateBufferMapped(); + FlushClient(); - EXPECT_CALL(serverMemoryTransferService, - OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)), - sizeof(mSerializeCreateInfo), _)) - .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) { - *readHandle = handle; - return true; - })); + // Update the mapped contents. + mMappedBufferContent = mUpdatedBufferContent; - return handle; - } + // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping + // immediately, both in the client and server side. + { + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); + wgpuBufferDestroy(buffer); - void MockServerReadHandleDeserializeFailure() { - EXPECT_CALL(serverMemoryTransferService, - OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)), - sizeof(mSerializeCreateInfo), _)) - .WillOnce(InvokeWithoutArgs([&]() { return false; })); - } + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); + EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); + FlushClient(); - void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) { - EXPECT_CALL(serverMemoryTransferService, - OnReadHandleSizeOfSerializeDataUpdate(handle, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); })); - EXPECT_CALL(serverMemoryTransferService, - OnReadHandleSerializeDataUpdate(handle, _, _, _, _)) - .WillOnce(WithArg<4>([&](void* serializePointer) { - memcpy(serializePointer, &mReadHandleSerializeDataInfo, - sizeof(mReadHandleSerializeDataInfo)); - return sizeof(mReadHandleSerializeDataInfo); - })); - } + // The handle is already destroyed so unmap only results in a server unmap call. 
+ wgpuBufferUnmap(buffer); - void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle, - uint32_t* mappedData) { - EXPECT_CALL( - clientMemoryTransferService, - OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)), - sizeof(mReadHandleSerializeDataInfo), _, _)) - .WillOnce(Return(true)); - } - - void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) { - EXPECT_CALL( - clientMemoryTransferService, - OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)), - sizeof(mReadHandleSerializeDataInfo), _, _)) - .WillOnce(Return(false)); - } - - ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) { - // Create the handle first so we can use it in later expectations. - ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle(); - - EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent))) - .WillOnce(InvokeWithoutArgs([=]() { return handle; })); - if (mappedAtCreation) { - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle)) - .WillOnce(Return(&mBufferContent)); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + FlushClient(); } - - return handle; } - void MockWriteHandleCreationFailure() { - EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent))) - .WillOnce(InvokeWithoutArgs([=]() { return nullptr; })); - } - - void ExpectWriteHandleSerialization(ClientWriteHandle* handle) { - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle)) - .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); })); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _)) - .WillOnce(WithArg<1>([&](void* serializePointer) { - memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo)); - return sizeof(mSerializeCreateInfo); - })); - } - - ServerWriteHandle* ExpectServerWriteHandleDeserialization() { - 
// Create the handle first so it can be used in later expectations. - ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle(); - - EXPECT_CALL(serverMemoryTransferService, - OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)), - sizeof(mSerializeCreateInfo), _)) - .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) { - *writeHandle = handle; - return true; - })); - - return handle; - } - - void MockServerWriteHandleDeserializeFailure() { - EXPECT_CALL(serverMemoryTransferService, - OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)), - sizeof(mSerializeCreateInfo), _)) - .WillOnce(Return(false)); - } - - void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) { - EXPECT_CALL(clientMemoryTransferService, - OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); })); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeDataUpdate(handle, _, _, _)) - .WillOnce(WithArg<1>([&](void* serializePointer) { - memcpy(serializePointer, &mWriteHandleSerializeDataInfo, - sizeof(mWriteHandleSerializeDataInfo)); - return sizeof(mWriteHandleSerializeDataInfo); - })); - } - - void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle, - uint32_t expectedData) { - EXPECT_CALL( - serverMemoryTransferService, - OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)), - sizeof(mWriteHandleSerializeDataInfo), _, _)) - .WillOnce(Return(true)); - } - - void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) { - EXPECT_CALL( - serverMemoryTransferService, - OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)), - sizeof(mWriteHandleSerializeDataInfo), _, _)) - .WillOnce(Return(false)); - } - - // Arbitrary values used within tests to check if serialized data is correctly passed - // between the client and server. 
The static data changes between runs of the tests and - // test expectations will check that serialized values are passed to the respective - // deserialization function. - static uint32_t mSerializeCreateInfo; - static uint32_t mReadHandleSerializeDataInfo; - static uint32_t mWriteHandleSerializeDataInfo; - - // Represents the buffer contents for the test. - static uint32_t mBufferContent; - - static constexpr size_t kBufferSize = sizeof(mBufferContent); - - // The client's zero-initialized buffer for writing. - uint32_t mMappedBufferContent = 0; - - // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client - // performs a write. Test expectations should check that |mBufferContent == - // mUpdatedBufferContent| after all writes are flushed. - static uint32_t mUpdatedBufferContent; - - testing::StrictMock serverMemoryTransferService; - testing::StrictMock clientMemoryTransferService; -}; - -uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337; -uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349; -uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242; -uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394; -uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235; - -// Test successful mapping for reading. -TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a ReadHandle on creation. - ClientReadHandle* clientHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); - - // The server should deserialize the read handle from the client and then serialize - // an initialization message. 
- ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); - - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // The handle serialize data update on mapAsync cmd - ExpectServerReadHandleSerializeDataUpdate(serverHandle); - - // Mock a successful callback - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle)) - .WillOnce(Return(&mBufferContent)); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&mBufferContent)); - - FlushClient(); - - // The client receives a successful callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - // The client should receive the handle data update message from the server. - ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent); - - FlushServer(); - - wgpuBufferUnmap(buffer); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); -} - -// Test ReadHandle destroy behavior -TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a ReadHandle on creation. - ClientReadHandle* clientHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); - - // The server should deserialize the read handle from the client and then serialize - // an initialization message. 
- ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); - - FlushClient(); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); - wgpuBufferDestroy(buffer); - EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); - EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); - - FlushClient(); -} - -// Test unsuccessful mapping for reading. -TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a ReadHandle on creation. - ClientReadHandle* clientHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientHandle); - - // The server should deserialize the ReadHandle from the client. - ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // Mock a failed callback. - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - FlushClient(); - - // The client receives an error callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); - - wgpuBufferUnmap(buffer); - - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); -} - -// Test ReadHandle creation failure. 
-TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) { - // Mock a ReadHandle creation failure - MockReadHandleCreationFailure(); - - WGPUBufferDescriptor descriptor = {}; - descriptor.size = kBufferSize; - descriptor.usage = WGPUBufferUsage_MapRead; - - wgpuDeviceCreateBuffer(device, &descriptor); -} - -// Test MapRead DeserializeReadHandle failure. -TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a ReadHandle on mapping for reading.. - ClientReadHandle* clientHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); - - // Mock a Deserialization failure. - MockServerReadHandleDeserializeFailure(); - - FlushClient(false); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); -} - -// Test read handle DeserializeDataUpdate failure. -TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a ReadHandle on mapping for reading. - ClientReadHandle* clientHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientHandle); - - // The server should deserialize the read handle from the client and then serialize - // an initialization message. 
- ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // The handle serialize data update on mapAsync cmd - ExpectServerReadHandleSerializeDataUpdate(serverHandle); - - // Mock a successful callback - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&mBufferContent)); - - FlushClient(); - - // The client should receive the handle data update message from the server. - // Mock a deserialization failure. - MockClientReadHandleDeserializeDataUpdateFailure(clientHandle); - - // Failed deserialization is a fatal failure and the client synchronously receives a - // DEVICE_LOST callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1); - - FlushServer(false); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); -} - -// Test mapping for reading destroying the buffer before unmapping on the client side. -TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a ReadHandle on mapping for reading.. - ClientReadHandle* clientHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientHandle); - - // The server should deserialize the read handle from the client and then serialize - // an initialization message. 
- ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize(); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead); - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // The handle serialize data update on mapAsync cmd - ExpectServerReadHandleSerializeDataUpdate(serverHandle); - - // Mock a successful callback - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle)) - .WillOnce(Return(&mBufferContent)); - EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&mBufferContent)); - - FlushClient(); - - // The client receives a successful callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - // The client should receive the handle data update message from the server. - ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent); - - FlushServer(); - - // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping - // immediately, both in the client and server side. - { - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1); - wgpuBufferDestroy(buffer); - - EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1); - EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); + // Test a buffer with mappedAtCreation and MapRead usage destroy WriteHandle on unmap and switch + // data pointer to ReadHandle + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) { + // The client should create and serialize a ReadHandle and a WriteHandle on + // createBufferMapped. 
+ ClientReadHandle* clientReadHandle = ExpectReadHandleCreation(); + ExpectReadHandleSerialization(clientReadHandle); + ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true); + ExpectWriteHandleSerialization(clientWriteHandle); + + // The server should then deserialize a ReadHandle and a WriteHandle from the client. + ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize(); + ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization(); + + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead); FlushClient(); - // The handle is already destroyed so unmap only results in a server unmap call. + // Update the mapped contents. + mMappedBufferContent = mUpdatedBufferContent; + + // When the client Unmaps the buffer, it will serialize data update writes to the handle and + // destroy it. + ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle); + EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1); + EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle)) + .WillOnce(Return(&mBufferContent)); wgpuBufferUnmap(buffer); + // The server deserializes the data update message. + ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent); EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1); FlushClient(); + + // The ReadHandle will be destoryed on buffer destroy. + EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1); + EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1); } -} -// Test successful mapping for writing. 
-TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; + // Test WriteHandle preserves after unmap for a buffer with mappedAtCreation and MapWrite usage + TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) { + // The client should create and serialize a WriteHandle on createBufferMapped. + ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); - ExpectWriteHandleSerialization(clientHandle); + ExpectWriteHandleSerialization(clientHandle); - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); + // The server should then deserialize the WriteHandle from the client. + ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); + WGPUBuffer buffer; + WGPUBuffer apiBuffer; + std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite); + FlushClient(); - FlushClient(); + // Update the mapped contents. + mMappedBufferContent = mUpdatedBufferContent; - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); + // When the client Unmaps the buffer, it will serialize data update writes to the handle. + ExpectClientWriteHandleSerializeDataUpdate(clientHandle); - // Mock a successful callback. 
- EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle)) - .WillOnce(Return(&mBufferContent)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&mMappedBufferContent)); + wgpuBufferUnmap(buffer); - FlushClient(); + // The server deserializes the data update message. + ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent); + EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - // The client receives a successful callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); + FlushClient(); - FlushServer(); - - // The client writes to the handle contents. - mMappedBufferContent = mUpdatedBufferContent; - - // The client will then serialize data update and destroy the handle on Unmap() - ExpectClientWriteHandleSerializeDataUpdate(clientHandle); - - wgpuBufferUnmap(buffer); - - // The server deserializes the data update message. - ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent); - - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); -} - -// Test WriteHandle destroy behavior -TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); - ExpectWriteHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); - - // The server should then deserialize the WriteHandle from the client. 
- ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - FlushClient(); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - wgpuBufferDestroy(buffer); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); - EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); - - FlushClient(); -} - -// Test unsuccessful MapWrite. -TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a WriteHandle on buffer creation with MapWrite usage. - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); - ExpectWriteHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); - - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // Mock an error callback. - EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs( - [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); })); - - FlushClient(); - - // The client receives an error callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1); - - FlushServer(); - - wgpuBufferUnmap(buffer); - - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); -} - -// Test WriteHandle creation failure. 
-TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) { - // Mock a WriteHandle creation failure - MockWriteHandleCreationFailure(); - - WGPUBufferDescriptor descriptor = {}; - descriptor.size = kBufferSize; - descriptor.usage = WGPUBufferUsage_MapWrite; - - wgpuDeviceCreateBuffer(device, &descriptor); -} - -// Test MapWrite DeserializeWriteHandle failure. -TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - // The client should create and serialize a WriteHandle on buffer creation with MapWrite usage. - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); - ExpectWriteHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); - - // Mock a deserialization failure. - MockServerWriteHandleDeserializeFailure(); - - FlushClient(false); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); -} - -// Test MapWrite DeserializeDataUpdate failure. -TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); - ExpectWriteHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); - - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // Mock a successful callback. 
- EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle)) - .WillOnce(Return(&mBufferContent)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&mMappedBufferContent)); - - FlushClient(); - - // The client receives a success callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - FlushServer(); - - // The client writes to the handle contents. - mMappedBufferContent = mUpdatedBufferContent; - - // The client will then serialize data update - ExpectClientWriteHandleSerializeDataUpdate(clientHandle); - - wgpuBufferUnmap(buffer); - - // The server deserializes the data update message. Mock a deserialization failure. - MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle); - - FlushClient(false); - - // The handle is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); -} - -// Test MapWrite destroying the buffer before unmapping on the client side. -TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) { - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false); - ExpectWriteHandleSerialization(clientHandle); - - std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite); - - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - FlushClient(); - - wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr); - - // Mock a successful callback. 
- EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success); - })); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle)) - .WillOnce(Return(&mBufferContent)); - EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)) - .WillOnce(Return(&mMappedBufferContent)); - - FlushClient(); - - // The client receives a successful callback. - EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1); - - FlushServer(); - - // The client writes to the handle contents. - mMappedBufferContent = mUpdatedBufferContent; - - // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping - // immediately, both in the client and server side. - { - // The handle is destroyed once the buffer is destroyed. + // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed. EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - - wgpuBufferDestroy(buffer); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); - EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); - FlushClient(); - - // The handle is already destroyed so unmap only results in a server unmap call. - wgpuBufferUnmap(buffer); - - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - FlushClient(); } -} -// Test successful buffer creation with mappedAtCreation = true. -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) { - // The client should create and serialize a WriteHandle on createBufferMapped. - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); - ExpectWriteHandleSerialization(clientHandle); - - // The server should then deserialize the WriteHandle from the client. 
- ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - std::tie(apiBuffer, buffer) = CreateBufferMapped(); - FlushClient(); - - // Update the mapped contents. - mMappedBufferContent = mUpdatedBufferContent; - - // When the client Unmaps the buffer, it will serialize data update writes to the handle and - // destroy it. - ExpectClientWriteHandleSerializeDataUpdate(clientHandle); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - - wgpuBufferUnmap(buffer); - - // The server deserializes the data update message. - ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent); - - // After the handle is updated it can be destroyed. - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); -} - -// Test buffer creation with mappedAtCreation WriteHandle creation failure. -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) { - // Mock a WriteHandle creation failure - MockWriteHandleCreationFailure(); - - WGPUBufferDescriptor descriptor = {}; - descriptor.size = sizeof(mBufferContent); - descriptor.mappedAtCreation = true; - - WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor); - EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent))); -} - -// Test buffer creation with mappedAtCreation DeserializeWriteHandle failure. -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) { - // The client should create and serialize a WriteHandle on createBufferMapped. - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); - ExpectWriteHandleSerialization(clientHandle); - - // The server should then deserialize the WriteHandle from the client. 
- MockServerWriteHandleDeserializeFailure(); - - WGPUBufferDescriptor descriptor = {}; - descriptor.size = sizeof(mBufferContent); - descriptor.mappedAtCreation = true; - - WGPUBuffer apiBuffer = api.GetNewBuffer(); - - wgpuDeviceCreateBuffer(device, &descriptor); - - EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer)); - // Now bufferGetMappedRange won't be called if deserialize writeHandle fails - - FlushClient(false); - - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); -} - -// Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure. -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) { - // The client should create and serialize a WriteHandle on createBufferMapped. - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); - ExpectWriteHandleSerialization(clientHandle); - - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - std::tie(apiBuffer, buffer) = CreateBufferMapped(); - FlushClient(); - - // Update the mapped contents. - mMappedBufferContent = mUpdatedBufferContent; - - // When the client Unmaps the buffer, it will serialize data update writes to the handle and - // destroy it. - ExpectClientWriteHandleSerializeDataUpdate(clientHandle); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - - wgpuBufferUnmap(buffer); - - // The server deserializes the data update message. Mock a deserialization failure. - MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle); - - FlushClient(false); - - // Failed BufferUpdateMappedData cmd will early return so BufferUnmap is not processed. - // The server side writeHandle is destructed at buffer destruction. 
- EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); -} - -// Test mappedAtCreation=true destroying the buffer before unmapping on the client side. -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) { - // The client should create and serialize a WriteHandle on createBufferMapped. - ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); - ExpectWriteHandleSerialization(clientHandle); - - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - std::tie(apiBuffer, buffer) = CreateBufferMapped(); - FlushClient(); - - // Update the mapped contents. - mMappedBufferContent = mUpdatedBufferContent; - - // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping - // immediately, both in the client and server side. - { - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - wgpuBufferDestroy(buffer); - - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); - EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1); - FlushClient(); - - // The handle is already destroyed so unmap only results in a server unmap call. - wgpuBufferUnmap(buffer); - - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - FlushClient(); - } -} - -// Test a buffer with mappedAtCreation and MapRead usage destroy WriteHandle on unmap and switch -// data pointer to ReadHandle -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) { - // The client should create and serialize a ReadHandle and a WriteHandle on createBufferMapped. 
- ClientReadHandle* clientReadHandle = ExpectReadHandleCreation(); - ExpectReadHandleSerialization(clientReadHandle); - ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true); - ExpectWriteHandleSerialization(clientWriteHandle); - - // The server should then deserialize a ReadHandle and a WriteHandle from the client. - ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize(); - ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization(); - - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead); - FlushClient(); - - // Update the mapped contents. - mMappedBufferContent = mUpdatedBufferContent; - - // When the client Unmaps the buffer, it will serialize data update writes to the handle and - // destroy it. - ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle); - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1); - EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle)) - .WillOnce(Return(&mBufferContent)); - wgpuBufferUnmap(buffer); - - // The server deserializes the data update message. - ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1); - FlushClient(); - - // The ReadHandle will be destoryed on buffer destroy. - EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1); -} - -// Test WriteHandle preserves after unmap for a buffer with mappedAtCreation and MapWrite usage -TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) { - // The client should create and serialize a WriteHandle on createBufferMapped. 
- ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true); - - ExpectWriteHandleSerialization(clientHandle); - - // The server should then deserialize the WriteHandle from the client. - ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization(); - - WGPUBuffer buffer; - WGPUBuffer apiBuffer; - std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite); - FlushClient(); - - // Update the mapped contents. - mMappedBufferContent = mUpdatedBufferContent; - - // When the client Unmaps the buffer, it will serialize data update writes to the handle. - ExpectClientWriteHandleSerializeDataUpdate(clientHandle); - - wgpuBufferUnmap(buffer); - - // The server deserializes the data update message. - ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent); - EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1); - - FlushClient(); - - // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed. - EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1); - EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1); -} +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp index 9fafa3b996..b95a588689 100644 --- a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp +++ b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp @@ -14,166 +14,173 @@ #include "dawn/tests/unittests/wire/WireTest.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class WireOptionalTests : public WireTest { - public: - WireOptionalTests() { + using testing::_; + using testing::Return; + + class WireOptionalTests : public WireTest { + public: + WireOptionalTests() { + } + ~WireOptionalTests() override = default; + }; + + // Test passing nullptr instead of objects - object as value version + TEST_F(WireOptionalTests, OptionalObjectValue) { + 
WGPUBindGroupLayoutDescriptor bglDesc = {}; + bglDesc.entryCount = 0; + WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc); + + WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout(); + EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)) + .WillOnce(Return(apiBindGroupLayout)); + + // The `sampler`, `textureView` and `buffer` members of a binding are optional. + WGPUBindGroupEntry entry; + entry.binding = 0; + entry.sampler = nullptr; + entry.textureView = nullptr; + entry.buffer = nullptr; + entry.nextInChain = nullptr; + + WGPUBindGroupDescriptor bgDesc = {}; + bgDesc.layout = bgl; + bgDesc.entryCount = 1; + bgDesc.entries = &entry; + + wgpuDeviceCreateBindGroup(device, &bgDesc); + + WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup(); + EXPECT_CALL(api, + DeviceCreateBindGroup( + apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool { + return desc->nextInChain == nullptr && desc->entryCount == 1 && + desc->entries[0].binding == 0 && + desc->entries[0].sampler == nullptr && + desc->entries[0].buffer == nullptr && + desc->entries[0].textureView == nullptr; + }))) + .WillOnce(Return(apiPlaceholderBindGroup)); + + FlushClient(); } - ~WireOptionalTests() override = default; -}; -// Test passing nullptr instead of objects - object as value version -TEST_F(WireOptionalTests, OptionalObjectValue) { - WGPUBindGroupLayoutDescriptor bglDesc = {}; - bglDesc.entryCount = 0; - WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc); + // Test that the wire is able to send optional pointers to structures + TEST_F(WireOptionalTests, OptionalStructPointer) { + // Create shader module + WGPUShaderModuleDescriptor vertexDescriptor = {}; + WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); + WGPUShaderModule apiVsModule = api.GetNewShaderModule(); + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); - 
WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout(); - EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)) - .WillOnce(Return(apiBindGroupLayout)); + // Create the color state descriptor + WGPUBlendComponent blendComponent = {}; + blendComponent.operation = WGPUBlendOperation_Add; + blendComponent.srcFactor = WGPUBlendFactor_One; + blendComponent.dstFactor = WGPUBlendFactor_One; + WGPUBlendState blendState = {}; + blendState.alpha = blendComponent; + blendState.color = blendComponent; + WGPUColorTargetState colorTargetState = {}; + colorTargetState.format = WGPUTextureFormat_RGBA8Unorm; + colorTargetState.blend = &blendState; + colorTargetState.writeMask = WGPUColorWriteMask_All; - // The `sampler`, `textureView` and `buffer` members of a binding are optional. - WGPUBindGroupEntry entry; - entry.binding = 0; - entry.sampler = nullptr; - entry.textureView = nullptr; - entry.buffer = nullptr; - entry.nextInChain = nullptr; + // Create the depth-stencil state + WGPUStencilFaceState stencilFace = {}; + stencilFace.compare = WGPUCompareFunction_Always; + stencilFace.failOp = WGPUStencilOperation_Keep; + stencilFace.depthFailOp = WGPUStencilOperation_Keep; + stencilFace.passOp = WGPUStencilOperation_Keep; - WGPUBindGroupDescriptor bgDesc = {}; - bgDesc.layout = bgl; - bgDesc.entryCount = 1; - bgDesc.entries = &entry; + WGPUDepthStencilState depthStencilState = {}; + depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8; + depthStencilState.depthWriteEnabled = false; + depthStencilState.depthCompare = WGPUCompareFunction_Always; + depthStencilState.stencilBack = stencilFace; + depthStencilState.stencilFront = stencilFace; + depthStencilState.stencilReadMask = 0xff; + depthStencilState.stencilWriteMask = 0xff; + depthStencilState.depthBias = 0; + depthStencilState.depthBiasSlopeScale = 0.0; + depthStencilState.depthBiasClamp = 0.0; - wgpuDeviceCreateBindGroup(device, &bgDesc); + // Create the pipeline layout + 
WGPUPipelineLayoutDescriptor layoutDescriptor = {}; + layoutDescriptor.bindGroupLayoutCount = 0; + layoutDescriptor.bindGroupLayouts = nullptr; + WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor); + WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout(); + EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout)); - WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup(); - EXPECT_CALL(api, DeviceCreateBindGroup( - apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool { - return desc->nextInChain == nullptr && desc->entryCount == 1 && - desc->entries[0].binding == 0 && - desc->entries[0].sampler == nullptr && - desc->entries[0].buffer == nullptr && - desc->entries[0].textureView == nullptr; - }))) - .WillOnce(Return(apiPlaceholderBindGroup)); + // Create pipeline + WGPURenderPipelineDescriptor pipelineDescriptor = {}; - FlushClient(); -} + pipelineDescriptor.vertex.module = vsModule; + pipelineDescriptor.vertex.entryPoint = "main"; + pipelineDescriptor.vertex.bufferCount = 0; + pipelineDescriptor.vertex.buffers = nullptr; -// Test that the wire is able to send optional pointers to structures -TEST_F(WireOptionalTests, OptionalStructPointer) { - // Create shader module - WGPUShaderModuleDescriptor vertexDescriptor = {}; - WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor); - WGPUShaderModule apiVsModule = api.GetNewShaderModule(); - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule)); + WGPUFragmentState fragment = {}; + fragment.module = vsModule; + fragment.entryPoint = "main"; + fragment.targetCount = 1; + fragment.targets = &colorTargetState; + pipelineDescriptor.fragment = &fragment; - // Create the color state descriptor - WGPUBlendComponent blendComponent = {}; - blendComponent.operation = WGPUBlendOperation_Add; - blendComponent.srcFactor = WGPUBlendFactor_One; - blendComponent.dstFactor = 
WGPUBlendFactor_One; - WGPUBlendState blendState = {}; - blendState.alpha = blendComponent; - blendState.color = blendComponent; - WGPUColorTargetState colorTargetState = {}; - colorTargetState.format = WGPUTextureFormat_RGBA8Unorm; - colorTargetState.blend = &blendState; - colorTargetState.writeMask = WGPUColorWriteMask_All; + pipelineDescriptor.multisample.count = 1; + pipelineDescriptor.multisample.mask = 0xFFFFFFFF; + pipelineDescriptor.multisample.alphaToCoverageEnabled = false; + pipelineDescriptor.layout = layout; + pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList; + pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW; + pipelineDescriptor.primitive.cullMode = WGPUCullMode_None; - // Create the depth-stencil state - WGPUStencilFaceState stencilFace = {}; - stencilFace.compare = WGPUCompareFunction_Always; - stencilFace.failOp = WGPUStencilOperation_Keep; - stencilFace.depthFailOp = WGPUStencilOperation_Keep; - stencilFace.passOp = WGPUStencilOperation_Keep; + // First case: depthStencil is not null. 
+ pipelineDescriptor.depthStencil = &depthStencilState; + wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); - WGPUDepthStencilState depthStencilState = {}; - depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8; - depthStencilState.depthWriteEnabled = false; - depthStencilState.depthCompare = WGPUCompareFunction_Always; - depthStencilState.stencilBack = stencilFace; - depthStencilState.stencilFront = stencilFace; - depthStencilState.stencilReadMask = 0xff; - depthStencilState.stencilWriteMask = 0xff; - depthStencilState.depthBias = 0; - depthStencilState.depthBiasSlopeScale = 0.0; - depthStencilState.depthBiasClamp = 0.0; + WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline(); + EXPECT_CALL( + api, + DeviceCreateRenderPipeline( + apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { + return desc->depthStencil != nullptr && + desc->depthStencil->nextInChain == nullptr && + desc->depthStencil->depthWriteEnabled == false && + desc->depthStencil->depthCompare == WGPUCompareFunction_Always && + desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always && + desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep && + desc->depthStencil->stencilBack.depthFailOp == + WGPUStencilOperation_Keep && + desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep && + desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always && + desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep && + desc->depthStencil->stencilFront.depthFailOp == + WGPUStencilOperation_Keep && + desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep && + desc->depthStencil->stencilReadMask == 0xff && + desc->depthStencil->stencilWriteMask == 0xff && + desc->depthStencil->depthBias == 0 && + desc->depthStencil->depthBiasSlopeScale == 0.0 && + desc->depthStencil->depthBiasClamp == 0.0; + }))) + .WillOnce(Return(apiPlaceholderPipeline)); - // Create the pipeline layout - 
WGPUPipelineLayoutDescriptor layoutDescriptor = {}; - layoutDescriptor.bindGroupLayoutCount = 0; - layoutDescriptor.bindGroupLayouts = nullptr; - WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor); - WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout(); - EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout)); + FlushClient(); - // Create pipeline - WGPURenderPipelineDescriptor pipelineDescriptor = {}; + // Second case: depthStencil is null. + pipelineDescriptor.depthStencil = nullptr; + wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); + EXPECT_CALL( + api, DeviceCreateRenderPipeline( + apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { + return desc->depthStencil == nullptr; + }))) + .WillOnce(Return(apiPlaceholderPipeline)); - pipelineDescriptor.vertex.module = vsModule; - pipelineDescriptor.vertex.entryPoint = "main"; - pipelineDescriptor.vertex.bufferCount = 0; - pipelineDescriptor.vertex.buffers = nullptr; + FlushClient(); + } - WGPUFragmentState fragment = {}; - fragment.module = vsModule; - fragment.entryPoint = "main"; - fragment.targetCount = 1; - fragment.targets = &colorTargetState; - pipelineDescriptor.fragment = &fragment; - - pipelineDescriptor.multisample.count = 1; - pipelineDescriptor.multisample.mask = 0xFFFFFFFF; - pipelineDescriptor.multisample.alphaToCoverageEnabled = false; - pipelineDescriptor.layout = layout; - pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList; - pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW; - pipelineDescriptor.primitive.cullMode = WGPUCullMode_None; - - // First case: depthStencil is not null. 
- pipelineDescriptor.depthStencil = &depthStencilState; - wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); - - WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline(); - EXPECT_CALL( - api, - DeviceCreateRenderPipeline( - apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { - return desc->depthStencil != nullptr && - desc->depthStencil->nextInChain == nullptr && - desc->depthStencil->depthWriteEnabled == false && - desc->depthStencil->depthCompare == WGPUCompareFunction_Always && - desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always && - desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep && - desc->depthStencil->stencilBack.depthFailOp == WGPUStencilOperation_Keep && - desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep && - desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always && - desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep && - desc->depthStencil->stencilFront.depthFailOp == WGPUStencilOperation_Keep && - desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep && - desc->depthStencil->stencilReadMask == 0xff && - desc->depthStencil->stencilWriteMask == 0xff && - desc->depthStencil->depthBias == 0 && - desc->depthStencil->depthBiasSlopeScale == 0.0 && - desc->depthStencil->depthBiasClamp == 0.0; - }))) - .WillOnce(Return(apiPlaceholderPipeline)); - - FlushClient(); - - // Second case: depthStencil is null. 
- pipelineDescriptor.depthStencil = nullptr; - wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); - EXPECT_CALL(api, - DeviceCreateRenderPipeline( - apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { - return desc->depthStencil == nullptr; - }))) - .WillOnce(Return(apiPlaceholderPipeline)); - - FlushClient(); -} +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireQueueTests.cpp b/src/dawn/tests/unittests/wire/WireQueueTests.cpp index bd759bb424..536ad776e7 100644 --- a/src/dawn/tests/unittests/wire/WireQueueTests.cpp +++ b/src/dawn/tests/unittests/wire/WireQueueTests.cpp @@ -17,125 +17,131 @@ #include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/wire/WireClient.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -class MockQueueWorkDoneCallback { - public: - MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata)); -}; + using testing::_; + using testing::InvokeWithoutArgs; + using testing::Mock; -static std::unique_ptr mockQueueWorkDoneCallback; -static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) { - mockQueueWorkDoneCallback->Call(status, userdata); -} + class MockQueueWorkDoneCallback { + public: + MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata)); + }; -class WireQueueTests : public WireTest { - protected: - void SetUp() override { - WireTest::SetUp(); - mockQueueWorkDoneCallback = std::make_unique(); + static std::unique_ptr mockQueueWorkDoneCallback; + static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) { + mockQueueWorkDoneCallback->Call(status, userdata); } - void TearDown() override { - WireTest::TearDown(); - mockQueueWorkDoneCallback = nullptr; + class WireQueueTests : public WireTest { + protected: + void SetUp() override { + WireTest::SetUp(); + mockQueueWorkDoneCallback = std::make_unique(); + } + + void TearDown() override { + WireTest::TearDown(); + 
mockQueueWorkDoneCallback = nullptr; + } + + void FlushServer() { + WireTest::FlushServer(); + Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback); + } + }; + + // Test that a successful OnSubmittedWorkDone call is forwarded to the client. + TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) { + wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); + EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success); + })); + FlushClient(); + + EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)) + .Times(1); + FlushServer(); } - void FlushServer() { - WireTest::FlushServer(); - Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback); + // Test that an error OnSubmittedWorkDone call is forwarded as an error to the client. + TEST_F(WireQueueTests, OnSubmittedWorkDoneError) { + wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); + EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error); + })); + FlushClient(); + + EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1); + FlushServer(); } -}; -// Test that a successful OnSubmittedWorkDone call is forwarded to the client. 
-TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) { - wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); - EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success); - })); - FlushClient(); + // Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with + // device loss + TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) { + wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); + EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error); + })); + FlushClient(); - EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1); - FlushServer(); -} - -// Test that an error OnSubmittedWorkDone call is forwarded as an error to the client. 
-TEST_F(WireQueueTests, OnSubmittedWorkDoneError) { - wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); - EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error); - })); - FlushClient(); - - EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1); - FlushServer(); -} - -// Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with -// device loss -TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) { - wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); - EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error); - })); - FlushClient(); - - EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this)) - .Times(1); - GetWireClient()->Disconnect(); -} - -// Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with -// device loss -TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) { - GetWireClient()->Disconnect(); - - EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this)) - .Times(1); - wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); -} - -// Hack to pass in test context into user callback -struct TestData { - WireQueueTests* pTest; - WGPUQueue* pTestQueue; - size_t numRequests; -}; - -static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) { - TestData* testData = reinterpret_cast(userdata); - // Mimic the user callback is sending new requests - ASSERT_NE(testData, nullptr); - ASSERT_NE(testData->pTest, nullptr); - ASSERT_NE(testData->pTestQueue, nullptr); - mockQueueWorkDoneCallback->Call(status, testData->pTest); - - // Send the requests a 
number of times - for (size_t i = 0; i < testData->numRequests; i++) { - wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone, - testData->pTest); + EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this)) + .Times(1); + GetWireClient()->Disconnect(); } -} -// Test that requests inside user callbacks before disconnect are called -TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) { - TestData testData = {this, &queue, 10}; - wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData); - EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error); - })); - FlushClient(); + // Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with + // device loss + TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) { + GetWireClient()->Disconnect(); - EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this)) - .Times(1 + testData.numRequests); - GetWireClient()->Disconnect(); -} + EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this)) + .Times(1); + wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this); + } -// Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks -// since it is always destructed after the test TearDown, and we cannot create a new queue obj -// with wgpuDeviceGetQueue + // Hack to pass in test context into user callback + struct TestData { + WireQueueTests* pTest; + WGPUQueue* pTestQueue; + size_t numRequests; + }; + + static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) { + TestData* testData = reinterpret_cast(userdata); + // Mimic the user callback is sending new requests + ASSERT_NE(testData, nullptr); + ASSERT_NE(testData->pTest, 
nullptr); + ASSERT_NE(testData->pTestQueue, nullptr); + mockQueueWorkDoneCallback->Call(status, testData->pTest); + + // Send the requests a number of times + for (size_t i = 0; i < testData->numRequests; i++) { + wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone, + testData->pTest); + } + } + + // Test that requests inside user callbacks before disconnect are called + TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) { + TestData testData = {this, &queue, 10}; + wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData); + EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error); + })); + FlushClient(); + + EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this)) + .Times(1 + testData.numRequests); + GetWireClient()->Disconnect(); + } + + // Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks + // since it is always destructed after the test TearDown, and we cannot create a new queue obj + // with wgpuDeviceGetQueue + +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp index 0bb8d3f1c3..48c93d2990 100644 --- a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp +++ b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp @@ -17,220 +17,228 @@ #include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/wire/WireClient.h" -using namespace testing; -using namespace dawn::wire; +namespace dawn::wire { -namespace { + using testing::_; + using testing::InvokeWithoutArgs; + using testing::Mock; + using testing::Return; + using testing::StrictMock; - // Mock class to add expectations on the wire calling callbacks - class MockCompilationInfoCallback { + namespace { + + // Mock class to 
add expectations on the wire calling callbacks + class MockCompilationInfoCallback { + public: + MOCK_METHOD(void, + Call, + (WGPUCompilationInfoRequestStatus status, + const WGPUCompilationInfo* info, + void* userdata)); + }; + + std::unique_ptr> mockCompilationInfoCallback; + void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status, + const WGPUCompilationInfo* info, + void* userdata) { + mockCompilationInfoCallback->Call(status, info, userdata); + } + + } // anonymous namespace + + class WireShaderModuleTests : public WireTest { public: - MOCK_METHOD(void, - Call, - (WGPUCompilationInfoRequestStatus status, - const WGPUCompilationInfo* info, - void* userdata)); + WireShaderModuleTests() { + } + ~WireShaderModuleTests() override = default; + + void SetUp() override { + WireTest::SetUp(); + + mockCompilationInfoCallback = + std::make_unique>(); + apiShaderModule = api.GetNewShaderModule(); + + WGPUShaderModuleDescriptor descriptor = {}; + shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor); + + EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)) + .WillOnce(Return(apiShaderModule)) + .RetiresOnSaturation(); + FlushClient(); + } + + void TearDown() override { + WireTest::TearDown(); + + // Delete mock so that expectations are checked + mockCompilationInfoCallback = nullptr; + } + + void FlushClient() { + WireTest::FlushClient(); + Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback); + } + + void FlushServer() { + WireTest::FlushServer(); + Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback); + } + + protected: + WGPUShaderModule shaderModule; + WGPUShaderModule apiShaderModule; }; - std::unique_ptr> mockCompilationInfoCallback; - void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status, - const WGPUCompilationInfo* info, - void* userdata) { - mockCompilationInfoCallback->Call(status, info, userdata); - } + // Check getting CompilationInfo for a successfully created shader module + 
TEST_F(WireShaderModuleTests, GetCompilationInfo) { + wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr); -} // anonymous namespace + WGPUCompilationMessage message = { + nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; + WGPUCompilationInfo compilationInfo; + compilationInfo.nextInChain = nullptr; + compilationInfo.messageCount = 1; + compilationInfo.messages = &message; -class WireShaderModuleTests : public WireTest { - public: - WireShaderModuleTests() { - } - ~WireShaderModuleTests() override = default; + EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallShaderModuleGetCompilationInfoCallback( + apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); + })); - void SetUp() override { - WireTest::SetUp(); - - mockCompilationInfoCallback = std::make_unique>(); - apiShaderModule = api.GetNewShaderModule(); - - WGPUShaderModuleDescriptor descriptor = {}; - shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor); - - EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)) - .WillOnce(Return(apiShaderModule)) - .RetiresOnSaturation(); FlushClient(); + + EXPECT_CALL(*mockCompilationInfoCallback, + Call(WGPUCompilationInfoRequestStatus_Success, + MatchesLambda([&](const WGPUCompilationInfo* info) -> bool { + if (info->messageCount != compilationInfo.messageCount) { + return false; + } + const WGPUCompilationMessage* infoMessage = &info->messages[0]; + return strcmp(infoMessage->message, message.message) == 0 && + infoMessage->nextInChain == message.nextInChain && + infoMessage->type == message.type && + infoMessage->lineNum == message.lineNum && + infoMessage->linePos == message.linePos && + infoMessage->offset == message.offset && + infoMessage->length == message.length; + }), + _)) + .Times(1); + FlushServer(); } - void TearDown() override { - WireTest::TearDown(); + // Test that calling 
GetCompilationInfo then disconnecting the wire calls the callback with a + // device loss. + TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) { + wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr); - // Delete mock so that expectations are checked - mockCompilationInfoCallback = nullptr; + WGPUCompilationMessage message = { + nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; + WGPUCompilationInfo compilationInfo; + compilationInfo.nextInChain = nullptr; + compilationInfo.messageCount = 1; + compilationInfo.messages = &message; + + EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallShaderModuleGetCompilationInfoCallback( + apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); + })); + FlushClient(); + + EXPECT_CALL(*mockCompilationInfoCallback, + Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _)); + GetWireClient()->Disconnect(); } - void FlushClient() { - WireTest::FlushClient(); - Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback); + // Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a + // device loss. 
+ TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) { + GetWireClient()->Disconnect(); + EXPECT_CALL(*mockCompilationInfoCallback, + Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _)); + wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr); } - void FlushServer() { - WireTest::FlushServer(); - Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback); + // Hack to pass in test context into user callback + struct TestData { + WireShaderModuleTests* pTest; + WGPUShaderModule* pTestShaderModule; + size_t numRequests; + }; + + static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status, + const WGPUCompilationInfo* info, + void* userdata) { + TestData* testData = reinterpret_cast(userdata); + // Mimic the user callback is sending new requests + ASSERT_NE(testData, nullptr); + ASSERT_NE(testData->pTest, nullptr); + ASSERT_NE(testData->pTestShaderModule, nullptr); + + mockCompilationInfoCallback->Call(status, info, testData->pTest); + + // Send the requests a number of times + for (size_t i = 0; i < testData->numRequests; i++) { + wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule), + ToMockGetCompilationInfoCallback, nullptr); + } } - protected: - WGPUShaderModule shaderModule; - WGPUShaderModule apiShaderModule; -}; + // Test that requests inside user callbacks before disconnect are called + TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) { + TestData testData = {this, &shaderModule, 10}; -// Check getting CompilationInfo for a successfully created shader module -TEST_F(WireShaderModuleTests, GetCompilationInfo) { - wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr); + wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests, + &testData); - WGPUCompilationMessage message = { - nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; - 
WGPUCompilationInfo compilationInfo; - compilationInfo.nextInChain = nullptr; - compilationInfo.messageCount = 1; - compilationInfo.messages = &message; + WGPUCompilationMessage message = { + nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; + WGPUCompilationInfo compilationInfo; + compilationInfo.nextInChain = nullptr; + compilationInfo.messageCount = 1; + compilationInfo.messages = &message; - EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallShaderModuleGetCompilationInfoCallback( - apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); - })); + EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallShaderModuleGetCompilationInfoCallback( + apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); + })); + FlushClient(); - FlushClient(); - - EXPECT_CALL(*mockCompilationInfoCallback, - Call(WGPUCompilationInfoRequestStatus_Success, - MatchesLambda([&](const WGPUCompilationInfo* info) -> bool { - if (info->messageCount != compilationInfo.messageCount) { - return false; - } - const WGPUCompilationMessage* infoMessage = &info->messages[0]; - return strcmp(infoMessage->message, message.message) == 0 && - infoMessage->nextInChain == message.nextInChain && - infoMessage->type == message.type && - infoMessage->lineNum == message.lineNum && - infoMessage->linePos == message.linePos && - infoMessage->offset == message.offset && - infoMessage->length == message.length; - }), - _)) - .Times(1); - FlushServer(); -} - -// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a device -// loss. 
-TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) { - wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr); - - WGPUCompilationMessage message = { - nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; - WGPUCompilationInfo compilationInfo; - compilationInfo.nextInChain = nullptr; - compilationInfo.messageCount = 1; - compilationInfo.messages = &message; - - EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallShaderModuleGetCompilationInfoCallback( - apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); - })); - FlushClient(); - - EXPECT_CALL(*mockCompilationInfoCallback, - Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _)); - GetWireClient()->Disconnect(); -} - -// Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a -// device loss. -TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) { - GetWireClient()->Disconnect(); - EXPECT_CALL(*mockCompilationInfoCallback, - Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _)); - wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr); -} - -// Hack to pass in test context into user callback -struct TestData { - WireShaderModuleTests* pTest; - WGPUShaderModule* pTestShaderModule; - size_t numRequests; -}; - -static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status, - const WGPUCompilationInfo* info, - void* userdata) { - TestData* testData = reinterpret_cast(userdata); - // Mimic the user callback is sending new requests - ASSERT_NE(testData, nullptr); - ASSERT_NE(testData->pTest, nullptr); - ASSERT_NE(testData->pTestShaderModule, nullptr); - - mockCompilationInfoCallback->Call(status, info, testData->pTest); - - // Send the requests a number of times - for (size_t i = 0; i < testData->numRequests; i++) { - 
wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule), - ToMockGetCompilationInfoCallback, nullptr); + EXPECT_CALL(*mockCompilationInfoCallback, + Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _)) + .Times(1 + testData.numRequests); + GetWireClient()->Disconnect(); } -} -// Test that requests inside user callbacks before disconnect are called -TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) { - TestData testData = {this, &shaderModule, 10}; + // Test that requests inside user callbacks before object destruction are called + TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) { + TestData testData = {this, &shaderModule, 10}; - wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests, - &testData); + wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests, + &testData); - WGPUCompilationMessage message = { - nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; - WGPUCompilationInfo compilationInfo; - compilationInfo.nextInChain = nullptr; - compilationInfo.messageCount = 1; - compilationInfo.messages = &message; + WGPUCompilationMessage message = { + nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; + WGPUCompilationInfo compilationInfo; + compilationInfo.nextInChain = nullptr; + compilationInfo.messageCount = 1; + compilationInfo.messages = &message; - EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallShaderModuleGetCompilationInfoCallback( - apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); - })); - FlushClient(); + EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) + .WillOnce(InvokeWithoutArgs([&]() { + api.CallShaderModuleGetCompilationInfoCallback( + apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); + })); + FlushClient(); - 
EXPECT_CALL(*mockCompilationInfoCallback, - Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _)) - .Times(1 + testData.numRequests); - GetWireClient()->Disconnect(); -} + EXPECT_CALL(*mockCompilationInfoCallback, + Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _)) + .Times(1 + testData.numRequests); + wgpuShaderModuleRelease(shaderModule); + } -// Test that requests inside user callbacks before object destruction are called -TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) { - TestData testData = {this, &shaderModule, 10}; - - wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests, - &testData); - - WGPUCompilationMessage message = { - nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8}; - WGPUCompilationInfo compilationInfo; - compilationInfo.nextInChain = nullptr; - compilationInfo.messageCount = 1; - compilationInfo.messages = &message; - - EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _)) - .WillOnce(InvokeWithoutArgs([&]() { - api.CallShaderModuleGetCompilationInfoCallback( - apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo); - })); - FlushClient(); - - EXPECT_CALL(*mockCompilationInfoCallback, - Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _)) - .Times(1 + testData.numRequests); - wgpuShaderModuleRelease(shaderModule); -} +} // namespace dawn::wire diff --git a/src/dawn/tests/unittests/wire/WireTest.cpp b/src/dawn/tests/unittests/wire/WireTest.cpp index cba143e622..9397139277 100644 --- a/src/dawn/tests/unittests/wire/WireTest.cpp +++ b/src/dawn/tests/unittests/wire/WireTest.cpp @@ -19,8 +19,11 @@ #include "dawn/wire/WireClient.h" #include "dawn/wire/WireServer.h" -using namespace testing; -using namespace dawn::wire; +using testing::_; +using testing::AnyNumber; +using testing::Exactly; +using testing::Mock; +using testing::Return; WireTest::WireTest() { } @@ -28,11 +31,11 @@ WireTest::WireTest() 
{ WireTest::~WireTest() { } -client::MemoryTransferService* WireTest::GetClientMemoryTransferService() { +dawn::wire::client::MemoryTransferService* WireTest::GetClientMemoryTransferService() { return nullptr; } -server::MemoryTransferService* WireTest::GetServerMemoryTransferService() { +dawn::wire::server::MemoryTransferService* WireTest::GetServerMemoryTransferService() { return nullptr; } @@ -50,19 +53,19 @@ void WireTest::SetUp() { mS2cBuf = std::make_unique(); mC2sBuf = std::make_unique(mWireServer.get()); - WireServerDescriptor serverDesc = {}; + dawn::wire::WireServerDescriptor serverDesc = {}; serverDesc.procs = &mockProcs; serverDesc.serializer = mS2cBuf.get(); serverDesc.memoryTransferService = GetServerMemoryTransferService(); - mWireServer.reset(new WireServer(serverDesc)); + mWireServer.reset(new dawn::wire::WireServer(serverDesc)); mC2sBuf->SetHandler(mWireServer.get()); - WireClientDescriptor clientDesc = {}; + dawn::wire::WireClientDescriptor clientDesc = {}; clientDesc.serializer = mC2sBuf.get(); clientDesc.memoryTransferService = GetClientMemoryTransferService(); - mWireClient.reset(new WireClient(clientDesc)); + mWireClient.reset(new dawn::wire::WireClient(clientDesc)); mS2cBuf->SetHandler(mWireClient.get()); dawnProcSetProcs(&dawn::wire::client::GetProcs()); diff --git a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp index 2c74380332..cc020bc465 100644 --- a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp +++ b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp @@ -26,23 +26,23 @@ #include "dawn/utils/ComboRenderPipelineDescriptor.h" #include "dawn/utils/WGPUHelpers.h" -constexpr uint32_t kRTSize = 4; +namespace dawn::native::d3d12 { -// Pooling tests are required to advance the GPU completed serial to reuse heaps. -// This requires Tick() to be called at-least |kFrameDepth| times. This constant -// should be updated if the internals of Tick() change. 
-constexpr uint32_t kFrameDepth = 2; + constexpr uint32_t kRTSize = 4; -using namespace dawn::native::d3d12; + // Pooling tests are required to advance the GPU completed serial to reuse heaps. + // This requires Tick() to be called at-least |kFrameDepth| times. This constant + // should be updated if the internals of Tick() change. + constexpr uint32_t kFrameDepth = 2; -class D3D12DescriptorHeapTests : public DawnTest { - protected: - void SetUp() override { - DawnTest::SetUp(); - DAWN_TEST_UNSUPPORTED_IF(UsesWire()); - mD3DDevice = reinterpret_cast(device.Get()); + class D3D12DescriptorHeapTests : public DawnTest { + protected: + void SetUp() override { + DawnTest::SetUp(); + DAWN_TEST_UNSUPPORTED_IF(UsesWire()); + mD3DDevice = reinterpret_cast(device.Get()); - mSimpleVSModule = utils::CreateShaderModule(device, R"( + mSimpleVSModule = utils::CreateShaderModule(device, R"( @stage(vertex) fn main( @builtin(vertex_index) VertexIndex : u32 @@ -55,7 +55,7 @@ class D3D12DescriptorHeapTests : public DawnTest { return vec4(pos[VertexIndex], 0.0, 1.0); })"); - mSimpleFSModule = utils::CreateShaderModule(device, R"( + mSimpleFSModule = utils::CreateShaderModule(device, R"( struct U { color : vec4 } @@ -64,389 +64,397 @@ class D3D12DescriptorHeapTests : public DawnTest { @stage(fragment) fn main() -> @location(0) vec4 { return colorBuffer.color; })"); - } - - utils::BasicRenderPass MakeRenderPass(uint32_t width, - uint32_t height, - wgpu::TextureFormat format) { - DAWN_ASSERT(width > 0 && height > 0); - - wgpu::TextureDescriptor descriptor; - descriptor.dimension = wgpu::TextureDimension::e2D; - descriptor.size.width = width; - descriptor.size.height = height; - descriptor.size.depthOrArrayLayers = 1; - descriptor.sampleCount = 1; - descriptor.format = format; - descriptor.mipLevelCount = 1; - descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc; - wgpu::Texture color = device.CreateTexture(&descriptor); - - return 
utils::BasicRenderPass(width, height, color); - } - - std::array GetSolidColor(uint32_t n) const { - ASSERT(n >> 24 == 0); - float b = (n & 0xFF) / 255.0f; - float g = ((n >> 8) & 0xFF) / 255.0f; - float r = ((n >> 16) & 0xFF) / 255.0f; - return {r, g, b, 1}; - } - - Device* mD3DDevice = nullptr; - - wgpu::ShaderModule mSimpleVSModule; - wgpu::ShaderModule mSimpleFSModule; -}; - -class PlaceholderStagingDescriptorAllocator { - public: - PlaceholderStagingDescriptorAllocator(Device* device, - uint32_t descriptorCount, - uint32_t allocationsPerHeap) - : mAllocator(device, - descriptorCount, - allocationsPerHeap * descriptorCount, - D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) { - } - - CPUDescriptorHeapAllocation AllocateCPUDescriptors() { - dawn::native::ResultOrError result = - mAllocator.AllocateCPUDescriptors(); - return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{}; - } - - void Deallocate(CPUDescriptorHeapAllocation& allocation) { - mAllocator.Deallocate(&allocation); - } - - private: - StagingDescriptorAllocator mAllocator; -}; - -// Verify the shader visible view heaps switch over within a single submit. -TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) { - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); - - utils::ComboRenderPipelineDescriptor renderPipelineDescriptor; - - // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a - // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over. 
- renderPipelineDescriptor.vertex.module = mSimpleVSModule; - renderPipelineDescriptor.cFragment.module = mSimpleFSModule; - - wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor); - utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); - - Device* d3dDevice = reinterpret_cast(device.Get()); - ShaderVisibleDescriptorAllocator* allocator = - d3dDevice->GetViewShaderVisibleDescriptorAllocator(); - const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting(); - - const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - { - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - - pass.SetPipeline(renderPipeline); - - std::array redColor = {1, 0, 0, 1}; - wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( - device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); - - for (uint32_t i = 0; i < heapSize + 1; ++i) { - pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), - {{0, uniformBuffer, 0, sizeof(redColor)}})); - pass.Draw(3); } - pass.End(); + utils::BasicRenderPass MakeRenderPass(uint32_t width, + uint32_t height, + wgpu::TextureFormat format) { + DAWN_ASSERT(width > 0 && height > 0); + + wgpu::TextureDescriptor descriptor; + descriptor.dimension = wgpu::TextureDimension::e2D; + descriptor.size.width = width; + descriptor.size.height = height; + descriptor.size.depthOrArrayLayers = 1; + descriptor.sampleCount = 1; + descriptor.format = format; + descriptor.mipLevelCount = 1; + descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc; + wgpu::Texture color = device.CreateTexture(&descriptor); + + return utils::BasicRenderPass(width, height, color); + } + + std::array GetSolidColor(uint32_t n) const { + ASSERT(n >> 24 == 0); + float b = (n & 0xFF) / 255.0f; + float g = ((n >> 
8) & 0xFF) / 255.0f; + float r = ((n >> 16) & 0xFF) / 255.0f; + return {r, g, b, 1}; + } + + Device* mD3DDevice = nullptr; + + wgpu::ShaderModule mSimpleVSModule; + wgpu::ShaderModule mSimpleFSModule; + }; + + class PlaceholderStagingDescriptorAllocator { + public: + PlaceholderStagingDescriptorAllocator(Device* device, + uint32_t descriptorCount, + uint32_t allocationsPerHeap) + : mAllocator(device, + descriptorCount, + allocationsPerHeap * descriptorCount, + D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) { + } + + CPUDescriptorHeapAllocation AllocateCPUDescriptors() { + dawn::native::ResultOrError result = + mAllocator.AllocateCPUDescriptors(); + return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{}; + } + + void Deallocate(CPUDescriptorHeapAllocation& allocation) { + mAllocator.Deallocate(&allocation); + } + + private: + StagingDescriptorAllocator mAllocator; + }; + + // Verify the shader visible view heaps switch over within a single submit. + TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) { + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + + utils::ComboRenderPipelineDescriptor renderPipelineDescriptor; + + // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a + // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over. 
+ renderPipelineDescriptor.vertex.module = mSimpleVSModule; + renderPipelineDescriptor.cFragment.module = mSimpleFSModule; + + wgpu::RenderPipeline renderPipeline = + device.CreateRenderPipeline(&renderPipelineDescriptor); + utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); + + Device* d3dDevice = reinterpret_cast(device.Get()); + ShaderVisibleDescriptorAllocator* allocator = + d3dDevice->GetViewShaderVisibleDescriptorAllocator(); + const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting(); + + const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); + + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + { + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); + + pass.SetPipeline(renderPipeline); + + std::array redColor = {1, 0, 0, 1}; + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( + device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + + for (uint32_t i = 0; i < heapSize + 1; ++i) { + pass.SetBindGroup(0, + utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), + {{0, uniformBuffer, 0, sizeof(redColor)}})); + pass.Draw(3); + } + + pass.End(); + } + + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); + + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1)); } - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); + // Verify the shader visible sampler heaps does not switch over within a single submit. + TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) { + utils::ComboRenderPipelineDescriptor renderPipelineDescriptor; - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1)); -} - -// Verify the shader visible sampler heaps does not switch over within a single submit. 
-TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) { - utils::ComboRenderPipelineDescriptor renderPipelineDescriptor; - - // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating a - // sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over - // because the sampler heap allocations are de-duplicated. - renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"( + // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating + // a sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over + // because the sampler heap allocations are de-duplicated. + renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"( @stage(vertex) fn main() -> @builtin(position) vec4 { return vec4(0.0, 0.0, 0.0, 1.0); })"); - renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"( + renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"( @group(0) @binding(0) var sampler0 : sampler; @stage(fragment) fn main() -> @location(0) vec4 { _ = sampler0; return vec4(0.0, 0.0, 0.0, 0.0); })"); - wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor); - utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); + wgpu::RenderPipeline renderPipeline = + device.CreateRenderPipeline(&renderPipelineDescriptor); + utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); - wgpu::Sampler sampler = device.CreateSampler(); + wgpu::Sampler sampler = device.CreateSampler(); - Device* d3dDevice = reinterpret_cast(device.Get()); - ShaderVisibleDescriptorAllocator* allocator = - d3dDevice->GetSamplerShaderVisibleDescriptorAllocator(); - const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting(); + Device* d3dDevice = reinterpret_cast(device.Get()); + 
ShaderVisibleDescriptorAllocator* allocator = + d3dDevice->GetSamplerShaderVisibleDescriptorAllocator(); + const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting(); - const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting(); + const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting(); - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - { - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + { + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - pass.SetPipeline(renderPipeline); + pass.SetPipeline(renderPipeline); - for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) { - pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), - {{0, sampler}})); - pass.Draw(3); + for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) { + pass.SetBindGroup(0, + utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), + {{0, sampler}})); + pass.Draw(3); + } + + pass.End(); } - pass.End(); + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); + + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID); } - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); + // Verify shader-visible heaps can be recycled for multiple submits. + TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) { + // Use small heaps to count only pool-allocated switches. + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID); -} + ShaderVisibleDescriptorAllocator* allocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); -// Verify shader-visible heaps can be recycled for multiple submits. 
-TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) { - // Use small heaps to count only pool-allocated switches. - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + std::list> heaps = {allocator->GetShaderVisibleHeap()}; - ShaderVisibleDescriptorAllocator* allocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - std::list> heaps = {allocator->GetShaderVisibleHeap()}; + // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always + // unique. + for (uint32_t i = 0; i < kFrameDepth; i++) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + heaps.push_back(heap); + // CheckPassedSerials() will update the last internally completed serial. + EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess()); + // NextSerial() will increment the last internally submitted serial. + EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess()); + } - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); + // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order + // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in + // the check. + for (uint32_t i = 0; i < kFrameDepth + 1; i++) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(heaps.front() == heap); + heaps.pop_front(); + EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess()); + EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess()); + } - // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always unique. 
- for (uint32_t i = 0; i < kFrameDepth; i++) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - heaps.push_back(heap); - // CheckPassedSerials() will update the last internally completed serial. - EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess()); - // NextSerial() will increment the last internally submitted serial. - EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess()); + EXPECT_TRUE(heaps.empty()); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth); } - // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order - // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in the - // check. - for (uint32_t i = 0; i < kFrameDepth + 1; i++) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(heaps.front() == heap); - heaps.pop_front(); - EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess()); - EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess()); + // Verify shader-visible heaps do not recycle in a pending submit. + TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) { + // Use small heaps to count only pool-allocated switches. + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + + constexpr uint32_t kNumOfSwitches = 5; + + ShaderVisibleDescriptorAllocator* allocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + + const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); + + std::set> heaps = {allocator->GetShaderVisibleHeap()}; + + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); + + // Switch-over |kNumOfSwitches| and ensure heaps are always unique. 
+ for (uint32_t i = 0; i < kNumOfSwitches; i++) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + heaps.insert(heap); + } + + // After |kNumOfSwitches|, no heaps are recycled. + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), + heapSerial + HeapVersionID(kNumOfSwitches)); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches); } - EXPECT_TRUE(heaps.empty()); - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth); -} + // Verify switching shader-visible heaps do not recycle in a pending submit but do so + // once no longer pending. + TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) { + // Use small heaps to count only pool-allocated switches. + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); -// Verify shader-visible heaps do not recycle in a pending submit. -TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) { - // Use small heaps to count only pool-allocated switches. 
- DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + constexpr uint32_t kNumOfSwitches = 5; - constexpr uint32_t kNumOfSwitches = 5; + ShaderVisibleDescriptorAllocator* allocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); - ShaderVisibleDescriptorAllocator* allocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + std::set> heaps = {allocator->GetShaderVisibleHeap()}; - const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - std::set> heaps = {allocator->GetShaderVisibleHeap()}; + // Switch-over |kNumOfSwitches| to create a pool of unique heaps. + for (uint32_t i = 0; i < kNumOfSwitches; i++) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + heaps.insert(heap); + } - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), + heapSerial + HeapVersionID(kNumOfSwitches)); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches); - // Switch-over |kNumOfSwitches| and ensure heaps are always unique. - for (uint32_t i = 0; i < kNumOfSwitches; i++) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - heaps.insert(heap); + // Ensure switched-over heaps can be recycled by advancing the GPU by at-least + // |kFrameDepth|. + for (uint32_t i = 0; i < kFrameDepth; i++) { + mD3DDevice->APITick(); + } + + // Switch-over |kNumOfSwitches| again reusing the same heaps. 
+ for (uint32_t i = 0; i < kNumOfSwitches; i++) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end()); + heaps.erase(heap); + } + + // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist. + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), + heapSerial + HeapVersionID(kNumOfSwitches * 2)); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches); } - // After |kNumOfSwitches|, no heaps are recycled. - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), - heapSerial + HeapVersionID(kNumOfSwitches)); - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches); -} + // Verify shader-visible heaps do not recycle in multiple submits. + TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) { + ShaderVisibleDescriptorAllocator* allocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); -// Verify switching shader-visible heaps do not recycle in a pending submit but do so -// once no longer pending. -TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) { - // Use small heaps to count only pool-allocated switches. - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); - constexpr uint32_t kNumOfSwitches = 5; + std::set> heaps = {allocator->GetShaderVisibleHeap()}; - ShaderVisibleDescriptorAllocator* allocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); - const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - std::set> heaps = {allocator->GetShaderVisibleHeap()}; + // Growth: Allocate + Tick() and ensure heaps are always unique. 
+ while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + heaps.insert(heap); + mD3DDevice->APITick(); + } - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - - // Switch-over |kNumOfSwitches| to create a pool of unique heaps. - for (uint32_t i = 0; i < kNumOfSwitches; i++) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - heaps.insert(heap); + // Verify the number of switches equals the size of heaps allocated (minus the initial). + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u); + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), + heapSerial + HeapVersionID(heaps.size() - 1)); } - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), - heapSerial + HeapVersionID(kNumOfSwitches)); - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches); + // Verify shader-visible heaps do not recycle in a pending submit. + TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) { + ShaderVisibleDescriptorAllocator* allocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); - // Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|. - for (uint32_t i = 0; i < kFrameDepth; i++) { - mD3DDevice->APITick(); + const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); + + std::set> heaps = {allocator->GetShaderVisibleHeap()}; + + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); + + // Growth: Allocate new heaps. 
+ while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + heaps.insert(heap); + } + + // Verify the number of switches equals the size of heaps allocated (minus the initial). + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u); + EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), + heapSerial + HeapVersionID(heaps.size() - 1)); } - // Switch-over |kNumOfSwitches| again reusing the same heaps. - for (uint32_t i = 0; i < kNumOfSwitches; i++) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end()); - heaps.erase(heap); + // Verify switching shader-visible heaps do not recycle in a pending submit but do so + // once no longer pending. + // Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated. + TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) { + ShaderVisibleDescriptorAllocator* allocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + + std::set> heaps = {allocator->GetShaderVisibleHeap()}; + + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); + + uint32_t kNumOfPooledHeaps = 5; + while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + heaps.insert(heap); + } + + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps); + + // Ensure switched-over heaps can be recycled by advancing the GPU by at-least + // |kFrameDepth|. 
+ for (uint32_t i = 0; i < kFrameDepth; i++) { + mD3DDevice->APITick(); + } + + // Switch-over the pool-allocated heaps. + for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) { + EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); + ComPtr heap = allocator->GetShaderVisibleHeap(); + EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); + } + + EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps); } - // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist. - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), - heapSerial + HeapVersionID(kNumOfSwitches * 2)); - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches); -} + // Verify encoding multiple heaps worth of bindgroups. + // Shader-visible heaps will switch out |kNumOfHeaps| times. + TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) { + // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup + // that has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize| + // draws, the result is the arithmetic sum of the sequence after the framebuffer is blended + // by accumulation. By checking for this sum, we ensure each bindgroup was encoded + // correctly. + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); -// Verify shader-visible heaps do not recycle in multiple submits. 
-TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) { - ShaderVisibleDescriptorAllocator* allocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + utils::BasicRenderPass renderPass = + MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float); - const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); + utils::ComboRenderPipelineDescriptor pipelineDescriptor; + pipelineDescriptor.vertex.module = mSimpleVSModule; - std::set> heaps = {allocator->GetShaderVisibleHeap()}; - - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - - // Growth: Allocate + Tick() and ensure heaps are always unique. - while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - heaps.insert(heap); - mD3DDevice->APITick(); - } - - // Verify the number of switches equals the size of heaps allocated (minus the initial). - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u); - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), - heapSerial + HeapVersionID(heaps.size() - 1)); -} - -// Verify shader-visible heaps do not recycle in a pending submit. -TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) { - ShaderVisibleDescriptorAllocator* allocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); - - const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting(); - - std::set> heaps = {allocator->GetShaderVisibleHeap()}; - - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - - // Growth: Allocate new heaps. 
- while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - heaps.insert(heap); - } - - // Verify the number of switches equals the size of heaps allocated (minus the initial). - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u); - EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), - heapSerial + HeapVersionID(heaps.size() - 1)); -} - -// Verify switching shader-visible heaps do not recycle in a pending submit but do so -// once no longer pending. -// Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated. -TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) { - ShaderVisibleDescriptorAllocator* allocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); - - std::set> heaps = {allocator->GetShaderVisibleHeap()}; - - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u); - - uint32_t kNumOfPooledHeaps = 5; - while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - heaps.insert(heap); - } - - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps); - - // Ensure switched-over heaps can be recycled by advancing the GPU by at-least |kFrameDepth|. - for (uint32_t i = 0; i < kFrameDepth; i++) { - mD3DDevice->APITick(); - } - - // Switch-over the pool-allocated heaps. 
- for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) { - EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess()); - ComPtr heap = allocator->GetShaderVisibleHeap(); - EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end()); - } - - EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps); -} - -// Verify encoding multiple heaps worth of bindgroups. -// Shader-visible heaps will switch out |kNumOfHeaps| times. -TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) { - // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup that - // has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize| draws, - // the result is the arithmetic sum of the sequence after the framebuffer is blended by - // accumulation. By checking for this sum, we ensure each bindgroup was encoded correctly. - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); - - utils::BasicRenderPass renderPass = - MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float); - - utils::ComboRenderPipelineDescriptor pipelineDescriptor; - pipelineDescriptor.vertex.module = mSimpleVSModule; - - pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"( + pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"( struct U { heapSize : f32 } @@ -456,130 +464,42 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) { return vec4(buffer0.heapSize, 0.0, 0.0, 1.0); })"); - wgpu::BlendState blend; - blend.color.operation = wgpu::BlendOperation::Add; - blend.color.srcFactor = wgpu::BlendFactor::One; - blend.color.dstFactor = wgpu::BlendFactor::One; - blend.alpha.operation = wgpu::BlendOperation::Add; - blend.alpha.srcFactor = wgpu::BlendFactor::One; - blend.alpha.dstFactor = wgpu::BlendFactor::One; + wgpu::BlendState blend; + blend.color.operation = wgpu::BlendOperation::Add; + blend.color.srcFactor = 
wgpu::BlendFactor::One; + blend.color.dstFactor = wgpu::BlendFactor::One; + blend.alpha.operation = wgpu::BlendOperation::Add; + blend.alpha.srcFactor = wgpu::BlendFactor::One; + blend.alpha.dstFactor = wgpu::BlendFactor::One; - pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float; - pipelineDescriptor.cTargets[0].blend = &blend; + pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float; + pipelineDescriptor.cTargets[0].blend = &blend; - wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor); + wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor); - const uint32_t heapSize = - mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting(); - - constexpr uint32_t kNumOfHeaps = 2; - - const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize; - - std::vector bindGroups; - for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) { - const float color = i + 1; - wgpu::Buffer uniformBuffer = - utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform); - bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), - {{0, uniformBuffer}})); - } - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - { - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - - pass.SetPipeline(renderPipeline); - - for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) { - pass.SetBindGroup(0, bindGroups[i]); - pass.Draw(3); - } - - pass.End(); - } - - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); - - float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2; - EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0); -} - -// Verify encoding one bindgroup then a heaps worth in different submits. -// Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors. 
-// The first descriptor's memory will be reused when the second submit encodes |heapSize| -// descriptors. -TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) { - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); - - // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP. - DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP()); - - utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); - - utils::ComboRenderPipelineDescriptor pipelineDescriptor; - pipelineDescriptor.vertex.module = mSimpleVSModule; - pipelineDescriptor.cFragment.module = mSimpleFSModule; - pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; - - wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor); - - // Encode the first descriptor and submit. - { - std::array greenColor = {0, 1, 0, 1}; - wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( - device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform); - - wgpu::BindGroup bindGroup = utils::MakeBindGroup( - device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}}); - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - { - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - - pass.SetPipeline(renderPipeline); - pass.SetBindGroup(0, bindGroup); - pass.Draw(3); - pass.End(); - } - - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); - } - - EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0); - - // Encode a heap worth of descriptors. 
- { - const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator() + const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator() ->GetShaderVisibleHeapSizeForTesting(); - std::vector bindGroups; - for (uint32_t i = 0; i < heapSize - 1; i++) { - std::array fillColor = GetSolidColor(i + 1); // Avoid black - wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( - device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); + constexpr uint32_t kNumOfHeaps = 2; + const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize; + + std::vector bindGroups; + for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) { + const float color = i + 1; + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(device, &color, sizeof(color), + wgpu::BufferUsage::Uniform); bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}})); } - std::array redColor = {1, 0, 0, 1}; - wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData( - device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); - - bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), - {{0, lastUniformBuffer, 0, sizeof(redColor)}})); - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); { wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); pass.SetPipeline(renderPipeline); - for (uint32_t i = 0; i < heapSize; ++i) { + for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) { pass.SetBindGroup(0, bindGroups[i]); pass.Draw(3); } @@ -589,140 +509,149 @@ TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) { wgpu::CommandBuffer commands = encoder.Finish(); queue.Submit(1, &commands); + + float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2; + EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0); } - EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0); -} + // Verify encoding one bindgroup then 
a heaps worth in different submits. + // Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors. + // The first descriptor's memory will be reused when the second submit encodes |heapSize| + // descriptors. + TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) { + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); -// Verify encoding a heaps worth of bindgroups plus one more then reuse the first -// bindgroup in the same submit. -// Shader-visible heaps should switch out once then re-encode the first descriptor at a new offset -// in the heap. -TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) { - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP. + DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP()); - utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); + utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); - utils::ComboRenderPipelineDescriptor pipelineDescriptor; - pipelineDescriptor.vertex.module = mSimpleVSModule; - pipelineDescriptor.cFragment.module = mSimpleFSModule; - pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; + utils::ComboRenderPipelineDescriptor pipelineDescriptor; + pipelineDescriptor.vertex.module = mSimpleVSModule; + pipelineDescriptor.cFragment.module = mSimpleFSModule; + pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; - wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); + wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor); - std::array redColor = {1, 0, 0, 1}; - wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData( - device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + // Encode the first 
descriptor and submit. + { + std::array greenColor = {0, 1, 0, 1}; + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( + device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform); - std::vector bindGroups = {utils::MakeBindGroup( - device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})}; + wgpu::BindGroup bindGroup = utils::MakeBindGroup( + device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}}); - const uint32_t heapSize = - mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting(); + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + { + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - for (uint32_t i = 0; i < heapSize; i++) { - const std::array& fillColor = GetSolidColor(i + 1); // Avoid black - wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( - device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); - bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), - {{0, uniformBuffer, 0, sizeof(fillColor)}})); - } + pass.SetPipeline(renderPipeline); + pass.SetBindGroup(0, bindGroup); + pass.Draw(3); + pass.End(); + } - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - { - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - - pass.SetPipeline(pipeline); - - // Encode a heap worth of descriptors plus one more. - for (uint32_t i = 0; i < heapSize + 1; ++i) { - pass.SetBindGroup(0, bindGroups[i]); - pass.Draw(3); + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); } - // Re-encode the first bindgroup again. - pass.SetBindGroup(0, bindGroups[0]); - pass.Draw(3); + EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0); - pass.End(); + // Encode a heap worth of descriptors. 
+ { + const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator() + ->GetShaderVisibleHeapSizeForTesting(); + + std::vector bindGroups; + for (uint32_t i = 0; i < heapSize - 1; i++) { + std::array fillColor = GetSolidColor(i + 1); // Avoid black + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( + device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); + + bindGroups.push_back(utils::MakeBindGroup( + device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}})); + } + + std::array redColor = {1, 0, 0, 1}; + wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData( + device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + + bindGroups.push_back( + utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0), + {{0, lastUniformBuffer, 0, sizeof(redColor)}})); + + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + { + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); + + pass.SetPipeline(renderPipeline); + + for (uint32_t i = 0; i < heapSize; ++i) { + pass.SetBindGroup(0, bindGroups[i]); + pass.Draw(3); + } + + pass.End(); + } + + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); + } + + EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0); } - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); + // Verify encoding a heaps worth of bindgroups plus one more then reuse the first + // bindgroup in the same submit. + // Shader-visible heaps should switch out once then re-encode the first descriptor at a new + // offset in the heap. + TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) { + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); - // Make sure the first bindgroup was encoded correctly. 
- EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0); -} + utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); -// Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the -// first bindgroup again in the second submit. -// Shader-visible heaps should switch out once then re-encode the -// first descriptor at the same offset in the heap. -TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) { - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + utils::ComboRenderPipelineDescriptor pipelineDescriptor; + pipelineDescriptor.vertex.module = mSimpleVSModule; + pipelineDescriptor.cFragment.module = mSimpleFSModule; + pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; - utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); + wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); - utils::ComboRenderPipelineDescriptor pipelineDescriptor; - pipelineDescriptor.vertex.module = mSimpleVSModule; - pipelineDescriptor.cFragment.module = mSimpleFSModule; - pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; + std::array redColor = {1, 0, 0, 1}; + wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData( + device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); - wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); + std::vector bindGroups = { + utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), + {{0, firstUniformBuffer, 0, sizeof(redColor)}})}; - // Encode heap worth of descriptors plus one more. 
- std::array redColor = {1, 0, 0, 1}; + const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator() + ->GetShaderVisibleHeapSizeForTesting(); - wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData( - device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + for (uint32_t i = 0; i < heapSize; i++) { + const std::array& fillColor = GetSolidColor(i + 1); // Avoid black + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( + device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); + bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), + {{0, uniformBuffer, 0, sizeof(fillColor)}})); + } - std::vector bindGroups = {utils::MakeBindGroup( - device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})}; - - const uint32_t heapSize = - mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting(); - - for (uint32_t i = 0; i < heapSize; i++) { - std::array fillColor = GetSolidColor(i + 1); // Avoid black - wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( - device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); - - bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), - {{0, uniformBuffer, 0, sizeof(fillColor)}})); - } - - { wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); { wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); pass.SetPipeline(pipeline); + // Encode a heap worth of descriptors plus one more. for (uint32_t i = 0; i < heapSize + 1; ++i) { pass.SetBindGroup(0, bindGroups[i]); pass.Draw(3); } - pass.End(); - } - - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); - } - - // Re-encode the first bindgroup again. 
- { - std::array greenColor = {0, 1, 0, 1}; - queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor)); - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - { - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); - - pass.SetPipeline(pipeline); - + // Re-encode the first bindgroup again. pass.SetBindGroup(0, bindGroups[0]); pass.Draw(3); @@ -731,55 +660,137 @@ TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) { wgpu::CommandBuffer commands = encoder.Finish(); queue.Submit(1, &commands); + + // Make sure the first bindgroup was encoded correctly. + EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0); } - // Make sure the first bindgroup was re-encoded correctly. - EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0); -} + // Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the + // first bindgroup again in the second submit. + // Shader-visible heaps should switch out once then re-encode the + // first descriptor at the same offset in the heap. + TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) { + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); -// Verify encoding many sampler and ubo worth of bindgroups. -// Shader-visible heaps should switch out |kNumOfViewHeaps| times. -TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) { - DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( - dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); - // Create a solid filled texture. 
- wgpu::TextureDescriptor descriptor; - descriptor.dimension = wgpu::TextureDimension::e2D; - descriptor.size.width = kRTSize; - descriptor.size.height = kRTSize; - descriptor.size.depthOrArrayLayers = 1; - descriptor.sampleCount = 1; - descriptor.format = wgpu::TextureFormat::RGBA8Unorm; - descriptor.mipLevelCount = 1; - descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment | - wgpu::TextureUsage::CopySrc; - wgpu::Texture texture = device.CreateTexture(&descriptor); - wgpu::TextureView textureView = texture.CreateView(); - - { - utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture); - - utils::ComboRenderPassDescriptor renderPassDesc({textureView}); - renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear; - renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f}; - renderPass.renderPassInfo.cColorAttachments[0].view = textureView; - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - auto pass = encoder.BeginRenderPass(&renderPassDesc); - pass.End(); - - wgpu::CommandBuffer commandBuffer = encoder.Finish(); - queue.Submit(1, &commandBuffer); - - RGBA8 filled(0, 255, 0, 255); - EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0); - } - - { utils::ComboRenderPipelineDescriptor pipelineDescriptor; + pipelineDescriptor.vertex.module = mSimpleVSModule; + pipelineDescriptor.cFragment.module = mSimpleFSModule; + pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; - pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"( + wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); + + // Encode heap worth of descriptors plus one more. 
+ std::array redColor = {1, 0, 0, 1}; + + wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData( + device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + + std::vector bindGroups = { + utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), + {{0, firstUniformBuffer, 0, sizeof(redColor)}})}; + + const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator() + ->GetShaderVisibleHeapSizeForTesting(); + + for (uint32_t i = 0; i < heapSize; i++) { + std::array fillColor = GetSolidColor(i + 1); // Avoid black + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( + device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); + + bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), + {{0, uniformBuffer, 0, sizeof(fillColor)}})); + } + + { + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + { + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); + + pass.SetPipeline(pipeline); + + for (uint32_t i = 0; i < heapSize + 1; ++i) { + pass.SetBindGroup(0, bindGroups[i]); + pass.Draw(3); + } + + pass.End(); + } + + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); + } + + // Re-encode the first bindgroup again. + { + std::array greenColor = {0, 1, 0, 1}; + queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor)); + + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + { + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); + + pass.SetPipeline(pipeline); + + pass.SetBindGroup(0, bindGroups[0]); + pass.Draw(3); + + pass.End(); + } + + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); + } + + // Make sure the first bindgroup was re-encoded correctly. + EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0); + } + + // Verify encoding many sampler and ubo worth of bindgroups. 
+ // Shader-visible heaps should switch out |kNumOfViewHeaps| times. + TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) { + DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled( + dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting)); + + // Create a solid filled texture. + wgpu::TextureDescriptor descriptor; + descriptor.dimension = wgpu::TextureDimension::e2D; + descriptor.size.width = kRTSize; + descriptor.size.height = kRTSize; + descriptor.size.depthOrArrayLayers = 1; + descriptor.sampleCount = 1; + descriptor.format = wgpu::TextureFormat::RGBA8Unorm; + descriptor.mipLevelCount = 1; + descriptor.usage = wgpu::TextureUsage::TextureBinding | + wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc; + wgpu::Texture texture = device.CreateTexture(&descriptor); + wgpu::TextureView textureView = texture.CreateView(); + + { + utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture); + + utils::ComboRenderPassDescriptor renderPassDesc({textureView}); + renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear; + renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f}; + renderPass.renderPassInfo.cColorAttachments[0].view = textureView; + + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + auto pass = encoder.BeginRenderPass(&renderPassDesc); + pass.End(); + + wgpu::CommandBuffer commandBuffer = encoder.Finish(); + queue.Submit(1, &commandBuffer); + + RGBA8 filled(0, 255, 0, 255); + EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0); + } + + { + utils::ComboRenderPipelineDescriptor pipelineDescriptor; + + pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"( struct U { transform : mat2x2 } @@ -795,7 +806,7 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) { ); return vec4(buffer0.transform * (pos[VertexIndex]), 0.0, 1.0); })"); - pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"( + 
pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"( struct U { color : vec4 } @@ -809,245 +820,251 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) { return textureSample(texture0, sampler0, FragCoord.xy) + buffer0.color; })"); - utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize); - pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; + utils::BasicRenderPass renderPass = + utils::CreateBasicRenderPass(device, kRTSize, kRTSize); + pipelineDescriptor.cTargets[0].format = renderPass.colorFormat; - wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); + wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor); - // Encode a heap worth of descriptors |kNumOfHeaps| times. - constexpr float transform[] = {1.f, 0.f, 0.f, 1.f}; - wgpu::Buffer transformBuffer = utils::CreateBufferFromData( - device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform); + // Encode a heap worth of descriptors |kNumOfHeaps| times. 
+ constexpr float transform[] = {1.f, 0.f, 0.f, 1.f}; + wgpu::Buffer transformBuffer = utils::CreateBufferFromData( + device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform); - wgpu::SamplerDescriptor samplerDescriptor; - wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor); + wgpu::SamplerDescriptor samplerDescriptor; + wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor); - ShaderVisibleDescriptorAllocator* viewAllocator = - mD3DDevice->GetViewShaderVisibleDescriptorAllocator(); + ShaderVisibleDescriptorAllocator* viewAllocator = + mD3DDevice->GetViewShaderVisibleDescriptorAllocator(); - ShaderVisibleDescriptorAllocator* samplerAllocator = - mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); + ShaderVisibleDescriptorAllocator* samplerAllocator = + mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator(); - const HeapVersionID viewHeapSerial = viewAllocator->GetShaderVisibleHeapSerialForTesting(); - const HeapVersionID samplerHeapSerial = - samplerAllocator->GetShaderVisibleHeapSerialForTesting(); + const HeapVersionID viewHeapSerial = + viewAllocator->GetShaderVisibleHeapSerialForTesting(); + const HeapVersionID samplerHeapSerial = + samplerAllocator->GetShaderVisibleHeapSerialForTesting(); - const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting(); + const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting(); - // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per - // group. This means the count of heaps switches is determined by the total number of views - // to encode. Compute the number of bindgroups to encode by counting the required views for - // |kNumOfViewHeaps| heaps worth. - constexpr uint32_t kViewsPerBindGroup = 3; - constexpr uint32_t kNumOfViewHeaps = 5; + // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per + // group. 
This means the count of heaps switches is determined by the total number of + // views to encode. Compute the number of bindgroups to encode by counting the required + // views for |kNumOfViewHeaps| heaps worth. + constexpr uint32_t kViewsPerBindGroup = 3; + constexpr uint32_t kNumOfViewHeaps = 5; - const uint32_t numOfEncodedBindGroups = - (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup; + const uint32_t numOfEncodedBindGroups = + (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup; - std::vector bindGroups; - for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) { - std::array fillColor = GetSolidColor(i + 1); // Avoid black - wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( - device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); + std::vector bindGroups; + for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) { + std::array fillColor = GetSolidColor(i + 1); // Avoid black + wgpu::Buffer uniformBuffer = utils::CreateBufferFromData( + device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform); - bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), - {{0, transformBuffer, 0, sizeof(transform)}, - {1, sampler}, - {2, textureView}, - {3, uniformBuffer, 0, sizeof(fillColor)}})); + bindGroups.push_back( + utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), + {{0, transformBuffer, 0, sizeof(transform)}, + {1, sampler}, + {2, textureView}, + {3, uniformBuffer, 0, sizeof(fillColor)}})); + } + + std::array redColor = {1, 0, 0, 1}; + wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData( + device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + + bindGroups.push_back( + utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), + {{0, transformBuffer, 0, sizeof(transform)}, + {1, sampler}, + {2, textureView}, + {3, lastUniformBuffer, 0, sizeof(redColor)}})); + + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + wgpu::RenderPassEncoder pass = 
encoder.BeginRenderPass(&renderPass.renderPassInfo); + + pass.SetPipeline(pipeline); + + for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) { + pass.SetBindGroup(0, bindGroups[i]); + pass.Draw(3); + } + + pass.End(); + + wgpu::CommandBuffer commands = encoder.Finish(); + queue.Submit(1, &commands); + + // Final accumulated color is result of sampled + UBO color. + RGBA8 filled(255, 255, 0, 255); + RGBA8 notFilled(0, 0, 0, 0); + EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0); + EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0); + + EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps); + EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(), + viewHeapSerial + HeapVersionID(kNumOfViewHeaps)); + + EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u); + EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial); + } + } + + // Verify a single allocate/deallocate. + // One non-shader visible heap will be created. + TEST_P(D3D12DescriptorHeapTests, Single) { + constexpr uint32_t kDescriptorCount = 4; + constexpr uint32_t kAllocationsPerHeap = 3; + PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, + kAllocationsPerHeap); + + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + EXPECT_EQ(allocation.GetHeapIndex(), 0u); + EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); + + allocator.Deallocate(allocation); + EXPECT_FALSE(allocation.IsValid()); + } + + // Verify allocating many times causes the pool to increase in size. + // Creates |kNumOfHeaps| non-shader visible heaps. + TEST_P(D3D12DescriptorHeapTests, Sequential) { + constexpr uint32_t kDescriptorCount = 4; + constexpr uint32_t kAllocationsPerHeap = 3; + PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, + kAllocationsPerHeap); + + // Allocate |kNumOfHeaps| worth. 
+ constexpr uint32_t kNumOfHeaps = 2; + + std::set allocatedHeaps; + + std::vector allocations; + for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) { + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap); + EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); + allocations.push_back(allocation); + allocatedHeaps.insert(allocation.GetHeapIndex()); } - std::array redColor = {1, 0, 0, 1}; - wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData( - device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform); + EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps); - bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), - {{0, transformBuffer, 0, sizeof(transform)}, - {1, sampler}, - {2, textureView}, - {3, lastUniformBuffer, 0, sizeof(redColor)}})); + // Deallocate all. + for (CPUDescriptorHeapAllocation& allocation : allocations) { + allocator.Deallocate(allocation); + EXPECT_FALSE(allocation.IsValid()); + } + } - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo); + // Verify that re-allocating a number of allocations < pool size, all heaps are reused. + // Creates and reuses |kNumofHeaps| non-shader visible heaps. + TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) { + constexpr uint32_t kDescriptorCount = 4; + constexpr uint32_t kAllocationsPerHeap = 25; + PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, + kAllocationsPerHeap); - pass.SetPipeline(pipeline); + constexpr uint32_t kNumofHeaps = 10; - for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) { - pass.SetBindGroup(0, bindGroups[i]); - pass.Draw(3); + std::list allocations; + std::set allocationPtrs; + + // Allocate |kNumofHeaps| heaps worth. 
+ for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + allocations.push_back(allocation); + EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second); } - pass.End(); + // Deallocate all. + for (CPUDescriptorHeapAllocation& allocation : allocations) { + allocator.Deallocate(allocation); + EXPECT_FALSE(allocation.IsValid()); + } - wgpu::CommandBuffer commands = encoder.Finish(); - queue.Submit(1, &commands); + allocations.clear(); - // Final accumulated color is result of sampled + UBO color. - RGBA8 filled(255, 255, 0, 255); - RGBA8 notFilled(0, 0, 0, 0); - EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0); - EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0); + // Re-allocate all again. + std::set reallocatedPtrs; + for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + allocations.push_back(allocation); + EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second); + EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(), + allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end()); + } - EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps); - EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(), - viewHeapSerial + HeapVersionID(kNumOfViewHeaps)); - - EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u); - EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial); - } -} - -// Verify a single allocate/deallocate. -// One non-shader visible heap will be created. 
-TEST_P(D3D12DescriptorHeapTests, Single) { - constexpr uint32_t kDescriptorCount = 4; - constexpr uint32_t kAllocationsPerHeap = 3; - PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, - kAllocationsPerHeap); - - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - EXPECT_EQ(allocation.GetHeapIndex(), 0u); - EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); - - allocator.Deallocate(allocation); - EXPECT_FALSE(allocation.IsValid()); -} - -// Verify allocating many times causes the pool to increase in size. -// Creates |kNumOfHeaps| non-shader visible heaps. -TEST_P(D3D12DescriptorHeapTests, Sequential) { - constexpr uint32_t kDescriptorCount = 4; - constexpr uint32_t kAllocationsPerHeap = 3; - PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, - kAllocationsPerHeap); - - // Allocate |kNumOfHeaps| worth. - constexpr uint32_t kNumOfHeaps = 2; - - std::set allocatedHeaps; - - std::vector allocations; - for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) { - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap); - EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); - allocations.push_back(allocation); - allocatedHeaps.insert(allocation.GetHeapIndex()); + // Deallocate all again. + for (CPUDescriptorHeapAllocation& allocation : allocations) { + allocator.Deallocate(allocation); + EXPECT_FALSE(allocation.IsValid()); + } } - EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps); + // Verify allocating then deallocating many times. + TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) { + constexpr uint32_t kDescriptorCount = 4; + constexpr uint32_t kAllocationsPerHeap = 25; + PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, + kAllocationsPerHeap); - // Deallocate all. 
- for (CPUDescriptorHeapAllocation& allocation : allocations) { - allocator.Deallocate(allocation); - EXPECT_FALSE(allocation.IsValid()); - } -} + std::list list3; + std::list list5; + std::list allocations; -// Verify that re-allocating a number of allocations < pool size, all heaps are reused. -// Creates and reuses |kNumofHeaps| non-shader visible heaps. -TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) { - constexpr uint32_t kDescriptorCount = 4; - constexpr uint32_t kAllocationsPerHeap = 25; - PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, - kAllocationsPerHeap); + constexpr uint32_t kNumofHeaps = 2; - constexpr uint32_t kNumofHeaps = 10; + // Allocate |kNumofHeaps| heaps worth. + for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); + if (i % 3 == 0) { + list3.push_back(allocation); + } else { + allocations.push_back(allocation); + } + } - std::list allocations; - std::set allocationPtrs; + // Deallocate every 3rd allocation. + for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) { + allocator.Deallocate(*it); + } - // Allocate |kNumofHeaps| heaps worth. - for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - allocations.push_back(allocation); - EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second); - } + // Allocate again. + for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); + if (i % 5 == 0) { + list5.push_back(allocation); + } else { + allocations.push_back(allocation); + } + } - // Deallocate all. 
- for (CPUDescriptorHeapAllocation& allocation : allocations) { - allocator.Deallocate(allocation); - EXPECT_FALSE(allocation.IsValid()); - } + // Deallocate every 5th allocation. + for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) { + allocator.Deallocate(*it); + } - allocations.clear(); - - // Re-allocate all again. - std::set reallocatedPtrs; - for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - allocations.push_back(allocation); - EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second); - EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(), - allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end()); - } - - // Deallocate all again. - for (CPUDescriptorHeapAllocation& allocation : allocations) { - allocator.Deallocate(allocation); - EXPECT_FALSE(allocation.IsValid()); - } -} - -// Verify allocating then deallocating many times. -TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) { - constexpr uint32_t kDescriptorCount = 4; - constexpr uint32_t kAllocationsPerHeap = 25; - PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, - kAllocationsPerHeap); - - std::list list3; - std::list list5; - std::list allocations; - - constexpr uint32_t kNumofHeaps = 2; - - // Allocate |kNumofHeaps| heaps worth. - for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); - if (i % 3 == 0) { - list3.push_back(allocation); - } else { + // Allocate again. + for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { + CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); + EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); allocations.push_back(allocation); } - } - // Deallocate every 3rd allocation. 
- for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) { - allocator.Deallocate(*it); - } - - // Allocate again. - for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); - if (i % 5 == 0) { - list5.push_back(allocation); - } else { - allocations.push_back(allocation); + // Deallocate remaining. + for (CPUDescriptorHeapAllocation& allocation : allocations) { + allocator.Deallocate(allocation); + EXPECT_FALSE(allocation.IsValid()); } } - // Deallocate every 5th allocation. - for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) { - allocator.Deallocate(*it); - } + DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests, + D3D12Backend(), + D3D12Backend({"use_d3d12_small_shader_visible_heap"})); - // Allocate again. - for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) { - CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); - EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u); - allocations.push_back(allocation); - } - - // Deallocate remaining. 
- for (CPUDescriptorHeapAllocation& allocation : allocations) { - allocator.Deallocate(allocation); - EXPECT_FALSE(allocation.IsValid()); - } -} - -DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests, - D3D12Backend(), - D3D12Backend({"use_d3d12_small_shader_visible_heap"})); +} // namespace dawn::native::d3d12 diff --git a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp index ce49598eba..b87d564a6b 100644 --- a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp +++ b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp @@ -20,100 +20,101 @@ #include "dawn/tests/DawnTest.h" #include "dawn/utils/WGPUHelpers.h" -namespace { - class ExpectBetweenTimestamps : public detail::Expectation { - public: - ~ExpectBetweenTimestamps() override = default; +namespace dawn::native::d3d12 { + namespace { + class ExpectBetweenTimestamps : public ::detail::Expectation { + public: + ~ExpectBetweenTimestamps() override = default; - ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) { - mValue0 = value0; - mValue1 = value1; - } - - // Expect the actual results are between mValue0 and mValue1. - testing::AssertionResult Check(const void* data, size_t size) override { - const uint64_t* actual = static_cast(data); - for (size_t i = 0; i < size / sizeof(uint64_t); ++i) { - if (actual[i] < mValue0 || actual[i] > mValue1) { - return testing::AssertionFailure() - << "Expected data[" << i << "] to be between " << mValue0 << " and " - << mValue1 << ", actual " << actual[i] << std::endl; - } + ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) { + mValue0 = value0; + mValue1 = value1; } - return testing::AssertionSuccess(); + // Expect the actual results are between mValue0 and mValue1. 
+ testing::AssertionResult Check(const void* data, size_t size) override { + const uint64_t* actual = static_cast(data); + for (size_t i = 0; i < size / sizeof(uint64_t); ++i) { + if (actual[i] < mValue0 || actual[i] > mValue1) { + return testing::AssertionFailure() + << "Expected data[" << i << "] to be between " << mValue0 << " and " + << mValue1 << ", actual " << actual[i] << std::endl; + } + } + + return testing::AssertionSuccess(); + } + + private: + uint64_t mValue0; + uint64_t mValue1; + }; + + } // anonymous namespace + + class D3D12GPUTimestampCalibrationTests : public DawnTest { + protected: + void SetUp() override { + DawnTest::SetUp(); + + DAWN_TEST_UNSUPPORTED_IF(UsesWire()); + // Requires that timestamp query feature is enabled and timestamp query conversion is + // disabled. + DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) || + !HasToggleEnabled("disable_timestamp_query_conversion")); } - private: - uint64_t mValue0; - uint64_t mValue1; + std::vector GetRequiredFeatures() override { + std::vector requiredFeatures = {}; + if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) { + requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery); + } + return requiredFeatures; + } }; -} // anonymous namespace + // Check that the timestamps got by timestamp query are between the two timestamps from + // GetClockCalibration() after the timestamp conversion is disabled. 
+ TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) { + constexpr uint32_t kQueryCount = 2; -using namespace dawn::native::d3d12; + wgpu::QuerySetDescriptor querySetDescriptor; + querySetDescriptor.count = kQueryCount; + querySetDescriptor.type = wgpu::QueryType::Timestamp; + wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor); -class D3D12GPUTimestampCalibrationTests : public DawnTest { - protected: - void SetUp() override { - DawnTest::SetUp(); + wgpu::BufferDescriptor bufferDescriptor; + bufferDescriptor.size = kQueryCount * sizeof(uint64_t); + bufferDescriptor.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | + wgpu::BufferUsage::CopyDst; + wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor); - DAWN_TEST_UNSUPPORTED_IF(UsesWire()); - // Requires that timestamp query feature is enabled and timestamp query conversion is - // disabled. - DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) || - !HasToggleEnabled("disable_timestamp_query_conversion")); + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + encoder.WriteTimestamp(querySet, 0); + encoder.WriteTimestamp(querySet, 1); + wgpu::CommandBuffer commands = encoder.Finish(); + + Device* d3DDevice = reinterpret_cast(device.Get()); + uint64_t gpuTimestamp0, gpuTimestamp1; + uint64_t cpuTimestamp0, cpuTimestamp1; + d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0); + queue.Submit(1, &commands); + WaitForAllOperations(); + d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1); + + // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp, + // so that the timestamp in the querySet will be closer to both gpuTimestamps from + // GetClockCalibration. 
+ wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder(); + resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0); + wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish(); + queue.Submit(1, &resolveCommands); + + EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), + new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1)); } - std::vector GetRequiredFeatures() override { - std::vector requiredFeatures = {}; - if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) { - requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery); - } - return requiredFeatures; - } -}; + DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests, + D3D12Backend({"disable_timestamp_query_conversion"})); -// Check that the timestamps got by timestamp query are between the two timestamps from -// GetClockCalibration() after the timestamp conversion is disabled. -TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) { - constexpr uint32_t kQueryCount = 2; - - wgpu::QuerySetDescriptor querySetDescriptor; - querySetDescriptor.count = kQueryCount; - querySetDescriptor.type = wgpu::QueryType::Timestamp; - wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor); - - wgpu::BufferDescriptor bufferDescriptor; - bufferDescriptor.size = kQueryCount * sizeof(uint64_t); - bufferDescriptor.usage = - wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst; - wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor); - - wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); - encoder.WriteTimestamp(querySet, 0); - encoder.WriteTimestamp(querySet, 1); - wgpu::CommandBuffer commands = encoder.Finish(); - - Device* d3DDevice = reinterpret_cast(device.Get()); - uint64_t gpuTimestamp0, gpuTimestamp1; - uint64_t cpuTimestamp0, cpuTimestamp1; - d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0); - queue.Submit(1, &commands); - WaitForAllOperations(); - 
d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1); - - // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp, - // so that the timestamp in the querySet will be closer to both gpuTimestamps from - // GetClockCalibration. - wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder(); - resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0); - wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish(); - queue.Submit(1, &resolveCommands); - - EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t), - new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1)); -} - -DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests, - D3D12Backend({"disable_timestamp_query_conversion"})); +} // namespace dawn::native::d3d12 diff --git a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp index ab3eac24d8..911b2eb96b 100644 --- a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp +++ b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp @@ -18,91 +18,93 @@ #include "dawn/native/d3d12/TextureD3D12.h" #include "dawn/tests/DawnTest.h" -using namespace dawn::native::d3d12; +namespace dawn::native::d3d12 { -class D3D12ResourceHeapTests : public DawnTest { - protected: - void SetUp() override { - DawnTest::SetUp(); - DAWN_TEST_UNSUPPORTED_IF(UsesWire()); - } - - std::vector GetRequiredFeatures() override { - mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}); - if (!mIsBCFormatSupported) { - return {}; + class D3D12ResourceHeapTests : public DawnTest { + protected: + void SetUp() override { + DawnTest::SetUp(); + DAWN_TEST_UNSUPPORTED_IF(UsesWire()); } - return {wgpu::FeatureName::TextureCompressionBC}; + std::vector GetRequiredFeatures() override { + mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}); + if (!mIsBCFormatSupported) { + return {}; + } + + return 
{wgpu::FeatureName::TextureCompressionBC}; + } + + bool IsBCFormatSupported() const { + return mIsBCFormatSupported; + } + + private: + bool mIsBCFormatSupported = false; + }; + + // Verify that creating a small compressed textures will be 4KB aligned. + TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) { + DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported()); + + // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment. + DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP()); + + wgpu::TextureDescriptor descriptor; + descriptor.dimension = wgpu::TextureDimension::e2D; + descriptor.size.width = 8; + descriptor.size.height = 8; + descriptor.size.depthOrArrayLayers = 1; + descriptor.sampleCount = 1; + descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm; + descriptor.mipLevelCount = 1; + descriptor.usage = wgpu::TextureUsage::TextureBinding; + + // Create a smaller one that allows use of the smaller alignment. + wgpu::Texture texture = device.CreateTexture(&descriptor); + Texture* d3dTexture = reinterpret_cast(texture.Get()); + + EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment, + static_cast(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT)); + + // Create a larger one (>64KB) that forbids use the smaller alignment. + descriptor.size.width = 4096; + descriptor.size.height = 4096; + + texture = device.CreateTexture(&descriptor); + d3dTexture = reinterpret_cast(texture.Get()); + + EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment, + static_cast(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT)); } - bool IsBCFormatSupported() const { - return mIsBCFormatSupported; + // Verify creating a UBO will always be 256B aligned. 
+ TEST_P(D3D12ResourceHeapTests, AlignUBO) { + // Create a small UBO + wgpu::BufferDescriptor descriptor; + descriptor.size = 4 * 1024; + descriptor.usage = wgpu::BufferUsage::Uniform; + + wgpu::Buffer buffer = device.CreateBuffer(&descriptor); + Buffer* d3dBuffer = reinterpret_cast(buffer.Get()); + + EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width % + static_cast(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)), + 0u); + + // Create a larger UBO + descriptor.size = (4 * 1024 * 1024) + 255; + descriptor.usage = wgpu::BufferUsage::Uniform; + + buffer = device.CreateBuffer(&descriptor); + d3dBuffer = reinterpret_cast(buffer.Get()); + + EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width % + static_cast(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)), + 0u); } - private: - bool mIsBCFormatSupported = false; -}; + DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend()); -// Verify that creating a small compressed textures will be 4KB aligned. -TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) { - DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported()); - - // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment. - DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP()); - - wgpu::TextureDescriptor descriptor; - descriptor.dimension = wgpu::TextureDimension::e2D; - descriptor.size.width = 8; - descriptor.size.height = 8; - descriptor.size.depthOrArrayLayers = 1; - descriptor.sampleCount = 1; - descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm; - descriptor.mipLevelCount = 1; - descriptor.usage = wgpu::TextureUsage::TextureBinding; - - // Create a smaller one that allows use of the smaller alignment. 
- wgpu::Texture texture = device.CreateTexture(&descriptor); - Texture* d3dTexture = reinterpret_cast(texture.Get()); - - EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment, - static_cast(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT)); - - // Create a larger one (>64KB) that forbids use the smaller alignment. - descriptor.size.width = 4096; - descriptor.size.height = 4096; - - texture = device.CreateTexture(&descriptor); - d3dTexture = reinterpret_cast(texture.Get()); - - EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment, - static_cast(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT)); -} - -// Verify creating a UBO will always be 256B aligned. -TEST_P(D3D12ResourceHeapTests, AlignUBO) { - // Create a small UBO - wgpu::BufferDescriptor descriptor; - descriptor.size = 4 * 1024; - descriptor.usage = wgpu::BufferUsage::Uniform; - - wgpu::Buffer buffer = device.CreateBuffer(&descriptor); - Buffer* d3dBuffer = reinterpret_cast(buffer.Get()); - - EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width % - static_cast(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)), - 0u); - - // Create a larger UBO - descriptor.size = (4 * 1024 * 1024) + 255; - descriptor.usage = wgpu::BufferUsage::Uniform; - - buffer = device.CreateBuffer(&descriptor); - d3dBuffer = reinterpret_cast(buffer.Get()); - - EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width % - static_cast(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)), - 0u); -} - -DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend()); +} // namespace dawn::native::d3d12