Fix build/namespaces issues

This CL fixes up various build/namespaces lint errors and enables
the lint check.

Bug: dawn:1339
Change-Id: Ib2edd0019cb010e2c6226abce6cfee50a0b4b763
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87482
Kokoro: Kokoro <noreply+kokoro@google.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Author: dan sinclair, 2022-04-21 16:46:56 +00:00
Committer: Dawn LUCI CQ
Commit: b0acbd436d (parent 6cb57a9847)

44 changed files with 7683 additions and 7402 deletions
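The change applies two patterns across the tree, both of which satisfy cpplint's
build/namespaces check. A minimal sketch of each pattern follows; the names here
are illustrative, not taken from any one file in this CL:

    // Before: a using-directive pulls in an entire external namespace and
    // trips the check.
    //   using namespace testing;
    // After: name only what the file actually uses.
    using testing::InSequence;
    using testing::MockCallback;

    // Before: test files imported the library namespace wholesale.
    //   using namespace dawn::native;
    // After: reopen the namespace around the test code instead.
    namespace dawn::native {

    TEST(ExampleTests, Example) {
        // Library types such as BuddyAllocator resolve unqualified here.
    }

    }  // namespace dawn::native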


@@ -1,3 +1,2 @@
-filter=-build/namespaces
 filter=-readability/todo
 filter=-runtime/indentation_namespace


@@ -32,9 +32,7 @@
 namespace dawn::native::metal {

-namespace {
-struct KalmanInfo;
-}
+struct KalmanInfo;

 class Device final : public DeviceBase {
   public:


@@ -42,18 +42,18 @@
 namespace dawn::native::metal {

+struct KalmanInfo {
+    float filterValue;  // The estimation value
+    float kalmanGain;   // The kalman gain
+    float R;            // The covariance of the observation noise
+    float P;            // The a posteriori estimate covariance
+};
+
 namespace {

 // The time interval for each round of kalman filter
 static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);

-struct KalmanInfo {
-    float filterValue;  // The estimation value
-    float kalmanGain;   // The kalman gain
-    float R;            // The covariance of the observation noise
-    float P;            // The a posteriori estimate covariance
-};
-
 // A simplified kalman filter for estimating timestamp period based on measured values
 float KalmanFilter(KalmanInfo* info, float measuredValue) {
     // Optimize kalman gain
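The hunk above ends just inside KalmanFilter. For orientation, a scalar Kalman
update over these fields follows the textbook recurrence sketched below; this
is a hedged illustration of that recurrence, not the verbatim body from the
Metal backend:

    // Illustrative one-dimensional Kalman update over the KalmanInfo fields
    // declared above (a sketch only; the real body is truncated in this hunk).
    float KalmanFilterSketch(KalmanInfo* info, float measuredValue) {
        // The gain rises when the estimate covariance P dominates the
        // observation noise R, i.e. when the measurement is more trustworthy
        // than the current estimate.
        info->kalmanGain = info->P / (info->P + info->R);

        // Fold the measurement into the running estimate.
        info->filterValue += info->kalmanGain * (measuredValue - info->filterValue);

        // Incorporating a measurement shrinks the a posteriori covariance.
        info->P = (1.0f - info->kalmanGain) * info->P;

        return info->filterValue;
    }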


@@ -45,7 +45,9 @@
 namespace {

-using namespace testing;
+using testing::_;
+using testing::MockCallback;
+using testing::SaveArg;

 class AdapterDiscoveryTests : public ::testing::Test {};


@@ -22,7 +22,9 @@
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"

-using namespace testing;
+using testing::_;
+using testing::Exactly;
+using testing::MockCallback;

 class MockDeviceLostCallback {
   public:


@@ -17,7 +17,7 @@
 #include "dawn/tests/DawnTest.h"
 #include "gmock/gmock.h"

-using namespace testing;
+using testing::InSequence;

 class MockMapCallback {
   public:
@@ -67,7 +67,7 @@ class QueueTimelineTests : public DawnTest {
 // when queue.OnSubmittedWorkDone is called after mMapReadBuffer.MapAsync. The callback order should
 // happen in the order the functions are called.
 TEST_P(QueueTimelineTests, MapRead_OnWorkDone) {
-    testing::InSequence sequence;
+    InSequence sequence;
     EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1);
     EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
@@ -83,7 +83,7 @@ TEST_P(QueueTimelineTests, MapRead_OnWorkDone) {
 // queue.Signal is called before mMapReadBuffer.MapAsync. The callback order should
 // happen in the order the functions are called.
 TEST_P(QueueTimelineTests, OnWorkDone_MapRead) {
-    testing::InSequence sequence;
+    InSequence sequence;
     EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
     EXPECT_CALL(*mockMapCallback, Call(WGPUBufferMapAsyncStatus_Success, this)).Times(1);


@@ -17,313 +17,315 @@
 #include "dawn/native/BuddyAllocator.h"
 #include "gtest/gtest.h"

-using namespace dawn::native;
+namespace dawn::native {

 constexpr uint64_t BuddyAllocator::kInvalidOffset;

 // Verify the buddy allocator with a basic test.
 TEST(BuddyAllocatorTests, SingleBlock) {
     // After one 32 byte allocation:
     //
     //  Level          --------------------------------
     //      0       32 |               A              |
     //                 --------------------------------
     //
     constexpr uint64_t maxBlockSize = 32;
     BuddyAllocator allocator(maxBlockSize);

     // Check that we cannot allocate an oversized block.
     ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);

     // Check that we cannot allocate a zero sized block.
     ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset);

     // Allocate the block.
     uint64_t blockOffset = allocator.Allocate(maxBlockSize);
     ASSERT_EQ(blockOffset, 0u);

     // Check that we are full.
     ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);

     // Deallocate the block.
     allocator.Deallocate(blockOffset);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 }

 // Verify multiple allocations succeed using a buddy allocator.
 TEST(BuddyAllocatorTests, MultipleBlocks) {
     // Fill every level in the allocator (order-n = 2^n)
     const uint64_t maxBlockSize = (1ull << 16);
     for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) {
         BuddyAllocator allocator(maxBlockSize);

         uint64_t blockSize = (1ull << order);
         for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) {
             ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki);
         }
     }
 }

 // Verify that a single allocation succeeds using a buddy allocator.
 TEST(BuddyAllocatorTests, SingleSplitBlock) {
     // After one 8 byte allocation:
     //
     //  Level          --------------------------------
     //      0       32 |               S              |
     //                 --------------------------------
     //      1       16 |       S      |       F      |   S - split
     //                 --------------------------------   F - free
     //      2        8 |   A  |   F  |              |    A - allocated
     //                 --------------------------------
     //
     constexpr uint64_t maxBlockSize = 32;
     BuddyAllocator allocator(maxBlockSize);

     // Allocate block (splits two blocks).
     uint64_t blockOffset = allocator.Allocate(8);
     ASSERT_EQ(blockOffset, 0u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Deallocate block (merges two blocks).
     allocator.Deallocate(blockOffset);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);

     // Check that we cannot allocate a block that is oversized.
     ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);

     // Re-allocate the largest block allowed after merging.
     blockOffset = allocator.Allocate(maxBlockSize);
     ASSERT_EQ(blockOffset, 0u);

     allocator.Deallocate(blockOffset);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 }

 // Verify that multiple allocated blocks can be removed from the free-list.
 TEST(BuddyAllocatorTests, MultipleSplitBlocks) {
     // After four 8 byte allocations:
     //
     //  Level          --------------------------------
     //      0       32 |               S              |
     //                 --------------------------------
     //      1       16 |       S      |       S      |   S - split
     //                 --------------------------------   F - free
     //      2        8 |  Aa  |  Ab  |  Ac  |  Ad  |    A - allocated
     //                 --------------------------------
     //
     constexpr uint64_t maxBlockSize = 32;
     BuddyAllocator allocator(maxBlockSize);

     // Populates the free-list with four blocks at Level2.

     // Allocate "a" block (two splits).
     constexpr uint64_t blockSizeInBytes = 8;
     uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes);
     ASSERT_EQ(blockOffsetA, 0u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Allocate "b" block.
     uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes);
     ASSERT_EQ(blockOffsetB, blockSizeInBytes);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);

     // Allocate "c" block (three splits).
     uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes);
     ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);

     // Allocate "d" block.
     uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes);
     ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);

     // Deallocate "d" block.
     // FreeList[Level2] = [BlockD] -> x
     allocator.Deallocate(blockOffsetD);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);

     // Deallocate "b" block.
     // FreeList[Level2] = [BlockB] -> [BlockD] -> x
     allocator.Deallocate(blockOffsetB);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Deallocate "c" block (one merge).
     // FreeList[Level1] = [BlockCD] -> x
     // FreeList[Level2] = [BlockB] -> x
     allocator.Deallocate(blockOffsetC);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Deallocate "a" block (two merges).
     // FreeList[Level0] = [BlockABCD] -> x
     allocator.Deallocate(blockOffsetA);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 }

 // Verify the buddy allocator can handle allocations of various sizes.
 TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) {
     // After one allocation at each of Level4 through Level1, then one more Level4 block:
     //
     //  Level          -----------------------------------------------------------------
     //      0      512 |                               S                               |
     //                 -----------------------------------------------------------------
     //      1      256 |               S               |               A               |
     //                 -----------------------------------------------------------------
     //      2      128 |       S       |       A       |               |               |
     //                 -----------------------------------------------------------------
     //      3       64 |   S   |   A   |       |       |       |       |       |       |
     //                 -----------------------------------------------------------------
     //      4       32 | A | F |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
     //                 -----------------------------------------------------------------
     //
     constexpr uint64_t maxBlockSize = 512;
     BuddyAllocator allocator(maxBlockSize);

     ASSERT_EQ(allocator.Allocate(32), 0ull);
     ASSERT_EQ(allocator.Allocate(64), 64ull);
     ASSERT_EQ(allocator.Allocate(128), 128ull);
     ASSERT_EQ(allocator.Allocate(256), 256ull);

     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);

     // Fill in the last free block.
     ASSERT_EQ(allocator.Allocate(32), 32ull);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);

     // Check if we're full.
     ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset);
 }

 // Verify very small allocations using a larger allocator work correctly.
 TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) {
     // After allocating four pairs of one 64 byte block and one 32 byte block.
     //
     //  Level          -----------------------------------------------------------------
     //      0      512 |                               S                               |
     //                 -----------------------------------------------------------------
     //      1      256 |               S               |               S               |
     //                 -----------------------------------------------------------------
     //      2      128 |       S       |       S       |       S       |       F       |
     //                 -----------------------------------------------------------------
     //      3       64 |   A   |   S   |   A   |   A   |   S   |   A   |       |       |
     //                 -----------------------------------------------------------------
     //      4       32 |   |   | A | A |   |   |   |   | A | A |   |   |   |   |   |   |
     //                 -----------------------------------------------------------------
     //
     constexpr uint64_t maxBlockSize = 512;
     BuddyAllocator allocator(maxBlockSize);

     ASSERT_EQ(allocator.Allocate(64), 0ull);
     ASSERT_EQ(allocator.Allocate(32), 64ull);

     ASSERT_EQ(allocator.Allocate(64), 128ull);
     ASSERT_EQ(allocator.Allocate(32), 96ull);

     ASSERT_EQ(allocator.Allocate(64), 192ull);
     ASSERT_EQ(allocator.Allocate(32), 256ull);

     ASSERT_EQ(allocator.Allocate(64), 320ull);
     ASSERT_EQ(allocator.Allocate(32), 288ull);

     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 }

 // Verify the buddy allocator can deal with bad fragmentation.
 TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
     // Allocate every leaf then de-allocate every other of those allocations.
     //
     //  Level          -----------------------------------------------------------------
     //      0      512 |                               S                               |
     //                 -----------------------------------------------------------------
     //      1      256 |               S               |               S               |
     //                 -----------------------------------------------------------------
     //      2      128 |       S       |       S       |       S       |       S       |
     //                 -----------------------------------------------------------------
     //      3       64 |   S   |   S   |   S   |   S   |   S   |   S   |   S   |   S   |
     //                 -----------------------------------------------------------------
     //      4       32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F |
     //                 -----------------------------------------------------------------
     //
     constexpr uint64_t maxBlockSize = 512;
     BuddyAllocator allocator(maxBlockSize);

     // Allocate leaf blocks
     constexpr uint64_t minBlockSizeInBytes = 32;
     std::vector<uint64_t> blockOffsets;
     for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) {
         blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes));
     }

     // Free every other leaf block.
     for (size_t count = 1; count < blockOffsets.size(); count += 2) {
         allocator.Deallocate(blockOffsets[count]);
     }

     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u);
 }

 // Verify the buddy allocator can deal with multiple allocations with mixed alignments.
 TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
     // After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8 byte
     // alignment.
     //
     //  Level          --------------------------------
     //      0       32 |               S              |
     //                 --------------------------------
     //      1       16 |       S      |       S      |   S - split
     //                 --------------------------------   F - free
     //      2        8 |  Aa  |   F  |  Ab  |  Ac  |    A - allocated
     //                 --------------------------------
     //
     BuddyAllocator allocator(32);

     // Allocate Aa (two splits).
     ASSERT_EQ(allocator.Allocate(8, 16), 0u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Allocate Ab (skip Aa's buddy due to alignment and perform another split).
     ASSERT_EQ(allocator.Allocate(8, 16), 16u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Check that we cannot fit another.
     ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset);

     // Allocate Ac (zero splits and Ab's buddy is now the first free block).
     ASSERT_EQ(allocator.Allocate(8, 8), 24u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 }

 // Verify the buddy allocator can deal with multiple allocations with equal alignments.
 TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
     // After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4 byte
     // alignment.
     //
     //  Level          --------------------------------
     //      0       32 |               S              |
     //                 --------------------------------
     //      1       16 |       S      |      Ac      |   S - split
     //                 --------------------------------   F - free
     //      2        8 |  Aa  |  Ab  |               |   A - allocated
     //                 --------------------------------
     //
     constexpr uint64_t maxBlockSize = 32;
     constexpr uint64_t alignment = 4;
     BuddyAllocator allocator(maxBlockSize);

     // Allocate block Aa (two splits)
     ASSERT_EQ(allocator.Allocate(8, alignment), 0u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);

     // Allocate block Ab (Aa's buddy)
     ASSERT_EQ(allocator.Allocate(8, alignment), 8u);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);

     // Check that we can still allocate Ac.
     ASSERT_EQ(allocator.Allocate(16, alignment), 16ull);
     ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
 }
+
+}  // namespace dawn::native
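One invariant worth keeping in mind while reading these tests: with
power-of-two block sizes, a block's buddy sits at the block's own offset with
the size bit flipped, which is what lets deallocation find its merge candidate
in constant time. A hedged sketch of that pairing rule follows; it is an
illustration of the general technique, not Dawn's BuddyAllocator internals,
which track splits through a block tree:

    #include <cstdint>

    // For a power-of-two-sized block placed at a multiple of its size, the
    // buddy's offset differs only in the bit corresponding to the block size.
    uint64_t BuddyOffsetOf(uint64_t offset, uint64_t size) {
        return offset ^ size;
    }

    // Example from MultipleSplitBlocks above: block "c" (size 8, offset 16)
    // pairs with block "d" at BuddyOffsetOf(16, 8) == 24; once both are free,
    // the allocator can merge them back into the 16 byte Level1 block.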


@ -22,440 +22,443 @@
#include "dawn/native/PooledResourceMemoryAllocator.h" #include "dawn/native/PooledResourceMemoryAllocator.h"
#include "dawn/native/ResourceHeapAllocator.h" #include "dawn/native/ResourceHeapAllocator.h"
using namespace dawn::native; namespace dawn::native {
class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator { class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
public: public:
ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override { ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
return std::make_unique<ResourceHeapBase>(); uint64_t size) override {
} return std::make_unique<ResourceHeapBase>();
void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override { }
} void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
}; }
};
class PlaceholderBuddyResourceAllocator { class PlaceholderBuddyResourceAllocator {
public: public:
PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize) PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
: mAllocator(maxBlockSize, memorySize, &mHeapAllocator) { : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {
}
PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
uint64_t memorySize,
ResourceHeapAllocator* heapAllocator)
: mAllocator(maxBlockSize, memorySize, heapAllocator) {
}
ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) {
ResultOrError<ResourceMemoryAllocation> result =
mAllocator.Allocate(allocationSize, alignment);
return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{};
}
void Deallocate(ResourceMemoryAllocation& allocation) {
mAllocator.Deallocate(allocation);
}
uint64_t ComputeTotalNumOfHeapsForTesting() const {
return mAllocator.ComputeTotalNumOfHeapsForTesting();
}
private:
PlaceholderResourceHeapAllocator mHeapAllocator;
BuddyMemoryAllocator mAllocator;
};
// Verify a single resource allocation in a single heap.
TEST(BuddyMemoryAllocatorTests, SingleHeap) {
// After one 128 byte resource allocation:
//
// max block size -> ---------------------------
// | A1/H0 | Hi - Heap at index i
// max heap size -> --------------------------- An - Resource allocation n
//
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = heapSize;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Cannot allocate greater than heap size.
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
// Allocate one 128 byte allocation (same size as heap).
ResourceMemoryAllocation allocation1 = allocator.Allocate(128);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
// Cannot allocate when allocator is full.
invalidAllocation = allocator.Allocate(128);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
} }
PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, // Verify that multiple allocation are created in separate heaps.
uint64_t memorySize, TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
ResourceHeapAllocator* heapAllocator) // After two 128 byte resource allocations:
: mAllocator(maxBlockSize, memorySize, heapAllocator) { //
// max block size -> ---------------------------
// | | Hi - Heap at index i
// max heap size -> --------------------------- An - Resource allocation n
// | A1/H0 | A2/H1 |
// ---------------------------
//
constexpr uint64_t maxBlockSize = 256;
constexpr uint64_t heapSize = 128;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Cannot allocate greater than heap size.
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
// Cannot allocate greater than max block size.
invalidAllocation = allocator.Allocate(maxBlockSize * 2);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
// Allocate two 128 byte allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// First allocation creates first heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// Second allocation creates second heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
// Deallocate both allocations
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0
allocator.Deallocate(allocation2);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1
} }
ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) { // Verify multiple sub-allocations can re-use heaps.
ResultOrError<ResourceMemoryAllocation> result = TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
mAllocator.Allocate(allocationSize, alignment); // After two 64 byte allocations with 128 byte heaps.
return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{}; //
// max block size -> ---------------------------
// | | Hi - Heap at index i
// max heap size -> --------------------------- An - Resource allocation n
// | H0 | H1 |
// ---------------------------
// | A1 | A2 | A3 | |
// ---------------------------
//
constexpr uint64_t maxBlockSize = 256;
constexpr uint64_t heapSize = 128;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Allocate two 64 byte sub-allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// First sub-allocation creates first heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// Second allocation re-uses first heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// Third allocation creates second heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
// Deallocate all allocations in reverse order.
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
2u); // A2 pins H0.
allocator.Deallocate(allocation2);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0
allocator.Deallocate(allocation3);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1
} }
void Deallocate(ResourceMemoryAllocation& allocation) { // Verify resource sub-allocation of various sizes over multiple heaps.
mAllocator.Deallocate(allocation); TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
// After three 64 byte allocations and two 128 byte allocations.
//
// max block size -> -------------------------------------------------------
// | |
// -------------------------------------------------------
// | | |
// max heap size -> -------------------------------------------------------
// | H0 | A3/H1 | H2 | A5/H3 |
// -------------------------------------------------------
// | A1 | A2 | | A4 | | |
// -------------------------------------------------------
//
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Allocate two 64-byte allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetOffset(), 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
ASSERT_EQ(allocation2.GetOffset(), 64u);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// A1 and A2 share H0
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
ASSERT_EQ(allocation3.GetOffset(), 0u);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// A3 creates and fully occupies a new heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
ASSERT_EQ(allocation4.GetOffset(), 0u);
ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
// R5 size forms 64 byte hole after R4.
ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
ASSERT_EQ(allocation5.GetOffset(), 0u);
ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
// Deallocate allocations in staggered order.
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u); // A2 pins H0
allocator.Deallocate(allocation5);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); // Released H3
allocator.Deallocate(allocation2);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); // Released H0
allocator.Deallocate(allocation4);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H2
allocator.Deallocate(allocation3);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1
} }
uint64_t ComputeTotalNumOfHeapsForTesting() const { // Verify resource sub-allocation of same sizes with various alignments.
return mAllocator.ComputeTotalNumOfHeapsForTesting(); TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
// After three 64 byte and one 128 byte resource allocations.
//
// max block size -> -------------------------------------------------------
// | |
// -------------------------------------------------------
// | | |
// max heap size -> -------------------------------------------------------
// | H0 | H1 | H2 | |
// -------------------------------------------------------
// | A1 | | A2 | | A3 | A4 | |
// -------------------------------------------------------
//
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetOffset(), 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
ASSERT_EQ(allocation2.GetOffset(), 0u);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
ASSERT_EQ(allocation3.GetOffset(), 0u);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
ASSERT_EQ(allocation4.GetOffset(), 64u);
ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
} }
private: // Verify resource sub-allocation of various sizes with same alignments.
PlaceholderResourceHeapAllocator mHeapAllocator; TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
BuddyMemoryAllocator mAllocator; // After two 64 byte and two 128 byte resource allocations:
}; //
// max block size -> -------------------------------------------------------
// Verify a single resource allocation in a single heap. // | |
TEST(BuddyMemoryAllocatorTests, SingleHeap) { // -------------------------------------------------------
// After one 128 byte resource allocation: // | | |
// // max heap size -> -------------------------------------------------------
// max block size -> --------------------------- // | H0 | A3/H1 | A4/H2 | |
// | A1/H0 | Hi - Heap at index i // -------------------------------------------------------
// max heap size -> --------------------------- An - Resource allocation n // | A1 | A2 | | | |
// // -------------------------------------------------------
constexpr uint64_t heapSize = 128; //
constexpr uint64_t maxBlockSize = heapSize; constexpr uint64_t heapSize = 128;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Cannot allocate greater than heap size.
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); constexpr uint64_t alignment = 64;
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
// Allocate one 128 byte allocation (same size as heap). ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ResourceMemoryAllocation allocation1 = allocator.Allocate(128); ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated); ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
// Cannot allocate when allocator is full. ASSERT_EQ(allocation2.GetOffset(), 64u);
invalidAllocation = allocator.Allocate(128); ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Reuses H0
allocator.Deallocate(allocation1); ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
} ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
// Verify that multiple allocation are created in separate heaps. ASSERT_EQ(allocation3.GetOffset(), 0u);
TEST(BuddyMemoryAllocatorTests, MultipleHeaps) { ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// After two 128 byte resource allocations:
// ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
// max block size -> --------------------------- ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
// | | Hi - Heap at index i
// max heap size -> --------------------------- An - Resource allocation n ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
// | A1/H0 | A2/H1 | ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
// --------------------------- ASSERT_EQ(allocation4.GetOffset(), 0u);
// ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
constexpr uint64_t maxBlockSize = 256;
constexpr uint64_t heapSize = 128; ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize); ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
// Cannot allocate greater than heap size.
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
// Cannot allocate greater than max block size.
invalidAllocation = allocator.Allocate(maxBlockSize * 2);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
// Allocate two 128 byte allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// First allocation creates first heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// Second allocation creates second heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
// Deallocate both allocations
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0
allocator.Deallocate(allocation2);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1
}
// Verify multiple sub-allocations can re-use heaps.
TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
// After two 64 byte allocations with 128 byte heaps.
//
// max block size -> ---------------------------
// | | Hi - Heap at index i
// max heap size -> --------------------------- An - Resource allocation n
// | H0 | H1 |
// ---------------------------
// | A1 | A2 | A3 | |
// ---------------------------
//
constexpr uint64_t maxBlockSize = 256;
constexpr uint64_t heapSize = 128;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Allocate two 64 byte sub-allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// First sub-allocation creates first heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// Second allocation re-uses first heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// Third allocation creates second heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
// Deallocate all allocations in reverse order.
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
2u); // A2 pins H0.
allocator.Deallocate(allocation2);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H0
allocator.Deallocate(allocation3);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1
}
// Verify resource sub-allocation of various sizes over multiple heaps.
TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
// After three 64 byte allocations and two 128 byte allocations.
//
// max block size -> -------------------------------------------------------
// | |
// -------------------------------------------------------
// | | |
// max heap size -> -------------------------------------------------------
// | H0 | A3/H1 | H2 | A5/H3 |
// -------------------------------------------------------
// | A1 | A2 | | A4 | | |
// -------------------------------------------------------
//
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Allocate two 64-byte allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetOffset(), 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
ASSERT_EQ(allocation2.GetOffset(), 64u);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// A1 and A2 share H0
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
ASSERT_EQ(allocation3.GetOffset(), 0u);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
// A3 creates and fully occupies a new heap.
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
ASSERT_EQ(allocation4.GetOffset(), 0u);
ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
// R5 size forms 64 byte hole after R4.
ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
ASSERT_EQ(allocation5.GetOffset(), 0u);
ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
// Deallocate allocations in staggered order.
allocator.Deallocate(allocation1);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u); // A2 pins H0
allocator.Deallocate(allocation5);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u); // Released H3
allocator.Deallocate(allocation2);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u); // Released H0
allocator.Deallocate(allocation4);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Released H2
allocator.Deallocate(allocation3);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u); // Released H1
}
// Verify resource sub-allocation of same sizes with various alignments.
TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
// After three 64 byte and one 128 byte resource allocations.
//
// max block size -> -------------------------------------------------------
// | |
// -------------------------------------------------------
// | | |
// max heap size -> -------------------------------------------------------
// | H0 | H1 | H2 | |
// -------------------------------------------------------
// | A1 | | A2 | | A3 | A4 | |
// -------------------------------------------------------
//
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetOffset(), 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
ASSERT_EQ(allocation2.GetOffset(), 0u);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
ASSERT_EQ(allocation3.GetOffset(), 0u);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
ASSERT_EQ(allocation4.GetOffset(), 64u);
ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
}
// Verify resource sub-allocation of various sizes with same alignments.
TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
// After two 64 byte and two 128 byte resource allocations:
//
// max block size -> -------------------------------------------------------
// | |
// -------------------------------------------------------
// | | |
// max heap size -> -------------------------------------------------------
// | H0 | A3/H1 | A4/H2 | |
// -------------------------------------------------------
// | A1 | A2 | | | |
// -------------------------------------------------------
//
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
constexpr uint64_t alignment = 64;
ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
ASSERT_EQ(allocation2.GetOffset(), 64u);
ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u); // Reuses H0
ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
ASSERT_EQ(allocation3.GetOffset(), 0u);
ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
ASSERT_EQ(allocation4.GetOffset(), 0u);
ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
}
// Verify allocating a very large resource does not overflow.
TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512;
PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
constexpr uint64_t largeBlock = (1ull << 63) + 1;
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
}
// Verify resource heaps will be reused from a pool.
TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
constexpr uint64_t kHeapSize = 128;
constexpr uint64_t kMaxBlockSize = 4096;
PlaceholderResourceHeapAllocator heapAllocator;
PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
std::set<ResourceHeapBase*> heaps = {};
std::vector<ResourceMemoryAllocation> allocations = {};
constexpr uint32_t kNumOfAllocations = 100;
// Allocate |kNumOfAllocations|.
for (uint32_t i = 0; i < kNumOfAllocations; i++) {
ResourceMemoryAllocation allocation = allocator.Allocate(4);
ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
heaps.insert(allocation.GetResourceHeap());
allocations.push_back(std::move(allocation));
}
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
// Return the allocations to the pool.
for (ResourceMemoryAllocation& allocation : allocations) {
allocator.Deallocate(allocation);
}
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size());
// Allocate again reusing the same heaps.
for (uint32_t i = 0; i < kNumOfAllocations; i++) {
ResourceMemoryAllocation allocation = allocator.Allocate(4);
ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second);
}
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
}
// Verify resource heaps that were reused from a pool can be destroyed.
TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
constexpr uint64_t kHeapSize = 128;
constexpr uint64_t kMaxBlockSize = 4096;
PlaceholderResourceHeapAllocator heapAllocator;
PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
std::set<ResourceHeapBase*> heaps = {};
std::vector<ResourceMemoryAllocation> allocations = {};
// Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth of
// buffers. Otherwise, the heap may be reused if not full.
constexpr uint32_t kNumOfHeaps = 10;
// Allocate |kNumOfHeaps| worth.
while (heaps.size() < kNumOfHeaps) {
ResourceMemoryAllocation allocation = allocator.Allocate(4);
ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
heaps.insert(allocation.GetResourceHeap());
allocations.push_back(std::move(allocation));
}
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
// Return the allocations to the pool.
for (ResourceMemoryAllocation& allocation : allocations) {
allocator.Deallocate(allocation);
}
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps);
// Make sure we can destroy the remaining heaps.
poolAllocator.DestroyPool();
ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
}
+} // namespace dawn::native
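
Aside for readers tracking the offsets asserted above: mBlockOffset is global across the buddy space, while GetOffset() is relative to the heap backing the block. A minimal sketch of that mapping, assuming fixed-size heaps (hypothetical helper, not part of Dawn's allocator API):

#include <cassert>
#include <cstdint>

// Hypothetical helper: maps a global buddy block offset to the backing heap
// index and the offset within that heap, assuming fixed-size heaps.
struct HeapLocation {
    uint64_t heapIndex;
    uint64_t offsetInHeap;
};

HeapLocation LocateBlock(uint64_t blockOffset, uint64_t heapSize) {
    return {blockOffset / heapSize, blockOffset % heapSize};
}

int main() {
    constexpr uint64_t kHeapSize = 128;
    // allocation2 above: mBlockOffset 64 stays in heap 0, so GetOffset() is 64.
    assert(LocateBlock(64, kHeapSize).heapIndex == 0);
    assert(LocateBlock(64, kHeapSize).offsetInHeap == 64);
    // allocation3 above: mBlockOffset 128 starts heap 1, so GetOffset() is 0.
    assert(LocateBlock(128, kHeapSize).heapIndex == 1);
    assert(LocateBlock(128, kHeapSize).offsetInHeap == 0);
    return 0;
}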

View File

@ -19,486 +19,490 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "dawn/native/CommandAllocator.h" #include "dawn/native/CommandAllocator.h"
using namespace dawn::native; namespace dawn::native {
// Definition of the command types used in the tests
enum class CommandType {
Draw,
Pipeline,
PushConstants,
Big,
Small,
};
struct CommandDraw {
uint32_t first;
uint32_t count;
};
struct CommandPipeline {
uint64_t pipeline;
uint32_t attachmentPoint;
};
struct CommandPushConstants {
uint8_t size;
uint8_t offset;
};
constexpr int kBigBufferSize = 65536;
struct CommandBig {
uint32_t buffer[kBigBufferSize];
};
struct CommandSmall {
uint16_t data;
};
// Test allocating nothing works
TEST(CommandAllocator, DoNothingAllocator) {
CommandAllocator allocator;
}
// Test iterating over nothing works
TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
CommandAllocator allocator;
CommandIterator iterator(std::move(allocator));
iterator.MakeEmptyAsDataWasDestroyed();
}
// Test basic usage of allocator + iterator
TEST(CommandAllocator, Basic) {
CommandAllocator allocator;
uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
uint32_t myAttachmentPoint = 2;
uint32_t myFirst = 42;
uint32_t myCount = 16;
{
CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
pipeline->pipeline = myPipeline;
pipeline->attachmentPoint = myAttachmentPoint;
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = myFirst;
draw->count = myCount;
}
{
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, myPipeline);
ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.MakeEmptyAsDataWasDestroyed();
}
}
// Test basic usage of allocator + iterator with data
TEST(CommandAllocator, BasicWithData) {
CommandAllocator allocator;
uint8_t mySize = 8;
uint8_t myOffset = 3;
uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
{
CommandPushConstants* pushConstants =
allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
pushConstants->size = mySize;
pushConstants->offset = myOffset;
uint32_t* values = allocator.AllocateData<uint32_t>(5);
for (size_t i = 0; i < 5; i++) {
values[i] = myValues[i];
}
}
{
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::PushConstants);
CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
ASSERT_EQ(pushConstants->size, mySize);
ASSERT_EQ(pushConstants->offset, myOffset);
uint32_t* values = iterator.NextData<uint32_t>(5);
for (size_t i = 0; i < 5; i++) {
ASSERT_EQ(values[i], myValues[i]);
}
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.MakeEmptyAsDataWasDestroyed();
}
}
// Test basic iterating several times
TEST(CommandAllocator, MultipleIterations) {
CommandAllocator allocator;
uint32_t myFirst = 42;
uint32_t myCount = 16;
{
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = myFirst;
draw->count = myCount;
}
{
CommandIterator iterator(std::move(allocator));
CommandType type;
// First iteration
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
// Second iteration
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.MakeEmptyAsDataWasDestroyed();
}
}
// Test large commands work
TEST(CommandAllocator, LargeCommands) {
CommandAllocator allocator;
const int kCommandCount = 5;
uint32_t count = 0;
for (int i = 0; i < kCommandCount; i++) {
CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
for (int j = 0; j < kBigBufferSize; j++) {
big->buffer[j] = count++;
}
}
CommandIterator iterator(std::move(allocator));
CommandType type;
count = 0;
int numCommands = 0;
while (iterator.NextCommandId(&type)) {
ASSERT_EQ(type, CommandType::Big);
CommandBig* big = iterator.NextCommand<CommandBig>();
for (int i = 0; i < kBigBufferSize; i++) {
ASSERT_EQ(big->buffer[i], count);
count++;
}
numCommands++;
}
ASSERT_EQ(numCommands, kCommandCount);
iterator.MakeEmptyAsDataWasDestroyed();
}
// Test many small commands work
TEST(CommandAllocator, ManySmallCommands) {
CommandAllocator allocator;
// Stay under max representable uint16_t
const int kCommandCount = 50000;
uint16_t count = 0;
for (int i = 0; i < kCommandCount; i++) {
CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
small->data = count++;
}
CommandIterator iterator(std::move(allocator));
CommandType type;
count = 0;
int numCommands = 0;
while (iterator.NextCommandId(&type)) {
ASSERT_EQ(type, CommandType::Small);
CommandSmall* small = iterator.NextCommand<CommandSmall>();
ASSERT_EQ(small->data, count);
count++;
numCommands++;
}
ASSERT_EQ(numCommands, kCommandCount);
iterator.MakeEmptyAsDataWasDestroyed();
}
/*        ________
 *       /        \
 *       | POUIC! |
 *       \_ ______/
 *          v
 *   ()_()
 *   (O.o)
 *   (> <)o
 */
// Test usage of iterator.Reset
TEST(CommandAllocator, IteratorReset) {
CommandAllocator allocator;
uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
uint32_t myAttachmentPoint = 2;
uint32_t myFirst = 42;
uint32_t myCount = 16;
{
CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
pipeline->pipeline = myPipeline;
pipeline->attachmentPoint = myAttachmentPoint;
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = myFirst;
draw->count = myCount;
}
{
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, myPipeline);
ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
iterator.Reset();
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, myPipeline);
ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.MakeEmptyAsDataWasDestroyed();
}
}
// Test iterating empty iterators
TEST(CommandAllocator, EmptyIterator) {
{
CommandAllocator allocator;
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.MakeEmptyAsDataWasDestroyed();
}
{
CommandAllocator allocator;
CommandIterator iterator1(std::move(allocator));
CommandIterator iterator2(std::move(iterator1));
CommandType type;
bool hasNext = iterator2.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator1.MakeEmptyAsDataWasDestroyed();
iterator2.MakeEmptyAsDataWasDestroyed();
}
{
CommandIterator iterator1;
CommandIterator iterator2(std::move(iterator1));
CommandType type;
bool hasNext = iterator2.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator1.MakeEmptyAsDataWasDestroyed();
iterator2.MakeEmptyAsDataWasDestroyed();
}
}
template <size_t A>
struct alignas(A) AlignedStruct {
char placeholder;
};
// Test for overflows in Allocate's computations, size 1 variant
TEST(CommandAllocator, AllocationOverflow_1) {
CommandAllocator allocator;
AlignedStruct<1>* data =
allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
ASSERT_EQ(data, nullptr);
}
// Test for overflows in Allocate's computations, size 2 variant
TEST(CommandAllocator, AllocationOverflow_2) {
CommandAllocator allocator;
AlignedStruct<2>* data =
allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
ASSERT_EQ(data, nullptr);
}
// Test for overflows in Allocate's computations, size 4 variant
TEST(CommandAllocator, AllocationOverflow_4) {
CommandAllocator allocator;
AlignedStruct<4>* data =
allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
ASSERT_EQ(data, nullptr);
}
// Test for overflows in Allocate's computations, size 8 variant
TEST(CommandAllocator, AllocationOverflow_8) {
CommandAllocator allocator;
AlignedStruct<8>* data =
allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
ASSERT_EQ(data, nullptr);
}
template <int DefaultValue>
struct IntWithDefault {
IntWithDefault() : value(DefaultValue) {
}
int value;
};
// Test that the allocator correctly default-initializes data for Allocate
TEST(CommandAllocator, AllocateDefaultInitializes) {
CommandAllocator allocator;
IntWithDefault<42>* int42 = allocator.Allocate<IntWithDefault<42>>(CommandType::Draw);
ASSERT_EQ(int42->value, 42);
IntWithDefault<43>* int43 = allocator.Allocate<IntWithDefault<43>>(CommandType::Draw);
ASSERT_EQ(int43->value, 43);
IntWithDefault<44>* int44 = allocator.Allocate<IntWithDefault<44>>(CommandType::Draw);
ASSERT_EQ(int44->value, 44);
CommandIterator iterator(std::move(allocator));
iterator.MakeEmptyAsDataWasDestroyed();
}
// Test that the allocator correctly default-initializes data for AllocateData
TEST(CommandAllocator, AllocateDataDefaultInitializes) {
CommandAllocator allocator;
IntWithDefault<33>* int33 = allocator.AllocateData<IntWithDefault<33>>(1);
ASSERT_EQ(int33[0].value, 33);
IntWithDefault<34>* int34 = allocator.AllocateData<IntWithDefault<34>>(2);
ASSERT_EQ(int34[0].value, 34);
ASSERT_EQ(int34[1].value, 34);
IntWithDefault<35>* int35 = allocator.AllocateData<IntWithDefault<35>>(3);
ASSERT_EQ(int35[0].value, 35);
ASSERT_EQ(int35[1].value, 35);
ASSERT_EQ(int35[2].value, 35);
CommandIterator iterator(std::move(allocator));
iterator.MakeEmptyAsDataWasDestroyed();
}
// Tests flattening of multiple CommandAllocators into a single CommandIterator using
// AcquireCommandBlocks.
TEST(CommandAllocator, AcquireCommandBlocks) {
constexpr size_t kNumAllocators = 2;
constexpr size_t kNumCommandsPerAllocator = 2;
const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = {
{0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
{0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
};
const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, {3, 4}};
const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
std::vector<CommandAllocator> allocators(kNumAllocators);
for (size_t j = 0; j < kNumAllocators; ++j) {
CommandAllocator& allocator = allocators[j];
for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
pipeline->pipeline = pipelines[j][i];
pipeline->attachmentPoint = attachmentPoints[j][i];
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = firsts[j][i];
draw->count = counts[j][i];
}
}
CommandIterator iterator;
iterator.AcquireCommandBlocks(std::move(allocators));
for (size_t j = 0; j < kNumAllocators; ++j) {
for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, pipelines[j][i]);
ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]);
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, firsts[j][i]);
ASSERT_EQ(draw->count, counts[j][i]);
}
}
CommandType type;
ASSERT_FALSE(iterator.NextCommandId(&type));
iterator.MakeEmptyAsDataWasDestroyed();
}
+} // namespace dawn::native
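
Aside: the AllocationOverflow_N tests rely on the fact that a request of max/sizeof(T) elements can no longer be represented in a size_t once any per-command overhead (command id, alignment padding) is added. A minimal sketch of the overflow guard such an allocator needs, assuming a 16-byte overhead purely for illustration (not Dawn's actual bookkeeping):

#include <cassert>
#include <cstddef>
#include <limits>

// Hypothetical guard: returns true when count * elemSize + overhead cannot be
// represented in a size_t. The division avoids performing the overflowing
// multiplication itself.
bool WouldOverflow(size_t count, size_t elemSize, size_t overhead) {
    return count > (std::numeric_limits<size_t>::max() - overhead) / elemSize;
}

int main() {
    constexpr size_t kMax = std::numeric_limits<size_t>::max();
    // The payload of kMax / 8 eight-byte elements fits on its own, but adding
    // a 16-byte overhead (command id plus padding, say) pushes it past kMax.
    assert(WouldOverflow(kMax / 8, 8, 16));
    assert(!WouldOverflow(1024, 8, 16));
    return 0;
}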

View File

@ -18,9 +18,7 @@
#include "dawn/native/ErrorData.h" #include "dawn/native/ErrorData.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
using namespace dawn::native; namespace dawn::native { namespace {
namespace {
int placeholderSuccess = 0xbeef; int placeholderSuccess = 0xbeef;
const char* placeholderErrorMessage = "I am an error message :3"; const char* placeholderErrorMessage = "I am an error message :3";
@ -360,4 +358,6 @@ namespace {
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
} // anonymous namespace // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
// NOLINTNEXTLINE(readability/namespace)
}} // namespace dawn::native::

View File

@ -16,74 +16,76 @@
#include "dawn/native/PerStage.h" #include "dawn/native/PerStage.h"
using namespace dawn::native; namespace dawn::native {
// Tests for StageBit // Tests for StageBit
TEST(PerStage, StageBit) { TEST(PerStage, StageBit) {
ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex); ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex);
ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment); ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment);
ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute); ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute);
}
// Basic test for the PerStage container
TEST(PerStage, PerStage) {
PerStage<int> data;
// Store data using wgpu::ShaderStage
data[SingleShaderStage::Vertex] = 42;
data[SingleShaderStage::Fragment] = 3;
data[SingleShaderStage::Compute] = -1;
// Load it using wgpu::ShaderStage
ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
}
// Test IterateStages with kAllStages
TEST(PerStage, IterateAllStages) {
PerStage<int> counts;
counts[SingleShaderStage::Vertex] = 0;
counts[SingleShaderStage::Fragment] = 0;
counts[SingleShaderStage::Compute] = 0;
for (auto stage : IterateStages(kAllStages)) {
counts[stage]++;
} }
ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1); // Basic test for the PerStage container
ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1); TEST(PerStage, PerStage) {
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1); PerStage<int> data;
}
// Test IterateStages with one stage // Store data using wgpu::ShaderStage
TEST(PerStage, IterateOneStage) { data[SingleShaderStage::Vertex] = 42;
PerStage<int> counts; data[SingleShaderStage::Fragment] = 3;
counts[SingleShaderStage::Vertex] = 0; data[SingleShaderStage::Compute] = -1;
counts[SingleShaderStage::Fragment] = 0;
counts[SingleShaderStage::Compute] = 0;
for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) { // Load it using wgpu::ShaderStage
counts[stage]++; ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
} }
ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0); // Test IterateStages with kAllStages
ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1); TEST(PerStage, IterateAllStages) {
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0); PerStage<int> counts;
} counts[SingleShaderStage::Vertex] = 0;
counts[SingleShaderStage::Fragment] = 0;
counts[SingleShaderStage::Compute] = 0;
// Test IterateStages with no stage for (auto stage : IterateStages(kAllStages)) {
TEST(PerStage, IterateNoStages) { counts[stage]++;
PerStage<int> counts; }
counts[SingleShaderStage::Vertex] = 0;
counts[SingleShaderStage::Fragment] = 0;
counts[SingleShaderStage::Compute] = 0;
for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) { ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1);
counts[stage]++; ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1);
} }
ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0); // Test IterateStages with one stage
ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0); TEST(PerStage, IterateOneStage) {
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0); PerStage<int> counts;
} counts[SingleShaderStage::Vertex] = 0;
counts[SingleShaderStage::Fragment] = 0;
counts[SingleShaderStage::Compute] = 0;
for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) {
counts[stage]++;
}
ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
}
// Test IterateStages with no stage
TEST(PerStage, IterateNoStages) {
PerStage<int> counts;
counts[SingleShaderStage::Vertex] = 0;
counts[SingleShaderStage::Fragment] = 0;
counts[SingleShaderStage::Compute] = 0;
for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) {
counts[stage]++;
}
ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
}
} // namespace dawn::native
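
Aside: IterateStages boils down to visiting the set bits of a wgpu::ShaderStage mask. A generic sketch of that kind of bit iteration (plain bit twiddling, not Dawn's IterateStages implementation):

#include <cassert>
#include <cstdint>

// Visits each set bit of a mask exactly once, lowest bit first.
template <typename F>
void ForEachSetBit(uint32_t mask, F&& visit) {
    while (mask != 0) {
        uint32_t bit = mask & (~mask + 1u);  // isolate the lowest set bit
        visit(bit);
        mask &= mask - 1u;                   // clear it
    }
}

int main() {
    int count = 0;
    ForEachSetBit(0b101u, [&](uint32_t) { count++; });  // two stages set
    assert(count == 2);
    ForEachSetBit(0u, [&](uint32_t) { count++; });      // no stages, no visits
    assert(count == 2);
    return 0;
}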

View File

@ -18,7 +18,8 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
using namespace testing; using testing::InSequence;
using testing::StrictMock;
namespace { namespace {
@ -34,7 +35,7 @@ namespace {
std::unique_ptr<StrictMock<MockDestructor>> mockDestructor; std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
class PlacementAllocatedTests : public Test { class PlacementAllocatedTests : public testing::Test {
void SetUp() override { void SetUp() override {
mockDestructor = std::make_unique<StrictMock<MockDestructor>>(); mockDestructor = std::make_unique<StrictMock<MockDestructor>>();
} }

View File

@ -17,159 +17,163 @@
#include "dawn/native/RingBufferAllocator.h" #include "dawn/native/RingBufferAllocator.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
using namespace dawn::native; namespace dawn::native {
constexpr uint64_t RingBufferAllocator::kInvalidOffset; constexpr uint64_t RingBufferAllocator::kInvalidOffset;
// Number of basic tests for Ringbuffer // Number of basic tests for Ringbuffer
TEST(RingBufferAllocatorTests, BasicTest) { TEST(RingBufferAllocatorTests, BasicTest) {
constexpr uint64_t sizeInBytes = 64000; constexpr uint64_t sizeInBytes = 64000;
RingBufferAllocator allocator(sizeInBytes); RingBufferAllocator allocator(sizeInBytes);
// Ensure no requests exist on empty buffer. // Ensure no requests exist on empty buffer.
EXPECT_TRUE(allocator.Empty()); EXPECT_TRUE(allocator.Empty());
ASSERT_EQ(allocator.GetSize(), sizeInBytes); ASSERT_EQ(allocator.GetSize(), sizeInBytes);
// Ensure failure upon sub-allocating an oversized request. // Ensure failure upon sub-allocating an oversized request.
ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)), ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)),
RingBufferAllocator::kInvalidOffset); RingBufferAllocator::kInvalidOffset);
// Fill the entire buffer with two requests of equal size. // Fill the entire buffer with two requests of equal size.
ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u); ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u);
ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u); ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u);
// Ensure the buffer is full. // Ensure the buffer is full.
ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset); ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset);
}
// Tests that several ringbuffer allocations do not fail.
TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
constexpr uint64_t maxNumOfFrames = 64000;
constexpr uint64_t frameSizeInBytes = 4;
RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
size_t offset = 0;
for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
offset = allocator.Allocate(frameSizeInBytes, i);
ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
} }
}
// Tests ringbuffer sub-allocations of the same serial are correctly tracked. // Tests that several ringbuffer allocations do not fail.
TEST(RingBufferAllocatorTests, AllocInSameFrame) { TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
constexpr uint64_t maxNumOfFrames = 3; constexpr uint64_t maxNumOfFrames = 64000;
constexpr uint64_t frameSizeInBytes = 4; constexpr uint64_t frameSizeInBytes = 4;
RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
// F1 size_t offset = 0;
// [xxxx|--------] for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1)); offset = allocator.Allocate(frameSizeInBytes, i);
ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
}
}
// F1 F2 // Tests ringbuffer sub-allocations of the same serial are correctly tracked.
// [xxxx|xxxx|----] TEST(RingBufferAllocatorTests, AllocInSameFrame) {
constexpr uint64_t maxNumOfFrames = 3;
constexpr uint64_t frameSizeInBytes = 4;
offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2)); RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
// F1 F2 // F1
// [xxxx|xxxxxxxx] // [xxxx|--------]
size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1));
offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2)); // F1 F2
// [xxxx|xxxx|----]
ASSERT_EQ(offset, 8u); offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
allocator.Deallocate(ExecutionSerial(2)); // F1 F2
// [xxxx|xxxxxxxx]
ASSERT_EQ(allocator.GetUsedSize(), 0u); offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
EXPECT_TRUE(allocator.Empty());
}
// Tests ringbuffer sub-allocation at various offsets. ASSERT_EQ(offset, 8u);
TEST(RingBufferAllocatorTests, RingBufferSubAlloc) { ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
constexpr uint64_t maxNumOfFrames = 10;
constexpr uint64_t frameSizeInBytes = 4;
RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes); allocator.Deallocate(ExecutionSerial(2));
// Sub-alloc the first eight frames. ASSERT_EQ(allocator.GetUsedSize(), 0u);
ExecutionSerial serial(0); EXPECT_TRUE(allocator.Empty());
while (serial < ExecutionSerial(8)) { }
allocator.Allocate(frameSizeInBytes, serial);
// Tests ringbuffer sub-allocation at various offsets.
TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
constexpr uint64_t maxNumOfFrames = 10;
constexpr uint64_t frameSizeInBytes = 4;
RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
// Sub-alloc the first eight frames.
ExecutionSerial serial(0);
while (serial < ExecutionSerial(8)) {
allocator.Allocate(frameSizeInBytes, serial);
serial++;
}
// Each frame corrresponds to the serial number (for simplicity).
//
// F1 F2 F3 F4 F5 F6 F7 F8
// [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
//
// Ensure an oversized allocation fails (only 8 bytes left)
ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial),
RingBufferAllocator::kInvalidOffset);
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
// Reclaim the first 3 frames.
allocator.Deallocate(ExecutionSerial(2));
// F4 F5 F6 F7 F8
// [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
//
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
// Re-try the over-sized allocation.
size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
// F9 F4 F5 F6 F7 F8
// [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
// ^^^^^^^^ wasted
// In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
// were added to F9's sub-allocation.
// TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
ASSERT_EQ(offset, 0u);
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
// Ensure we are full.
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
RingBufferAllocator::kInvalidOffset);
// Reclaim the next two frames.
allocator.Deallocate(ExecutionSerial(4));
// F9 F4 F5 F6 F7 F8
// [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
//
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
// Sub-alloc the chunk in the middle.
serial++; serial++;
offset = allocator.Allocate(frameSizeInBytes * 2, serial);
ASSERT_EQ(offset, frameSizeInBytes * 3);
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
// F9 F10 F6 F7 F8
// [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
//
// Ensure we are full.
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
RingBufferAllocator::kInvalidOffset);
// Reclaim all.
allocator.Deallocate(kMaxExecutionSerial);
EXPECT_TRUE(allocator.Empty());
} }
// Each frame corrresponds to the serial number (for simplicity). // Checks if ringbuffer sub-allocation does not overflow.
// TEST(RingBufferAllocatorTests, RingBufferOverflow) {
// F1 F2 F3 F4 F5 F6 F7 F8 RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
// [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
//
// Ensure an oversized allocation fails (only 8 bytes left) ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial), ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
RingBufferAllocator::kInvalidOffset); RingBufferAllocator::kInvalidOffset);
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8); }
// Reclaim the first 3 frames. } // namespace dawn::native
allocator.Deallocate(ExecutionSerial(2));
// F4 F5 F6 F7 F8
// [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
//
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
// Re-try the over-sized allocation.
size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
// F9 F4 F5 F6 F7 F8
// [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
// ^^^^^^^^ wasted
// In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
// were added to F9's sub-allocation.
// TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
ASSERT_EQ(offset, 0u);
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
// Ensure we are full.
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
// Reclaim the next two frames.
allocator.Deallocate(ExecutionSerial(4));
// F9 F4 F5 F6 F7 F8
// [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
//
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
// Sub-alloc the chunk in the middle.
serial++;
offset = allocator.Allocate(frameSizeInBytes * 2, serial);
ASSERT_EQ(offset, frameSizeInBytes * 3);
ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
// F9 F10 F6 F7 F8
// [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
//
// Ensure we are full.
ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
// Reclaim all.
allocator.Deallocate(kMaxExecutionSerial);
EXPECT_TRUE(allocator.Empty());
}
// Checks if ringbuffer sub-allocation does not overflow.
TEST(RingBufferAllocatorTests, RingBufferOverflow) {
RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
RingBufferAllocator::kInvalidOffset);
}
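
Aside: the "wasted" bytes in the RingBufferSubAlloc diagrams can be double-checked with plain arithmetic. A toy recomputation of the used sizes the test asserts, for the same 10-frame ring with 4-byte frames (this is just arithmetic, not the RingBufferAllocator itself):

#include <cassert>
#include <cstdint>

int main() {
    constexpr uint64_t kFrame = 4;
    constexpr uint64_t kSize = 10 * kFrame;  // 40-byte ring
    uint64_t used = 8 * kFrame;              // F1..F8 live, 8-byte tail free

    // A 12-byte request cannot fit in the 8-byte tail.
    assert(kSize - used < 3 * kFrame);

    used -= 3 * kFrame;  // Deallocate(2) reclaims F1..F3
    assert(used == kFrame * 5);  // GetUsedSize() == 20

    // The request wraps to offset 0; the 8 unused tail bytes are folded into
    // the new sub-allocation, so the ring reads as completely full.
    used += 3 * kFrame + (kSize - 8 * kFrame);
    assert(used == kSize);  // GetUsedSize() == frameSizeInBytes * maxNumOfFrames
    return 0;
}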

File diff suppressed because it is too large

View File

@ -22,9 +22,6 @@
// Make our own Base - Backend object pair, reusing the AdapterBase name
namespace dawn::native {
class AdapterBase : public RefCounted {};
-} // namespace dawn::native
-using namespace dawn::native;
class MyAdapter : public AdapterBase {};
@ -85,3 +82,5 @@ TEST(ToBackend, Ref) {
adapter->Release();
}
}
+} // namespace dawn::native
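
Aside: the Base/Backend pair above exercises the ToBackend downcast idiom: the frontend hands out base-class pointers and each backend casts them back to its own types. A self-contained sketch of the idiom (simplified; Dawn's real ToBackend relies on additional type-trait machinery):

#include <memory>

namespace frontend {
class AdapterBase {
  public:
    virtual ~AdapterBase() = default;
};
}  // namespace frontend

namespace backend {
class Adapter : public frontend::AdapterBase {
  public:
    int backendOnlyState = 42;
};

// Safe by construction: this backend only ever creates backend::Adapter.
inline Adapter* ToBackend(frontend::AdapterBase* base) {
    return static_cast<Adapter*>(base);
}
}  // namespace backend

int main() {
    std::unique_ptr<frontend::AdapterBase> adapter = std::make_unique<backend::Adapter>();
    return backend::ToBackend(adapter.get())->backendOnlyState == 42 ? 0 : 1;
}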

View File

@ -24,507 +24,516 @@
#include "dawn/webgpu_cpp_print.h" #include "dawn/webgpu_cpp_print.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
using namespace dawn::native::d3d12; namespace dawn::native::d3d12 {
namespace {
namespace { struct TextureSpec {
uint32_t x;
struct TextureSpec { uint32_t y;
uint32_t x; uint32_t z;
uint32_t y; uint32_t width;
uint32_t z; uint32_t height;
uint32_t width; uint32_t depthOrArrayLayers;
uint32_t height; uint32_t texelBlockSizeInBytes;
uint32_t depthOrArrayLayers; uint32_t blockWidth = 1;
uint32_t texelBlockSizeInBytes; uint32_t blockHeight = 1;
uint32_t blockWidth = 1;
uint32_t blockHeight = 1;
};
struct BufferSpec {
uint64_t offset;
uint32_t bytesPerRow;
uint32_t rowsPerImage;
};
// Check that each copy region fits inside the buffer footprint
void ValidateFootprints(const TextureSpec& textureSpec,
const BufferSpec& bufferSpec,
const TextureCopySubresource& copySplit,
wgpu::TextureDimension dimension) {
for (uint32_t i = 0; i < copySplit.count; ++i) {
const auto& copy = copySplit.copies[i];
ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width);
ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height);
ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers,
copy.bufferSize.depthOrArrayLayers);
// If there are multiple layers, 2D texture splitter actually splits each layer
// independently. See the details in Compute2DTextureCopySplits(). As a result,
// if we simply expanded a copy region generated by the 2D texture splitter to all
// layers, the copy region might be OOB. The current 2D texture splitter does not take
// that approach, although Compute2DTextureCopySubresource forwards
// "copySize.depthOrArrayLayers" to the copy regions it generates. So skip the test
// below for 2D textures with multiple layers.
if (textureSpec.depthOrArrayLayers <= 1 || dimension == wgpu::TextureDimension::e3D) {
uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
uint64_t minimumRequiredBufferSize =
bufferSpec.offset +
utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, bufferSpec.rowsPerImage,
widthInBlocks, heightInBlocks,
textureSpec.depthOrArrayLayers,
textureSpec.texelBlockSizeInBytes);
// The last pixel (buffer footprint) of each copy region depends on its bufferOffset
// and copySize. It is not the last pixel where the bufferSize ends.
ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u);
uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth;
ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u);
ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u);
uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height;
ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u);
uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight;
uint64_t bufferSizeForFootprint =
copy.alignedOffset +
utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height,
footprintWidthInBlocks, footprintHeightInBlocks,
copy.bufferSize.depthOrArrayLayers,
textureSpec.texelBlockSizeInBytes);
// The buffer footprint of each copy region should not exceed the minimum required
// buffer size. Otherwise, pixels accessed by copy may be OOB.
ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
}
}
}
// Check that the offset is aligned
void ValidateOffset(const TextureCopySubresource& copySplit) {
for (uint32_t i = 0; i < copySplit.count; ++i) {
ASSERT_TRUE(
Align(copySplit.copies[i].alignedOffset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
copySplit.copies[i].alignedOffset);
}
}
bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) {
return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB);
}
// Check that no pair of copy regions intersect each other
void ValidateDisjoint(const TextureCopySubresource& copySplit) {
for (uint32_t i = 0; i < copySplit.count; ++i) {
const auto& a = copySplit.copies[i];
for (uint32_t j = i + 1; j < copySplit.count; ++j) {
const auto& b = copySplit.copies[j];
// If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and
// 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be
// [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
// included.
bool overlapX = InclusiveRangesOverlap(
a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1, b.textureOffset.x,
b.textureOffset.x + b.copySize.width - 1);
bool overlapY = InclusiveRangesOverlap(
a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, b.textureOffset.y,
b.textureOffset.y + b.copySize.height - 1);
bool overlapZ = InclusiveRangesOverlap(
a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
ASSERT_TRUE(!overlapX || !overlapY || !overlapZ);
}
}
}
// Check that the union of the copy regions exactly covers the texture region
void ValidateTextureBounds(const TextureSpec& textureSpec,
const TextureCopySubresource& copySplit) {
ASSERT_GT(copySplit.count, 0u);
uint32_t minX = copySplit.copies[0].textureOffset.x;
uint32_t minY = copySplit.copies[0].textureOffset.y;
uint32_t minZ = copySplit.copies[0].textureOffset.z;
uint32_t maxX = copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
uint32_t maxY = copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
uint32_t maxZ =
copySplit.copies[0].textureOffset.z + copySplit.copies[0].copySize.depthOrArrayLayers;
for (uint32_t i = 1; i < copySplit.count; ++i) {
const auto& copy = copySplit.copies[i];
minX = std::min(minX, copy.textureOffset.x);
minY = std::min(minY, copy.textureOffset.y);
minZ = std::min(minZ, copy.textureOffset.z);
maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width);
maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height);
maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers);
}
ASSERT_EQ(minX, textureSpec.x);
ASSERT_EQ(minY, textureSpec.y);
ASSERT_EQ(minZ, textureSpec.z);
ASSERT_EQ(maxX, textureSpec.x + textureSpec.width);
ASSERT_EQ(maxY, textureSpec.y + textureSpec.height);
ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers);
}
// Validate that the number of pixels copied is exactly equal to the number of pixels in the
// texture region
void ValidatePixelCount(const TextureSpec& textureSpec,
const TextureCopySubresource& copySplit) {
uint32_t count = 0;
for (uint32_t i = 0; i < copySplit.count; ++i) {
const auto& copy = copySplit.copies[i];
uint32_t copiedPixels =
copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers;
ASSERT_GT(copiedPixels, 0u);
count += copiedPixels;
}
ASSERT_EQ(count, textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
}
// Check that every buffer offset is at the correct pixel location
void ValidateBufferOffset(const TextureSpec& textureSpec,
const BufferSpec& bufferSpec,
const TextureCopySubresource& copySplit,
wgpu::TextureDimension dimension) {
ASSERT_GT(copySplit.count, 0u);
uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight;
for (uint32_t i = 0; i < copySplit.count; ++i) {
const auto& copy = copySplit.copies[i];
uint32_t bytesPerRowInTexels =
bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
uint32_t slicePitchInTexels =
bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight);
uint32_t absoluteTexelOffset =
copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock +
copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock +
copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels;
// There is at most one empty row in a 2D copy region. However, this is not true for
// a 3D texture copy region when we are copying the last row of each slice. We may
// need to offset by many rows, and copy.bufferOffset.y may be big.
if (dimension == wgpu::TextureDimension::e2D) {
ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight);
}
ASSERT_EQ(copy.bufferOffset.z, 0u);
ASSERT_GE(absoluteTexelOffset,
bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock);
uint32_t relativeTexelOffset =
absoluteTexelOffset -
bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
uint32_t z = relativeTexelOffset / slicePitchInTexels;
uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels;
uint32_t x = relativeTexelOffset % bytesPerRowInTexels;
ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x);
ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y);
ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z);
}
}
void ValidateCopySplit(const TextureSpec& textureSpec,
const BufferSpec& bufferSpec,
const TextureCopySubresource& copySplit,
wgpu::TextureDimension dimension) {
ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension);
ValidateOffset(copySplit);
ValidateDisjoint(copySplit);
ValidateTextureBounds(textureSpec, copySplit);
ValidatePixelCount(textureSpec, copySplit);
ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension);
}
std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) {
os << "TextureSpec("
<< "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
<< textureSpec.width << ", " << textureSpec.height << ", "
<< textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes << ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) {
os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", "
<< bufferSpec.rowsPerImage << ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) {
os << "CopySplit" << std::endl;
for (uint32_t i = 0; i < copySplit.count; ++i) {
const auto& copy = copySplit.copies[i];
os << " " << i << ": Texture at (" << copy.textureOffset.x << ", "
<< copy.textureOffset.y << ", " << copy.textureOffset.z << "), size ("
<< copy.copySize.width << ", " << copy.copySize.height << ", "
<< copy.copySize.depthOrArrayLayers << ")" << std::endl;
os << " " << i << ": Buffer at (" << copy.bufferOffset.x << ", " << copy.bufferOffset.y
<< ", " << copy.bufferOffset.z << "), footprint (" << copy.bufferSize.width << ", "
<< copy.bufferSize.height << ", " << copy.bufferSize.depthOrArrayLayers << ")"
<< std::endl;
}
return os;
}
// Define base texture sizes and offsets to test with: some aligned, some unaligned
constexpr TextureSpec kBaseTextureSpecs[] = {
{0, 0, 0, 1, 1, 1, 4},
{0, 0, 0, 64, 1, 1, 4},
{0, 0, 0, 128, 1, 1, 4},
{0, 0, 0, 192, 1, 1, 4},
{31, 16, 0, 1, 1, 1, 4},
{64, 16, 0, 1, 1, 1, 4},
{64, 16, 8, 1, 1, 1, 4},
{0, 0, 0, 64, 2, 1, 4},
{0, 0, 0, 64, 1, 2, 4},
{0, 0, 0, 64, 2, 2, 4},
{0, 0, 0, 128, 2, 1, 4},
{0, 0, 0, 128, 1, 2, 4},
{0, 0, 0, 128, 2, 2, 4},
{0, 0, 0, 192, 2, 1, 4},
{0, 0, 0, 192, 1, 2, 4},
{0, 0, 0, 192, 2, 2, 4},
{0, 0, 0, 1024, 1024, 1, 4},
{256, 512, 0, 1024, 1024, 1, 4},
{64, 48, 0, 1024, 1024, 1, 4},
{64, 48, 16, 1024, 1024, 1024, 4},
{0, 0, 0, 257, 31, 1, 4},
{0, 0, 0, 17, 93, 1, 4},
{59, 13, 0, 257, 31, 1, 4},
{17, 73, 0, 17, 93, 1, 4},
{17, 73, 59, 17, 93, 99, 4},
{0, 0, 0, 4, 4, 1, 8, 4, 4},
{64, 16, 0, 4, 4, 1, 8, 4, 4},
{64, 16, 8, 4, 4, 1, 8, 4, 4},
{0, 0, 0, 4, 4, 1, 16, 4, 4},
{64, 16, 0, 4, 4, 1, 16, 4, 4},
{64, 16, 8, 4, 4, 1, 16, 4, 4},
{0, 0, 0, 1024, 1024, 1, 8, 4, 4},
{256, 512, 0, 1024, 1024, 1, 8, 4, 4},
{64, 48, 0, 1024, 1024, 1, 8, 4, 4},
{64, 48, 16, 1024, 1024, 1, 8, 4, 4},
{0, 0, 0, 1024, 1024, 1, 16, 4, 4},
{256, 512, 0, 1024, 1024, 1, 16, 4, 4},
{64, 48, 0, 1024, 1024, 1, 4, 16, 4},
{64, 48, 16, 1024, 1024, 1, 16, 4, 4},
};
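// Key for the initializers above (illustrative; assumes TextureSpec declares, in order,
// x, y, z, width, height, depthOrArrayLayers, texelBlockSizeInBytes, blockWidth and
// blockHeight, with the block dimensions defaulting to 1): e.g. {64, 16, 8, 4, 4, 1, 8, 4, 4}
// is a 4x4x1 copy at origin (64, 16, 8) in a format with 8-byte 4x4 blocks.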
// Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow is
// the minimum required
std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width,
kTextureBytesPerRowAlignment);
auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t {
return value == 0 ? 0 : ((value - 1) / size + 1) * size;
    };

    return {
        BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height * 2},
        BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height * 2},
        BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height},
        BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
                   textureSpec.height * 2},
    };
}
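// Note (illustrative, not in the original): alignNonPow2 rounds up to a multiple of size
// without requiring size to be a power of two, e.g. alignNonPow2(257, 12) == 264, so the
// offsets above stay aligned to texelBlockSizeInBytes even for block sizes where a
// power-of-two Align() would not apply.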
// Define a list of values to set properties in the spec structs
constexpr uint32_t kCheckValues[] = {1,  2,  3,  4,   5,   6,   7,    8,     // small values
                                     16, 32, 64, 128, 256, 512, 1024, 2048,  // powers of 2
                                     15, 31, 63, 127, 257, 511, 1023, 2047,  // misalignments
                                     17, 33, 65, 129, 257, 513, 1025, 2049};

}  // namespace
class CopySplitTest : public testing::TestWithParam<wgpu::TextureDimension> {
protected:
void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) {
ASSERT(textureSpec.width % textureSpec.blockWidth == 0 &&
textureSpec.height % textureSpec.blockHeight == 0);
wgpu::TextureDimension dimension = GetParam();
TextureCopySubresource copySplit;
switch (dimension) {
case wgpu::TextureDimension::e2D: {
copySplit = Compute2DTextureCopySubresource(
{textureSpec.x, textureSpec.y, textureSpec.z},
{textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
{textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
textureSpec.blockHeight},
bufferSpec.offset, bufferSpec.bytesPerRow);
break;
}
case wgpu::TextureDimension::e3D: {
copySplit = Compute3DTextureCopySplits(
{textureSpec.x, textureSpec.y, textureSpec.z},
{textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
{textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
textureSpec.blockHeight},
bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
break;
}
default:
UNREACHABLE();
break;
}
ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension);
if (HasFatalFailure()) {
std::ostringstream message;
message << "Failed generating splits: " << textureSpec << ", " << bufferSpec
<< std::endl
<< dimension << " " << copySplit << std::endl;
FAIL() << message.str();
}
}
};
TEST_P(CopySplitTest, General) {
for (TextureSpec textureSpec : kBaseTextureSpecs) {
        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
            DoTest(textureSpec, bufferSpec);
        }
    }
}

TEST_P(CopySplitTest, TextureWidth) {
    for (TextureSpec textureSpec : kBaseTextureSpecs) {
        for (uint32_t val : kCheckValues) {
            if (val % textureSpec.blockWidth != 0) {
                continue;
            }
            textureSpec.width = val;
            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
                DoTest(textureSpec, bufferSpec);
            }
        }
    }
}

TEST_P(CopySplitTest, TextureHeight) {
    for (TextureSpec textureSpec : kBaseTextureSpecs) {
        for (uint32_t val : kCheckValues) {
            if (val % textureSpec.blockHeight != 0) {
                continue;
            }
            textureSpec.height = val;
            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
                DoTest(textureSpec, bufferSpec);
            }
        }
    }
}

TEST_P(CopySplitTest, TextureX) {
    for (TextureSpec textureSpec : kBaseTextureSpecs) {
        for (uint32_t val : kCheckValues) {
            textureSpec.x = val;
            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
                DoTest(textureSpec, bufferSpec);
            }
        }
    }
}

TEST_P(CopySplitTest, TextureY) {
    for (TextureSpec textureSpec : kBaseTextureSpecs) {
        for (uint32_t val : kCheckValues) {
textureSpec.y = val;
for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
DoTest(textureSpec, bufferSpec);
}
}
}
}
TEST_P(CopySplitTest, TexelSize) {
for (TextureSpec textureSpec : kBaseTextureSpecs) {
for (uint32_t texelSize : {4, 8, 16, 32, 64}) {
textureSpec.texelBlockSizeInBytes = texelSize;
for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
DoTest(textureSpec, bufferSpec);
}
}
}
}
TEST_P(CopySplitTest, BufferOffset) {
for (TextureSpec textureSpec : kBaseTextureSpecs) {
for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
for (uint32_t val : kCheckValues) {
bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val;
DoTest(textureSpec, bufferSpec);
}
}
}
}
TEST_P(CopySplitTest, RowPitch) {
for (TextureSpec textureSpec : kBaseTextureSpecs) {
for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
uint32_t baseRowPitch = bufferSpec.bytesPerRow;
for (uint32_t i = 0; i < 5; ++i) {
bufferSpec.bytesPerRow = baseRowPitch + i * 256;
DoTest(textureSpec, bufferSpec);
}
}
}
}
TEST_P(CopySplitTest, ImageHeight) {
for (TextureSpec textureSpec : kBaseTextureSpecs) {
for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
uint32_t baseImageHeight = bufferSpec.rowsPerImage;
for (uint32_t i = 0; i < 5; ++i) {
bufferSpec.rowsPerImage = baseImageHeight + i * 256;
DoTest(textureSpec, bufferSpec);
}
}
}
}
INSTANTIATE_TEST_SUITE_P(,
CopySplitTest,
testing::Values(wgpu::TextureDimension::e2D,
wgpu::TextureDimension::e3D));
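// The empty first argument to INSTANTIATE_TEST_SUITE_P selects the default, unprefixed
// instantiation name, so each CopySplitTest above runs once per listed dimension.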
} // namespace dawn::native::d3d12

View File

@ -21,292 +21,295 @@
#include "dawn/tests/DawnNativeTest.h" #include "dawn/tests/DawnNativeTest.h"
#include "dawn/utils/WGPUHelpers.h" #include "dawn/utils/WGPUHelpers.h"
class CommandBufferEncodingTests : public DawnNativeTest { namespace dawn::native {
protected:
void ExpectCommands(dawn::native::CommandIterator* commands, class CommandBufferEncodingTests : public DawnNativeTest {
std::vector<std::pair<dawn::native::Command, protected:
std::function<void(dawn::native::CommandIterator*)>>> void ExpectCommands(
expectedCommands) { dawn::native::CommandIterator* commands,
dawn::native::Command commandId; std::vector<std::pair<dawn::native::Command,
for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) { std::function<void(dawn::native::CommandIterator*)>>>
ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command"; expectedCommands) {
ASSERT_EQ(commandId, expectedCommands[commandIndex].first) dawn::native::Command commandId;
<< "at command " << commandIndex; for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) {
expectedCommands[commandIndex].second(commands); ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command";
ASSERT_EQ(commandId, expectedCommands[commandIndex].first)
<< "at command " << commandIndex;
expectedCommands[commandIndex].second(commands);
}
} }
} };
};
// Indirect dispatch validation changes the bind groups in the middle // Indirect dispatch validation changes the bind groups in the middle
// of a pass. Test that bindings are restored after the validation runs. // of a pass. Test that bindings are restored after the validation runs.
TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) { TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
using namespace dawn::native; wgpu::BindGroupLayout staticLayout =
utils::MakeBindGroupLayout(device, {{
0,
wgpu::ShaderStage::Compute,
wgpu::BufferBindingType::Uniform,
}});
wgpu::BindGroupLayout staticLayout = wgpu::BindGroupLayout dynamicLayout =
utils::MakeBindGroupLayout(device, {{ utils::MakeBindGroupLayout(device, {{
0, 0,
wgpu::ShaderStage::Compute, wgpu::ShaderStage::Compute,
wgpu::BufferBindingType::Uniform, wgpu::BufferBindingType::Uniform,
}}); true,
}});
wgpu::BindGroupLayout dynamicLayout = // Create a simple pipeline
utils::MakeBindGroupLayout(device, {{ wgpu::ComputePipelineDescriptor csDesc;
0, csDesc.compute.module = utils::CreateShaderModule(device, R"(
wgpu::ShaderStage::Compute,
wgpu::BufferBindingType::Uniform,
true,
}});
// Create a simple pipeline
wgpu::ComputePipelineDescriptor csDesc;
csDesc.compute.module = utils::CreateShaderModule(device, R"(
@stage(compute) @workgroup_size(1, 1, 1) @stage(compute) @workgroup_size(1, 1, 1)
fn main() { fn main() {
})"); })");
csDesc.compute.entryPoint = "main"; csDesc.compute.entryPoint = "main";
wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout}); wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout});
csDesc.layout = pl0; csDesc.layout = pl0;
wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc); wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc);
wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout}); wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout});
csDesc.layout = pl1; csDesc.layout = pl1;
wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc); wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
// Create buffers to use for both the indirect buffer and the bind groups. // Create buffers to use for both the indirect buffer and the bind groups.
wgpu::Buffer indirectBuffer = wgpu::Buffer indirectBuffer = utils::CreateBufferFromData<uint32_t>(
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4}); device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
wgpu::BufferDescriptor uniformBufferDesc = {}; wgpu::BufferDescriptor uniformBufferDesc = {};
uniformBufferDesc.size = 512; uniformBufferDesc.size = 512;
uniformBufferDesc.usage = wgpu::BufferUsage::Uniform; uniformBufferDesc.usage = wgpu::BufferUsage::Uniform;
wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc); wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc);
wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}}); wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}});
wgpu::BindGroup dynamicBG = wgpu::BindGroup dynamicBG =
utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}}); utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}});
uint32_t dynamicOffset = 256; uint32_t dynamicOffset = 256;
std::vector<uint32_t> emptyDynamicOffsets = {}; std::vector<uint32_t> emptyDynamicOffsets = {};
std::vector<uint32_t> singleDynamicOffset = {dynamicOffset}; std::vector<uint32_t> singleDynamicOffset = {dynamicOffset};
// Begin encoding commands. // Begin encoding commands.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder pass = encoder.BeginComputePass(); wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
CommandBufferStateTracker* stateTracker = CommandBufferStateTracker* stateTracker =
FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting(); FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
// Perform a dispatch indirect which will be preceded by a validation dispatch. // Perform a dispatch indirect which will be preceded by a validation dispatch.
pass.SetPipeline(pipeline0); pass.SetPipeline(pipeline0);
pass.SetBindGroup(0, staticBG); pass.SetBindGroup(0, staticBG);
pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset); pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset);
EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
pass.DispatchIndirect(indirectBuffer, 0); pass.DispatchIndirect(indirectBuffer, 0);
// Expect restored state. // Expect restored state.
EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get()); EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get()); EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets); EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get()); EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset); EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
// Dispatch again to check that the restored state can be used. // Dispatch again to check that the restored state can be used.
// Also pass an indirect offset which should get replaced with the offset // Also pass an indirect offset which should get replaced with the offset
// into the scratch indirect buffer (0). // into the scratch indirect buffer (0).
pass.DispatchIndirect(indirectBuffer, 4); pass.DispatchIndirect(indirectBuffer, 4);
// Expect restored state. // Expect restored state.
EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get()); EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get()); EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get()); EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets); EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get()); EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset); EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
// Change the pipeline // Change the pipeline
pass.SetPipeline(pipeline1); pass.SetPipeline(pipeline1);
pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset); pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset);
pass.SetBindGroup(1, staticBG); pass.SetBindGroup(1, staticBG);
EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get()); EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get()); EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
pass.DispatchIndirect(indirectBuffer, 0); pass.DispatchIndirect(indirectBuffer, 0);
// Expect restored state. // Expect restored state.
EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get()); EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get()); EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get()); EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get());
EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset); EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset);
EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get()); EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get());
EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets); EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets);
pass.End(); pass.End();
wgpu::CommandBuffer commandBuffer = encoder.Finish(); wgpu::CommandBuffer commandBuffer = encoder.Finish();
auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) { auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) {
return [pipeline](CommandIterator* commands) { return [pipeline](CommandIterator* commands) {
auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get());
};
};
auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg,
std::vector<uint32_t> offsets = {}) {
return [index, bg, offsets](CommandIterator* commands) {
auto* cmd = commands->NextCommand<SetBindGroupCmd>();
uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
ASSERT_EQ(cmd->index, BindGroupIndex(index));
ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
ASSERT_EQ(dynamicOffsets[i], offsets[i]);
}
};
};
// Initialize as null. Once we know the pointer, we'll check
// that it's the same buffer every time.
WGPUBuffer indirectScratchBuffer = nullptr;
auto ExpectDispatchIndirect = [&](CommandIterator* commands) {
auto* cmd = commands->NextCommand<DispatchIndirectCmd>();
if (indirectScratchBuffer == nullptr) {
indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
}
ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
};
// Initialize as null. Once we know the pointer, we'll check
// that it's the same pipeline every time.
WGPUComputePipeline validationPipeline = nullptr;
auto ExpectSetValidationPipeline = [&](CommandIterator* commands) {
auto* cmd = commands->NextCommand<SetComputePipelineCmd>(); auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get()); WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get());
if (validationPipeline != nullptr) {
EXPECT_EQ(pipeline, validationPipeline);
} else {
EXPECT_NE(pipeline, nullptr);
validationPipeline = pipeline;
}
}; };
};
auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg, auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) {
std::vector<uint32_t> offsets = {}) {
return [index, bg, offsets](CommandIterator* commands) {
auto* cmd = commands->NextCommand<SetBindGroupCmd>(); auto* cmd = commands->NextCommand<SetBindGroupCmd>();
uint32_t* dynamicOffsets = nullptr; ASSERT_EQ(cmd->index, BindGroupIndex(0));
if (cmd->dynamicOffsetCount > 0) { ASSERT_NE(cmd->group.Get(), nullptr);
dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount); ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
}
ASSERT_EQ(cmd->index, BindGroupIndex(index));
ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
ASSERT_EQ(dynamicOffsets[i], offsets[i]);
}
}; };
};
// Initialize as null. Once we know the pointer, we'll check auto ExpectSetValidationDispatch = [&](CommandIterator* commands) {
// that it's the same buffer every time. auto* cmd = commands->NextCommand<DispatchCmd>();
WGPUBuffer indirectScratchBuffer = nullptr; ASSERT_EQ(cmd->x, 1u);
auto ExpectDispatchIndirect = [&](CommandIterator* commands) { ASSERT_EQ(cmd->y, 1u);
auto* cmd = commands->NextCommand<DispatchIndirectCmd>(); ASSERT_EQ(cmd->z, 1u);
if (indirectScratchBuffer == nullptr) { };
indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
}
ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
};
// Initialize as null. Once we know the pointer, we'll check ExpectCommands(
// that it's the same pipeline every time. FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
WGPUComputePipeline validationPipeline = nullptr; {
auto ExpectSetValidationPipeline = [&](CommandIterator* commands) { {Command::BeginComputePass,
auto* cmd = commands->NextCommand<SetComputePipelineCmd>(); [&](CommandIterator* commands) {
WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get()); SkipCommand(commands, Command::BeginComputePass);
if (validationPipeline != nullptr) { }},
EXPECT_EQ(pipeline, validationPipeline); // Expect the state to be set.
} else { {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
EXPECT_NE(pipeline, nullptr); {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
validationPipeline = pipeline; {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
}
};
auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) { // Expect the validation.
auto* cmd = commands->NextCommand<SetBindGroupCmd>(); {Command::SetComputePipeline, ExpectSetValidationPipeline},
ASSERT_EQ(cmd->index, BindGroupIndex(0)); {Command::SetBindGroup, ExpectSetValidationBindGroup},
ASSERT_NE(cmd->group.Get(), nullptr); {Command::Dispatch, ExpectSetValidationDispatch},
ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
};
auto ExpectSetValidationDispatch = [&](CommandIterator* commands) { // Expect the state to be restored.
auto* cmd = commands->NextCommand<DispatchCmd>(); {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
ASSERT_EQ(cmd->x, 1u); {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
ASSERT_EQ(cmd->y, 1u); {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
ASSERT_EQ(cmd->z, 1u);
};
ExpectCommands( // Expect the dispatchIndirect.
FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(), {Command::DispatchIndirect, ExpectDispatchIndirect},
{
{Command::BeginComputePass,
[&](CommandIterator* commands) { SkipCommand(commands, Command::BeginComputePass); }},
// Expect the state to be set.
{Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
{Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
{Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
// Expect the validation. // Expect the validation.
{Command::SetComputePipeline, ExpectSetValidationPipeline}, {Command::SetComputePipeline, ExpectSetValidationPipeline},
{Command::SetBindGroup, ExpectSetValidationBindGroup}, {Command::SetBindGroup, ExpectSetValidationBindGroup},
{Command::Dispatch, ExpectSetValidationDispatch}, {Command::Dispatch, ExpectSetValidationDispatch},
// Expect the state to be restored. // Expect the state to be restored.
{Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
{Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
{Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
// Expect the dispatchIndirect. // Expect the dispatchIndirect.
{Command::DispatchIndirect, ExpectDispatchIndirect}, {Command::DispatchIndirect, ExpectDispatchIndirect},
// Expect the validation. // Expect the state to be set (new pipeline).
{Command::SetComputePipeline, ExpectSetValidationPipeline}, {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
{Command::SetBindGroup, ExpectSetValidationBindGroup}, {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
{Command::Dispatch, ExpectSetValidationDispatch}, {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
// Expect the state to be restored. // Expect the validation.
{Command::SetComputePipeline, ExpectSetPipeline(pipeline0)}, {Command::SetComputePipeline, ExpectSetValidationPipeline},
{Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)}, {Command::SetBindGroup, ExpectSetValidationBindGroup},
{Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})}, {Command::Dispatch, ExpectSetValidationDispatch},
// Expect the dispatchIndirect. // Expect the state to be restored.
{Command::DispatchIndirect, ExpectDispatchIndirect}, {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
{Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
{Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
// Expect the state to be set (new pipeline). // Expect the dispatchIndirect.
{Command::SetComputePipeline, ExpectSetPipeline(pipeline1)}, {Command::DispatchIndirect, ExpectDispatchIndirect},
{Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
{Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
// Expect the validation. {Command::EndComputePass,
{Command::SetComputePipeline, ExpectSetValidationPipeline}, [&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }},
{Command::SetBindGroup, ExpectSetValidationBindGroup}, });
{Command::Dispatch, ExpectSetValidationDispatch}, }
// Expect the state to be restored. // Test that after restoring state, it is fully applied to the state tracker
{Command::SetComputePipeline, ExpectSetPipeline(pipeline1)}, // and does not leak state changes that occured between a snapshot and the
{Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})}, // state restoration.
{Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)}, TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
// Expect the dispatchIndirect. CommandBufferStateTracker* stateTracker =
{Command::DispatchIndirect, ExpectDispatchIndirect}, FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
{Command::EndComputePass, // Snapshot the state.
[&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }}, CommandBufferStateTracker snapshot = *stateTracker;
}); // Expect no pipeline in the snapshot
} EXPECT_FALSE(snapshot.HasPipeline());
// Test that after restoring state, it is fully applied to the state tracker // Create a simple pipeline
// and does not leak state changes that occured between a snapshot and the wgpu::ComputePipelineDescriptor csDesc;
// state restoration. csDesc.compute.module = utils::CreateShaderModule(device, R"(
TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
using namespace dawn::native;
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
CommandBufferStateTracker* stateTracker =
FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
// Snapshot the state.
CommandBufferStateTracker snapshot = *stateTracker;
// Expect no pipeline in the snapshot
EXPECT_FALSE(snapshot.HasPipeline());
// Create a simple pipeline
wgpu::ComputePipelineDescriptor csDesc;
csDesc.compute.module = utils::CreateShaderModule(device, R"(
@stage(compute) @workgroup_size(1, 1, 1) @stage(compute) @workgroup_size(1, 1, 1)
fn main() { fn main() {
})"); })");
csDesc.compute.entryPoint = "main"; csDesc.compute.entryPoint = "main";
wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc); wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
// Set the pipeline. // Set the pipeline.
pass.SetPipeline(pipeline); pass.SetPipeline(pipeline);
// Expect the pipeline to be set. // Expect the pipeline to be set.
EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get()); EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get());
// Restore the state. // Restore the state.
FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot)); FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot));
// Expect no pipeline // Expect no pipeline
EXPECT_FALSE(stateTracker->HasPipeline()); EXPECT_FALSE(stateTracker->HasPipeline());
} }
} // namespace dawn::native
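// A minimal, self-contained sketch of the ExpectCommands pattern above (illustrative
// only; FakeCommand and FakeIterator are hypothetical stand-ins, not Dawn types): walk a
// recorded command stream and run one checker per expected entry.
#include <cassert>
#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

enum class FakeCommand { SetPipeline, Dispatch };

struct FakeIterator {
    std::vector<FakeCommand> stream;
    std::size_t next;
    // Mirrors the NextCommandId idiom: yields the next command, or false at the end.
    bool NextCommandId(FakeCommand* id) {
        if (next >= stream.size()) {
            return false;
        }
        *id = stream[next++];
        return true;
    }
};

void ExpectCommands(FakeIterator* it,
                    std::vector<std::pair<FakeCommand, std::function<void()>>> expected) {
    FakeCommand id;
    for (std::size_t i = 0; it->NextCommandId(&id); ++i) {
        // Each recorded command must match the expected id, in order.
        assert(i < expected.size() && id == expected[i].first);
        expected[i].second();  // Run the per-command checker.
    }
}

int main() {
    FakeIterator it{{FakeCommand::SetPipeline, FakeCommand::Dispatch}, 0};
    ExpectCommands(&it, {{FakeCommand::SetPipeline, [] {}}, {FakeCommand::Dispatch, [] {}}});
    return 0;
}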

View File

@ -25,9 +25,13 @@
namespace {

    using testing::Contains;
    using testing::MockCallback;
    using testing::NotNull;
    using testing::SaveArg;
    using testing::StrEq;

    class DeviceCreationTest : public testing::Test {
      protected:
        void SetUp() override {
            dawnProcSetProcs(&dawn::native::GetProcs());
@ -83,7 +87,7 @@ namespace {
        EXPECT_NE(device, nullptr);

        auto toggles = dawn::native::GetTogglesUsed(device.Get());
        EXPECT_THAT(toggles, Contains(StrEq(toggle)));
    }

    TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {

View File

@ -18,7 +18,8 @@
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "dawn/tests/unittests/validation/ValidationTest.h" #include "dawn/tests/unittests/validation/ValidationTest.h"
using namespace testing; using testing::_;
using testing::InvokeWithoutArgs;
class MockBufferMapAsyncCallback { class MockBufferMapAsyncCallback {
public: public:

View File

@ -18,7 +18,9 @@
#include "dawn/tests/unittests/validation/ValidationTest.h" #include "dawn/tests/unittests/validation/ValidationTest.h"
#include "gmock/gmock.h" #include "gmock/gmock.h"
using namespace testing; using testing::_;
using testing::MockCallback;
using testing::Sequence;
class MockDevicePopErrorScopeCallback { class MockDevicePopErrorScopeCallback {
public: public:
@ -170,7 +172,7 @@ TEST_F(ErrorScopeValidationTest, EnclosedQueueSubmitNested) {
    queue.Submit(0, nullptr);
    queue.OnSubmittedWorkDone(0u, ToMockQueueWorkDone, this);

    Sequence seq;
    MockCallback<WGPUErrorCallback> errorScopeCallback2;
    EXPECT_CALL(errorScopeCallback2, Call(WGPUErrorType_NoError, _, this + 1)).InSequence(seq);

View File

@ -16,7 +16,12 @@
#include "dawn/tests/MockCallback.h" #include "dawn/tests/MockCallback.h"
using namespace testing; using testing::_;
using testing::Invoke;
using testing::MockCallback;
using testing::NotNull;
using testing::StrictMock;
using testing::WithArg;
class MultipleDeviceTest : public ValidationTest {}; class MultipleDeviceTest : public ValidationTest {};

View File

@ -17,8 +17,6 @@
#include "dawn/tests/unittests/validation/ValidationTest.h" #include "dawn/tests/unittests/validation/ValidationTest.h"
#include "gmock/gmock.h" #include "gmock/gmock.h"
using namespace testing;
class MockQueueWorkDoneCallback { class MockQueueWorkDoneCallback {
public: public:
MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata)); MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));

View File

@ -23,10 +23,17 @@
#include "webgpu/webgpu_cpp.h" #include "webgpu/webgpu_cpp.h"
namespace { namespace dawn::wire { namespace {
using namespace testing; using testing::_;
using namespace dawn::wire; using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::MockCallback;
using testing::NotNull;
using testing::Return;
using testing::SaveArg;
using testing::StrEq;
using testing::WithArg;
class WireAdapterTests : public WireTest { class WireAdapterTests : public WireTest {
protected: protected:
@ -328,4 +335,6 @@ namespace {
        GetWireClient()->Disconnect();
    }

    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
    // NOLINTNEXTLINE(readability/namespace)
}}  // namespace dawn::wire::
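// Illustrative note (not part of the CL): the double brace above closes both the
// anonymous namespace and namespace dawn::wire on one line, which is why the lint
// waiver on the closing line is needed until dawn:1381 un-indents namespaces.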

View File

@ -18,239 +18,253 @@
#include "dawn/tests/unittests/wire/WireTest.h" #include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/common/Constants.h" #include "dawn/common/Constants.h"
using namespace testing; namespace dawn::wire {
using namespace dawn::wire;
class WireArgumentTests : public WireTest { using testing::_;
public: using testing::Return;
WireArgumentTests() { using testing::Sequence;
}
~WireArgumentTests() override = default;
};
// Test that the wire is able to send numerical values class WireArgumentTests : public WireTest {
TEST_F(WireArgumentTests, ValueArgument) { public:
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); WireArgumentTests() {
WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr); }
wgpuComputePassEncoderDispatch(pass, 1, 2, 3); ~WireArgumentTests() override = default;
};
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder(); // Test that the wire is able to send numerical values
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder)); TEST_F(WireArgumentTests, ValueArgument) {
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder(); WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
FlushClient();
}
// Test that the wire is able to send arrays of numerical values
TEST_F(WireArgumentTests, ValueArrayArgument) {
// Create a bindgroup.
WGPUBindGroupLayoutDescriptor bglDescriptor = {};
bglDescriptor.entryCount = 0;
bglDescriptor.entries = nullptr;
WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
WGPUBindGroupDescriptor bindGroupDescriptor = {};
bindGroupDescriptor.layout = bgl;
bindGroupDescriptor.entryCount = 0;
bindGroupDescriptor.entries = nullptr;
WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
// Use the bindgroup in SetBindGroup that takes an array of value offsets.
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), testOffsets.data());
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
apiPass, 0, apiBindGroup, testOffsets.size(),
MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
for (size_t i = 0; i < testOffsets.size(); i++) {
if (offsets[i] != testOffsets[i]) {
return false;
}
}
return true;
})));
FlushClient();
}
// Test that the wire is able to send C strings
TEST_F(WireArgumentTests, CStringArgument) {
// Create shader module
WGPUShaderModuleDescriptor vertexDescriptor = {};
WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
WGPUShaderModule apiVsModule = api.GetNewShaderModule();
EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
// Create the color state descriptor
WGPUBlendComponent blendComponent = {};
blendComponent.operation = WGPUBlendOperation_Add;
blendComponent.srcFactor = WGPUBlendFactor_One;
blendComponent.dstFactor = WGPUBlendFactor_One;
WGPUBlendState blendState = {};
blendState.alpha = blendComponent;
blendState.color = blendComponent;
WGPUColorTargetState colorTargetState = {};
colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
colorTargetState.blend = &blendState;
colorTargetState.writeMask = WGPUColorWriteMask_All;
// Create the depth-stencil state
WGPUStencilFaceState stencilFace = {};
stencilFace.compare = WGPUCompareFunction_Always;
stencilFace.failOp = WGPUStencilOperation_Keep;
stencilFace.depthFailOp = WGPUStencilOperation_Keep;
stencilFace.passOp = WGPUStencilOperation_Keep;
WGPUDepthStencilState depthStencilState = {};
depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
depthStencilState.depthWriteEnabled = false;
depthStencilState.depthCompare = WGPUCompareFunction_Always;
depthStencilState.stencilBack = stencilFace;
depthStencilState.stencilFront = stencilFace;
depthStencilState.stencilReadMask = 0xff;
depthStencilState.stencilWriteMask = 0xff;
depthStencilState.depthBias = 0;
depthStencilState.depthBiasSlopeScale = 0.0;
depthStencilState.depthBiasClamp = 0.0;
// Create the pipeline layout
WGPUPipelineLayoutDescriptor layoutDescriptor = {};
layoutDescriptor.bindGroupLayoutCount = 0;
layoutDescriptor.bindGroupLayouts = nullptr;
WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
// Create pipeline
WGPURenderPipelineDescriptor pipelineDescriptor = {};
pipelineDescriptor.vertex.module = vsModule;
pipelineDescriptor.vertex.entryPoint = "main";
pipelineDescriptor.vertex.bufferCount = 0;
pipelineDescriptor.vertex.buffers = nullptr;
WGPUFragmentState fragment = {};
fragment.module = vsModule;
fragment.entryPoint = "main";
fragment.targetCount = 1;
fragment.targets = &colorTargetState;
pipelineDescriptor.fragment = &fragment;
pipelineDescriptor.multisample.count = 1;
pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
pipelineDescriptor.layout = layout;
pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
pipelineDescriptor.depthStencil = &depthStencilState;
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
EXPECT_CALL(api,
DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->vertex.entryPoint == std::string("main");
})))
.WillOnce(Return(apiPlaceholderPipeline));
FlushClient();
}
// Test that the wire is able to send objects as value arguments
TEST_F(WireArgumentTests, ObjectAsValueArgument) {
WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
WGPUBufferDescriptor descriptor = {};
descriptor.size = 8;
descriptor.usage =
static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
WGPUBuffer apiBuffer = api.GetNewBuffer();
EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
.WillOnce(Return(apiBuffer))
.RetiresOnSaturation();
wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
EXPECT_CALL(api, CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
FlushClient();
}
// Test that the wire is able to send array of objects
TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
WGPUCommandBuffer cmdBufs[2];
WGPUCommandBuffer apiCmdBufs[2];
// Create two command buffers we need to use a GMock sequence otherwise the order of the
// CreateCommandEncoder might be swapped since they are equivalent in term of matchers
Sequence s;
for (int i = 0; i < 2; ++i) {
WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)) EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.InSequence(s) .WillOnce(Return(apiEncoder));
.WillOnce(Return(apiCmdBufEncoder));
apiCmdBufs[i] = api.GetNewCommandBuffer(); WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)) EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
.WillOnce(Return(apiCmdBufs[i])); .WillOnce(Return(apiPass));
EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
FlushClient();
} }
// Submit command buffer and check we got a call with both API-side command buffers // Test that the wire is able to send arrays of numerical values
wgpuQueueSubmit(queue, 2, cmdBufs); TEST_F(WireArgumentTests, ValueArrayArgument) {
// Create a bindgroup.
WGPUBindGroupLayoutDescriptor bglDescriptor = {};
bglDescriptor.entryCount = 0;
bglDescriptor.entries = nullptr;
EXPECT_CALL( WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
api, QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool { WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1]; EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
})));
FlushClient(); WGPUBindGroupDescriptor bindGroupDescriptor = {};
} bindGroupDescriptor.layout = bgl;
bindGroupDescriptor.entryCount = 0;
bindGroupDescriptor.entries = nullptr;
// Test that the wire is able to send structures that contain pure values (non-objects) WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
TEST_F(WireArgumentTests, StructureOfValuesArgument) { WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
WGPUSamplerDescriptor descriptor = {}; EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
descriptor.magFilter = WGPUFilterMode_Linear;
descriptor.minFilter = WGPUFilterMode_Nearest;
descriptor.mipmapFilter = WGPUFilterMode_Linear;
descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
descriptor.addressModeV = WGPUAddressMode_Repeat;
descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
descriptor.lodMinClamp = kLodMin;
descriptor.lodMaxClamp = kLodMax;
descriptor.compare = WGPUCompareFunction_Never;
wgpuDeviceCreateSampler(device, &descriptor); // Use the bindgroup in SetBindGroup that takes an array of value offsets.
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
WGPUSampler apiPlaceholderSampler = api.GetNewSampler(); std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
EXPECT_CALL(api, DeviceCreateSampler( wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(),
apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool { testOffsets.data());
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiEncoder));
WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
.WillOnce(Return(apiPass));
EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
apiPass, 0, apiBindGroup, testOffsets.size(),
MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
for (size_t i = 0; i < testOffsets.size(); i++) {
if (offsets[i] != testOffsets[i]) {
return false;
}
}
return true;
})));
FlushClient();
}
// Test that the wire is able to send C strings
TEST_F(WireArgumentTests, CStringArgument) {
// Create shader module
WGPUShaderModuleDescriptor vertexDescriptor = {};
WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
WGPUShaderModule apiVsModule = api.GetNewShaderModule();
EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
// Create the color state descriptor
WGPUBlendComponent blendComponent = {};
blendComponent.operation = WGPUBlendOperation_Add;
blendComponent.srcFactor = WGPUBlendFactor_One;
blendComponent.dstFactor = WGPUBlendFactor_One;
WGPUBlendState blendState = {};
blendState.alpha = blendComponent;
blendState.color = blendComponent;
WGPUColorTargetState colorTargetState = {};
colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
colorTargetState.blend = &blendState;
colorTargetState.writeMask = WGPUColorWriteMask_All;
// Create the depth-stencil state
WGPUStencilFaceState stencilFace = {};
stencilFace.compare = WGPUCompareFunction_Always;
stencilFace.failOp = WGPUStencilOperation_Keep;
stencilFace.depthFailOp = WGPUStencilOperation_Keep;
stencilFace.passOp = WGPUStencilOperation_Keep;
WGPUDepthStencilState depthStencilState = {};
depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
depthStencilState.depthWriteEnabled = false;
depthStencilState.depthCompare = WGPUCompareFunction_Always;
depthStencilState.stencilBack = stencilFace;
depthStencilState.stencilFront = stencilFace;
depthStencilState.stencilReadMask = 0xff;
depthStencilState.stencilWriteMask = 0xff;
depthStencilState.depthBias = 0;
depthStencilState.depthBiasSlopeScale = 0.0;
depthStencilState.depthBiasClamp = 0.0;
// Create the pipeline layout
WGPUPipelineLayoutDescriptor layoutDescriptor = {};
layoutDescriptor.bindGroupLayoutCount = 0;
layoutDescriptor.bindGroupLayouts = nullptr;
WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
// Create pipeline
WGPURenderPipelineDescriptor pipelineDescriptor = {};
pipelineDescriptor.vertex.module = vsModule;
pipelineDescriptor.vertex.entryPoint = "main";
pipelineDescriptor.vertex.bufferCount = 0;
pipelineDescriptor.vertex.buffers = nullptr;
WGPUFragmentState fragment = {};
fragment.module = vsModule;
fragment.entryPoint = "main";
fragment.targetCount = 1;
fragment.targets = &colorTargetState;
pipelineDescriptor.fragment = &fragment;
pipelineDescriptor.multisample.count = 1;
pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
pipelineDescriptor.layout = layout;
pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
pipelineDescriptor.depthStencil = &depthStencilState;
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
EXPECT_CALL(
api, DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->vertex.entryPoint == std::string("main");
})))
.WillOnce(Return(apiPlaceholderPipeline));
FlushClient();
}
// Test that the wire is able to send objects as value arguments
TEST_F(WireArgumentTests, ObjectAsValueArgument) {
WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiEncoder));
WGPUBufferDescriptor descriptor = {};
descriptor.size = 8;
descriptor.usage =
static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
WGPUBuffer apiBuffer = api.GetNewBuffer();
EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
.WillOnce(Return(apiBuffer))
.RetiresOnSaturation();
wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
EXPECT_CALL(api,
CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
FlushClient();
}
// Test that the wire is able to send array of objects
TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
WGPUCommandBuffer cmdBufs[2];
WGPUCommandBuffer apiCmdBufs[2];
// Create two command buffers we need to use a GMock sequence otherwise the order of the
// CreateCommandEncoder might be swapped since they are equivalent in term of matchers
Sequence s;
for (int i = 0; i < 2; ++i) {
WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.InSequence(s)
.WillOnce(Return(apiCmdBufEncoder));
apiCmdBufs[i] = api.GetNewCommandBuffer();
EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
.WillOnce(Return(apiCmdBufs[i]));
}
// Submit command buffer and check we got a call with both API-side command buffers
wgpuQueueSubmit(queue, 2, cmdBufs);
EXPECT_CALL(
api,
QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
})));
FlushClient();
}
// Test that the wire is able to send structures that contain pure values (non-objects)
TEST_F(WireArgumentTests, StructureOfValuesArgument) {
WGPUSamplerDescriptor descriptor = {};
descriptor.magFilter = WGPUFilterMode_Linear;
descriptor.minFilter = WGPUFilterMode_Nearest;
descriptor.mipmapFilter = WGPUFilterMode_Linear;
descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
descriptor.addressModeV = WGPUAddressMode_Repeat;
descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
descriptor.lodMinClamp = kLodMin;
descriptor.lodMaxClamp = kLodMax;
descriptor.compare = WGPUCompareFunction_Never;
wgpuDeviceCreateSampler(device, &descriptor);
WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
EXPECT_CALL(
api, DeviceCreateSampler(
apiDevice,
MatchesLambda(
[](const WGPUSamplerDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && return desc->nextInChain == nullptr &&
desc->magFilter == WGPUFilterMode_Linear && desc->magFilter == WGPUFilterMode_Linear &&
desc->minFilter == WGPUFilterMode_Nearest && desc->minFilter == WGPUFilterMode_Nearest &&
@ -261,108 +275,111 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) {
desc->compare == WGPUCompareFunction_Never && desc->compare == WGPUCompareFunction_Never &&
desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax; desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax;
}))) })))
.WillOnce(Return(apiPlaceholderSampler)); .WillOnce(Return(apiPlaceholderSampler));
FlushClient(); FlushClient();
} }
// Test that the wire is able to send structures that contain objects // Test that the wire is able to send structures that contain objects
TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) { TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
WGPUBindGroupLayoutDescriptor bglDescriptor = {}; WGPUBindGroupLayoutDescriptor bglDescriptor = {};
bglDescriptor.entryCount = 0; bglDescriptor.entryCount = 0;
bglDescriptor.entries = nullptr; bglDescriptor.entries = nullptr;
WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl)); EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
WGPUPipelineLayoutDescriptor descriptor = {}; WGPUPipelineLayoutDescriptor descriptor = {};
descriptor.bindGroupLayoutCount = 1; descriptor.bindGroupLayoutCount = 1;
descriptor.bindGroupLayouts = &bgl; descriptor.bindGroupLayouts = &bgl;
wgpuDeviceCreatePipelineLayout(device, &descriptor); wgpuDeviceCreatePipelineLayout(device, &descriptor);
WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout(); WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
EXPECT_CALL(api, DeviceCreatePipelineLayout( EXPECT_CALL(
apiDevice, api, DeviceCreatePipelineLayout(
MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool { apiDevice,
return desc->nextInChain == nullptr && MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
desc->bindGroupLayoutCount == 1 && return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
desc->bindGroupLayouts[0] == apiBgl; desc->bindGroupLayouts[0] == apiBgl;
}))) })))
.WillOnce(Return(apiPlaceholderLayout)); .WillOnce(Return(apiPlaceholderLayout));
FlushClient(); FlushClient();
} }
// Test that the wire is able to send structures that contain objects // Test that the wire is able to send structures that contain objects
TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) { TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
static constexpr int NUM_BINDINGS = 3; static constexpr int NUM_BINDINGS = 3;
WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{ WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{
{nullptr, {nullptr,
0, 0,
WGPUShaderStage_Vertex, WGPUShaderStage_Vertex,
{}, {},
{nullptr, WGPUSamplerBindingType_Filtering}, {nullptr, WGPUSamplerBindingType_Filtering},
{}, {},
{}}, {}},
{nullptr, {nullptr,
1, 1,
WGPUShaderStage_Vertex, WGPUShaderStage_Vertex,
{}, {},
{}, {},
{nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false}, {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false},
{}}, {}},
{nullptr, {nullptr,
2, 2,
static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment), static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
{nullptr, WGPUBufferBindingType_Uniform, false, 0}, {nullptr, WGPUBufferBindingType_Uniform, false, 0},
{}, {},
{}, {},
{}}, {}},
}; };
WGPUBindGroupLayoutDescriptor bglDescriptor = {}; WGPUBindGroupLayoutDescriptor bglDescriptor = {};
bglDescriptor.entryCount = NUM_BINDINGS; bglDescriptor.entryCount = NUM_BINDINGS;
bglDescriptor.entries = entries; bglDescriptor.entries = entries;
wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor); wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout(); WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
EXPECT_CALL( EXPECT_CALL(api,
api, DeviceCreateBindGroupLayout(
DeviceCreateBindGroupLayout( apiDevice,
apiDevice, MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool { MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
for (int i = 0; i < NUM_BINDINGS; ++i) { for (int i = 0; i < NUM_BINDINGS; ++i) {
const auto& a = desc->entries[i]; const auto& a = desc->entries[i];
const auto& b = entries[i]; const auto& b = entries[i];
if (a.binding != b.binding || a.visibility != b.visibility || if (a.binding != b.binding || a.visibility != b.visibility ||
a.buffer.type != b.buffer.type || a.sampler.type != b.sampler.type || a.buffer.type != b.buffer.type ||
a.texture.sampleType != b.texture.sampleType) { a.sampler.type != b.sampler.type ||
return false; a.texture.sampleType != b.texture.sampleType) {
} return false;
} }
return desc->nextInChain == nullptr && desc->entryCount == 3; }
}))) return desc->nextInChain == nullptr && desc->entryCount == 3;
.WillOnce(Return(apiBgl)); })))
.WillOnce(Return(apiBgl));
FlushClient(); FlushClient();
} }
// Test passing nullptr instead of objects - array of objects version // Test passing nullptr instead of objects - array of objects version
TEST_F(WireArgumentTests, DISABLED_NullptrInArray) { TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
WGPUBindGroupLayout nullBGL = nullptr; WGPUBindGroupLayout nullBGL = nullptr;
WGPUPipelineLayoutDescriptor descriptor = {}; WGPUPipelineLayoutDescriptor descriptor = {};
descriptor.bindGroupLayoutCount = 1; descriptor.bindGroupLayoutCount = 1;
descriptor.bindGroupLayouts = &nullBGL; descriptor.bindGroupLayouts = &nullBGL;
wgpuDeviceCreatePipelineLayout(device, &descriptor); wgpuDeviceCreatePipelineLayout(device, &descriptor);
EXPECT_CALL(api, EXPECT_CALL(
DeviceCreatePipelineLayout( api, DeviceCreatePipelineLayout(
apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool { apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 && return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
desc->bindGroupLayouts[0] == nullptr; desc->bindGroupLayouts[0] == nullptr;
}))) })))
.WillOnce(Return(nullptr)); .WillOnce(Return(nullptr));
FlushClient(); FlushClient();
} }
} // namespace dawn::wire
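
The same mechanical fix repeats in every test file in this CL: the file-scope using-directives are replaced by a namespace wrapper plus narrow using-declarations. A minimal before/after sketch of the pattern (illustrative only, not a file from this CL):

// Before: blanket directives at file scope trip the build/namespaces lint.
// using namespace testing;
// using namespace dawn::wire;

// After: wrap the file in its namespace and pull in each name individually.
namespace dawn::wire {

    using testing::Return;  // one using-declaration per gMock/gTest name actually used

    // ... tests unchanged ...

}  // namespace dawn::wire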

View File

@@ -14,67 +14,71 @@
#include "dawn/tests/unittests/wire/WireTest.h"

namespace dawn::wire {

    using testing::Return;

    class WireBasicTests : public WireTest {
      public:
        WireBasicTests() {
        }
        ~WireBasicTests() override = default;
    };

    // One call gets forwarded correctly.
    TEST_F(WireBasicTests, CallForwarded) {
        wgpuDeviceCreateCommandEncoder(device, nullptr);

        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));

        FlushClient();
    }

    // Test that calling methods on a new object works as expected.
    TEST_F(WireBasicTests, CreateThenCall) {
        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
        wgpuCommandEncoderFinish(encoder, nullptr);

        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));

        WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
        EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
            .WillOnce(Return(apiCmdBuf));

        FlushClient();
    }

    // Test that client reference/release do not call the backend API.
    TEST_F(WireBasicTests, RefCountKeptInClient) {
        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);

        wgpuCommandEncoderReference(encoder);
        wgpuCommandEncoderRelease(encoder);

        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));

        FlushClient();
    }

    // Test that the backend Release is called when the client refcount reaches 0.
    TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);

        wgpuCommandEncoderRelease(encoder);

        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));

        EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder));

        FlushClient();
    }

}  // namespace dawn::wire
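
As a reading aid for the tests above: client-side wgpu* calls are only serialized, and reach the mocked server API when FlushClient() pumps the wire. A compressed sketch of that rhythm, reusing the WireTest fixture members (api, device, apiDevice) seen above:

    TEST_F(WireBasicTests, SketchOnly_FlushOrder) {
        wgpuDeviceCreateCommandEncoder(device, nullptr);  // recorded client-side only

        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));  // what the mocked server must observe

        FlushClient();  // the serialized commands cross the wire here
    }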

File diff suppressed because it is too large

View File

@@ -17,361 +17,369 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"

namespace dawn::wire {
    namespace {

        using testing::_;
        using testing::InvokeWithoutArgs;
        using testing::Mock;
        using testing::Return;
        using testing::Sequence;
        using testing::StrEq;
        using testing::StrictMock;

        // Mock class to add expectations on the wire calling callbacks
        class MockCreateComputePipelineAsyncCallback {
          public:
            MOCK_METHOD(void,
                        Call,
                        (WGPUCreatePipelineAsyncStatus status,
                         WGPUComputePipeline pipeline,
                         const char* message,
                         void* userdata));
        };

        std::unique_ptr<StrictMock<MockCreateComputePipelineAsyncCallback>>
            mockCreateComputePipelineAsyncCallback;
        void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
                                                      WGPUComputePipeline pipeline,
                                                      const char* message,
                                                      void* userdata) {
            mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata);
        }

        class MockCreateRenderPipelineAsyncCallback {
          public:
            MOCK_METHOD(void,
                        Call,
                        (WGPUCreatePipelineAsyncStatus status,
                         WGPURenderPipeline pipeline,
                         const char* message,
                         void* userdata));
        };

        std::unique_ptr<StrictMock<MockCreateRenderPipelineAsyncCallback>>
            mockCreateRenderPipelineAsyncCallback;
        void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
                                                     WGPURenderPipeline pipeline,
                                                     const char* message,
                                                     void* userdata) {
            mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata);
        }

    }  // anonymous namespace

    class WireCreatePipelineAsyncTest : public WireTest {
      public:
        void SetUp() override {
            WireTest::SetUp();

            mockCreateComputePipelineAsyncCallback =
                std::make_unique<StrictMock<MockCreateComputePipelineAsyncCallback>>();
            mockCreateRenderPipelineAsyncCallback =
                std::make_unique<StrictMock<MockCreateRenderPipelineAsyncCallback>>();
        }

        void TearDown() override {
            WireTest::TearDown();

            // Delete mock so that expectations are checked
            mockCreateComputePipelineAsyncCallback = nullptr;
            mockCreateRenderPipelineAsyncCallback = nullptr;
        }

        void FlushClient() {
            WireTest::FlushClient();
            Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
        }

        void FlushServer() {
            WireTest::FlushServer();
            Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
        }
    };

    // Test creating a compute pipeline with CreateComputePipelineAsync() successfully.
    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) {
        WGPUShaderModuleDescriptor csDescriptor{};
        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));

        WGPUComputePipelineDescriptor descriptor{};
        descriptor.compute.module = csModule;
        descriptor.compute.entryPoint = "main";

        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
                                             ToMockCreateComputePipelineAsyncCallback, this);

        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
            .WillOnce(InvokeWithoutArgs([&]() {
                api.CallDeviceCreateComputePipelineAsyncCallback(
                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
            }));

        FlushClient();

        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
            .Times(1);

        FlushServer();
    }

    // Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error.
    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) {
        WGPUShaderModuleDescriptor csDescriptor{};
        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));

        WGPUComputePipelineDescriptor descriptor{};
        descriptor.compute.module = csModule;
        descriptor.compute.entryPoint = "main";

        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
                                             ToMockCreateComputePipelineAsyncCallback, this);

        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
            .WillOnce(InvokeWithoutArgs([&]() {
                api.CallDeviceCreateComputePipelineAsyncCallback(
                    apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
            }));

        FlushClient();

        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
            .Times(1);

        FlushServer();
    }

    // Test creating a render pipeline with CreateRenderPipelineAsync() successfully.
    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) {
        WGPUShaderModuleDescriptor vertexDescriptor = {};
        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));

        WGPURenderPipelineDescriptor pipelineDescriptor{};
        pipelineDescriptor.vertex.module = vsModule;
        pipelineDescriptor.vertex.entryPoint = "main";

        WGPUFragmentState fragment = {};
        fragment.module = vsModule;
        fragment.entryPoint = "main";
        pipelineDescriptor.fragment = &fragment;

        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
                                            ToMockCreateRenderPipelineAsyncCallback, this);
        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
            .WillOnce(InvokeWithoutArgs([&]() {
                api.CallDeviceCreateRenderPipelineAsyncCallback(
                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
            }));

        FlushClient();

        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
            .Times(1);

        FlushServer();
    }

    // Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error.
    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) {
        WGPUShaderModuleDescriptor vertexDescriptor = {};
        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));

        WGPURenderPipelineDescriptor pipelineDescriptor{};
        pipelineDescriptor.vertex.module = vsModule;
        pipelineDescriptor.vertex.entryPoint = "main";

        WGPUFragmentState fragment = {};
        fragment.module = vsModule;
        fragment.entryPoint = "main";
        pipelineDescriptor.fragment = &fragment;

        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
                                            ToMockCreateRenderPipelineAsyncCallback, this);
        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
            .WillOnce(InvokeWithoutArgs([&]() {
                api.CallDeviceCreateRenderPipelineAsyncCallback(
                    apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
            }));

        FlushClient();

        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
            .Times(1);

        FlushServer();
    }

    // Test that registering a callback then wire disconnect calls the callback with
    // DeviceLost.
    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) {
        WGPUShaderModuleDescriptor vertexDescriptor = {};
        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));

        WGPUFragmentState fragment = {};
        fragment.module = vsModule;
        fragment.entryPoint = "main";

        WGPURenderPipelineDescriptor pipelineDescriptor{};
        pipelineDescriptor.vertex.module = vsModule;
        pipelineDescriptor.vertex.entryPoint = "main";
        pipelineDescriptor.fragment = &fragment;

        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
                                            ToMockCreateRenderPipelineAsyncCallback, this);
        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
            .WillOnce(InvokeWithoutArgs([&]() {
                api.CallDeviceCreateRenderPipelineAsyncCallback(
                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
            }));

        FlushClient();

        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
            .Times(1);
        GetWireClient()->Disconnect();
    }

    // Test that registering a callback then wire disconnect calls the callback with
    // DeviceLost.
    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) {
        WGPUShaderModuleDescriptor csDescriptor{};
        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));

        WGPUComputePipelineDescriptor descriptor{};
        descriptor.compute.module = csModule;
        descriptor.compute.entryPoint = "main";

        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
                                             ToMockCreateComputePipelineAsyncCallback, this);
        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
            .WillOnce(InvokeWithoutArgs([&]() {
                api.CallDeviceCreateComputePipelineAsyncCallback(
                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
            }));

        FlushClient();

        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
            .Times(1);
        GetWireClient()->Disconnect();
    }

    // Test that registering a callback after wire disconnect calls the callback with
    // DeviceLost.
    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) {
        WGPUShaderModuleDescriptor vertexDescriptor = {};
        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));

        WGPUFragmentState fragment = {};
        fragment.module = vsModule;
        fragment.entryPoint = "main";

        WGPURenderPipelineDescriptor pipelineDescriptor{};
        pipelineDescriptor.vertex.module = vsModule;
        pipelineDescriptor.vertex.entryPoint = "main";
        pipelineDescriptor.fragment = &fragment;

        FlushClient();

        GetWireClient()->Disconnect();

        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
            .Times(1);
        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
                                            ToMockCreateRenderPipelineAsyncCallback, this);
    }

    // Test that registering a callback after wire disconnect calls the callback with
    // DeviceLost.
    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) {
        WGPUShaderModuleDescriptor csDescriptor{};
        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));

        WGPUComputePipelineDescriptor descriptor{};
        descriptor.compute.module = csModule;
        descriptor.compute.entryPoint = "main";

        FlushClient();

        GetWireClient()->Disconnect();

        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
            .Times(1);

        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
                                             ToMockCreateComputePipelineAsyncCallback, this);
    }

    TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
        WGPUShaderModuleDescriptor vertexDescriptor = {};
        WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
        WGPUShaderModule apiModule = api.GetNewShaderModule();
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule));

        WGPURenderPipelineDescriptor pipelineDescriptor{};
        pipelineDescriptor.vertex.module = module;
        pipelineDescriptor.vertex.entryPoint = "main";

        WGPUFragmentState fragment = {};
        fragment.module = module;
        fragment.entryPoint = "main";
        pipelineDescriptor.fragment = &fragment;

        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
                                            ToMockCreateRenderPipelineAsyncCallback, this);

        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _));
        FlushClient();

        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
                    Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this))
            .Times(1);

        wgpuDeviceRelease(device);

        // Expect release on all objects created by the client.
        Sequence s1, s2;
        EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
        EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2);
        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
            .Times(1)
            .InSequence(s1, s2);
        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
            .Times(1)
            .InSequence(s1, s2);
        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
            .Times(1)
            .InSequence(s1, s2);
        EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2);

        FlushClient();
        DefaultApiDeviceWasReleased();
    }

}  // namespace dawn::wire
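
A note on the trampoline idiom used throughout the file above: the C API only accepts a plain function pointer, so each test forwards into a global StrictMock that SetUp() re-creates and TearDown() destroys, which is what forces expectations to be verified per test. Reduced to its core (hypothetical MockCb name, status-only signature for brevity):

    class MockCb {
      public:
        MOCK_METHOD(void, Call, (WGPUCreatePipelineAsyncStatus status, void* userdata));
    };
    std::unique_ptr<testing::StrictMock<MockCb>> gMockCb;  // reset for each test

    // Plain function with the C callback signature, forwarding into the mock.
    void ToMockCb(WGPUCreatePipelineAsyncStatus status, void* userdata) {
        gMockCb->Call(status, userdata);
    }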

View File

@@ -15,44 +15,49 @@
#include "dawn/tests/MockCallback.h"
#include "dawn/tests/unittests/wire/WireTest.h"

namespace dawn::wire {

    using testing::Return;
    using testing::Sequence;

    class WireDestroyObjectTests : public WireTest {};

    // Test that destroying the device also destroys child objects.
    TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);

        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiEncoder));

        FlushClient();

        // Release the device. It should cause the command encoder to be destroyed.
        wgpuDeviceRelease(device);

        Sequence s1, s2;
        // The device and child objects should be released.
        EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1);
        EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2);
        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
            .Times(1)
            .InSequence(s1, s2);
        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
            .Times(1)
            .InSequence(s1, s2);
        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
            .Times(1)
            .InSequence(s1, s2);
        EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2);

        FlushClient();

        // Signal that we already released and cleared callbacks for |apiDevice|
        DefaultApiDeviceWasReleased();

        // Using the command encoder should be an error.
        wgpuCommandEncoderFinish(encoder, nullptr);
        FlushClient(false);
    }

}  // namespace dawn::wire
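
The Sequence s1, s2 pattern above encodes a partial order: expectations sharing a sequence must match in that order, while expectations in different sequences stay mutually unordered. A freestanding gMock sketch (hypothetical MockLog class, unrelated to the wire):

    class MockLog {
      public:
        MOCK_METHOD(void, Emit, (int id));
    };

    void ExpectPartialOrder(MockLog& log) {
        testing::Sequence s1, s2;
        EXPECT_CALL(log, Emit(1)).InSequence(s1);      // 1 must come before 3
        EXPECT_CALL(log, Emit(2)).InSequence(s2);      // 2 must come before 3; 1 vs 2 free
        EXPECT_CALL(log, Emit(3)).InSequence(s1, s2);  // closes both chains
    }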

View File

@@ -18,157 +18,167 @@
#include "dawn/tests/MockCallback.h"
#include "dawn/wire/WireClient.h"

namespace dawn::wire {

    using testing::_;
    using testing::Exactly;
    using testing::InvokeWithoutArgs;
    using testing::MockCallback;
    using testing::Return;
    using testing::Sequence;
    using testing::StrEq;

    namespace {

        class WireDisconnectTests : public WireTest {};

    }  // anonymous namespace

    // Test that commands are not received if the client disconnects.
    TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
        // Check that commands work at all.
        wgpuDeviceCreateCommandEncoder(device, nullptr);
        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));
        FlushClient();

        // Disconnect.
        GetWireClient()->Disconnect();

        // Command is not received because client disconnected.
        wgpuDeviceCreateCommandEncoder(device, nullptr);
        EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0));
        FlushClient();
    }

    // Test that commands that are serialized before a disconnect but flushed
    // after are received.
    TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
        // Check that commands work at all.
        wgpuDeviceCreateCommandEncoder(device, nullptr);

        // Disconnect.
        GetWireClient()->Disconnect();

        // Already-serialized commands are still received.
        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
            .WillOnce(Return(apiCmdBufEncoder));
        FlushClient();
    }

    // Check that disconnecting the wire client calls the device lost callback exactly once.
    TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
                                        mockDeviceLostCallback.MakeUserdata(this));

        // Disconnect the wire client. We should receive device lost only once.
        EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
            .Times(Exactly(1));
        GetWireClient()->Disconnect();
        GetWireClient()->Disconnect();
    }

    // Check that disconnecting the wire client after a device loss does not trigger the callback
    // again.
    TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
                                        mockDeviceLostCallback.MakeUserdata(this));

        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
                                                    "some reason");

        // Flush the device lost return command.
        EXPECT_CALL(mockDeviceLostCallback,
                    Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this))
            .Times(Exactly(1));
        FlushServer();

        // Disconnect the client. We shouldn't see the lost callback again.
        EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
        GetWireClient()->Disconnect();
    }

    // Check that disconnecting the wire client inside the device loss callback does not trigger
    // the callback again.
    TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) {
        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
                                        mockDeviceLostCallback.MakeUserdata(this));

        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
                                                    "lost reason");

        // Disconnect the client inside the lost callback. We should see the callback
        // only once.
        EXPECT_CALL(mockDeviceLostCallback,
                    Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this))
            .WillOnce(InvokeWithoutArgs([&]() {
                EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
                GetWireClient()->Disconnect();
            }));
        FlushServer();
    }

    // Check that a device loss after a disconnect does not trigger the callback again.
    TEST_F(WireDisconnectTests, DisconnectThenServerLost) {
        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
                                        mockDeviceLostCallback.MakeUserdata(this));

        // Disconnect the client. We should see the callback once.
        EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
            .Times(Exactly(1));
        GetWireClient()->Disconnect();

        // Lose the device on the server. The client callback shouldn't be
        // called again.
        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
                                                    "lost reason");
        EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
        FlushServer();
    }

    // Test that client objects are all destroyed if the WireClient is destroyed.
    TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
        WGPUSamplerDescriptor desc = {};
        wgpuDeviceCreateCommandEncoder(device, nullptr);
        wgpuDeviceCreateSampler(device, &desc);

        WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiCommandEncoder)); WGPUSamplerDescriptor desc = {};
wgpuDeviceCreateCommandEncoder(device, nullptr);
wgpuDeviceCreateSampler(device, &desc);
WGPUSampler apiSampler = api.GetNewSampler(); WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler)); EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
.WillOnce(Return(apiCommandEncoder));
FlushClient(); WGPUSampler apiSampler = api.GetNewSampler();
EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler));
DeleteClient(); FlushClient();
// Expect release on all objects created by the client. DeleteClient();
Sequence s1, s2, s3;
EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
.Times(1)
.InSequence(s1, s2);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
.Times(1)
.InSequence(s1, s2);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
.Times(1)
.InSequence(s1, s2);
EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
FlushClient();
// Signal that we already released and cleared callbacks for |apiDevice| // Expect release on all objects created by the client.
DefaultApiDeviceWasReleased(); Sequence s1, s2, s3;
} EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
.Times(1)
.InSequence(s1, s2);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
.Times(1)
.InSequence(s1, s2);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
.Times(1)
.InSequence(s1, s2);
EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
FlushClient();
// Signal that we already released and cleared callbacks for |apiDevice|
DefaultApiDeviceWasReleased();
}
} // namespace dawn::wire
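The change applied across these test files follows one pattern: drop the blanket "using namespace testing;" and "using namespace dawn::wire;" directives that the build/namespaces cpplint check rejects, wrap the tests in namespace dawn::wire, and import only the GoogleMock symbols each file actually uses. A minimal sketch of that shape (the symbol list is illustrative):

// Rejected by the build/namespaces lint check:
// using namespace testing;

// Accepted: per-symbol using declarations inside the project namespace.
namespace dawn::wire {

    using testing::_;
    using testing::Return;

}  // namespace dawn::wire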

View File

@ -17,290 +17,306 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"

namespace dawn::wire {

    using testing::_;
    using testing::DoAll;
    using testing::Mock;
    using testing::Return;
    using testing::SaveArg;
    using testing::StrEq;
    using testing::StrictMock;

    namespace {

        // Mock classes to add expectations on the wire calling callbacks
        class MockDeviceErrorCallback {
          public:
            MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
        };

        std::unique_ptr<StrictMock<MockDeviceErrorCallback>> mockDeviceErrorCallback;
        void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) {
            mockDeviceErrorCallback->Call(type, message, userdata);
        }

        class MockDevicePopErrorScopeCallback {
          public:
            MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
        };

        std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>>
            mockDevicePopErrorScopeCallback;
        void ToMockDevicePopErrorScopeCallback(WGPUErrorType type,
                                               const char* message,
                                               void* userdata) {
            mockDevicePopErrorScopeCallback->Call(type, message, userdata);
        }

        class MockDeviceLoggingCallback {
          public:
            MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata));
        };

        std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
        void ToMockDeviceLoggingCallback(WGPULoggingType type,
                                         const char* message,
                                         void* userdata) {
            mockDeviceLoggingCallback->Call(type, message, userdata);
        }

        class MockDeviceLostCallback {
          public:
            MOCK_METHOD(void,
                        Call,
                        (WGPUDeviceLostReason reason, const char* message, void* userdata));
        };

        std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
        void ToMockDeviceLostCallback(WGPUDeviceLostReason reason,
                                      const char* message,
                                      void* userdata) {
            mockDeviceLostCallback->Call(reason, message, userdata);
        }

    }  // anonymous namespace

    class WireErrorCallbackTests : public WireTest {
      public:
        WireErrorCallbackTests() {
        }
        ~WireErrorCallbackTests() override = default;

        void SetUp() override {
            WireTest::SetUp();

            mockDeviceErrorCallback = std::make_unique<StrictMock<MockDeviceErrorCallback>>();
            mockDeviceLoggingCallback = std::make_unique<StrictMock<MockDeviceLoggingCallback>>();
            mockDevicePopErrorScopeCallback =
                std::make_unique<StrictMock<MockDevicePopErrorScopeCallback>>();
            mockDeviceLostCallback = std::make_unique<StrictMock<MockDeviceLostCallback>>();
        }

        void TearDown() override {
            WireTest::TearDown();

            mockDeviceErrorCallback = nullptr;
            mockDeviceLoggingCallback = nullptr;
            mockDevicePopErrorScopeCallback = nullptr;
            mockDeviceLostCallback = nullptr;
        }

        void FlushServer() {
            WireTest::FlushServer();

            Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback);
            Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback);
        }
    };

    // Test the return wire for device error callbacks
    TEST_F(WireErrorCallbackTests, DeviceErrorCallback) {
        wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this);

        // Setting the error callback should stay on the client side and do nothing
        FlushClient();

        // Calling the callback on the server side will result in the callback being called on the
        // client side
        api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation,
                                                         "Some error message");

        EXPECT_CALL(*mockDeviceErrorCallback,
                    Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
            .Times(1);

        FlushServer();
    }

    // Test the return wire for device user warning callbacks
    TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
        wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this);

        // Setting the injected warning callback should stay on the client side and do nothing
        FlushClient();

        // Calling the callback on the server side will result in the callback being called on the
        // client side
        api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
        EXPECT_CALL(*mockDeviceLoggingCallback,
                    Call(WGPULoggingType_Info, StrEq("Some message"), this))
            .Times(1);

        FlushServer();
    }

    // Test the return wire for error scopes.
    TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) {
        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
        FlushClient();

        WGPUErrorCallback callback;
        void* userdata;
        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
        FlushClient();

        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                    Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
            .Times(1);
        callback(WGPUErrorType_Validation, "Some error message", userdata);
        FlushServer();
    }

    // Test the return wire for error scopes when callbacks return in various orders.
    TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) {
        // Two error scopes are popped, and the first one returns first.
        {
            EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
            FlushClient();

            WGPUErrorCallback callback1;
            WGPUErrorCallback callback2;
            void* userdata1;
            void* userdata2;
            EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
                .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
                .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
            FlushClient();

            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                        Call(WGPUErrorType_Validation, StrEq("First error message"), this))
                .Times(1);
            callback1(WGPUErrorType_Validation, "First error message", userdata1);
            FlushServer();

            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                        Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
                .Times(1);
            callback2(WGPUErrorType_Validation, "Second error message", userdata2);
            FlushServer();
        }

        // Two error scopes are popped, and the second one returns first.
        {
            EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
            FlushClient();

            WGPUErrorCallback callback1;
            WGPUErrorCallback callback2;
            void* userdata1;
            void* userdata2;
            EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
                .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
                .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
            FlushClient();

            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                        Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
                .Times(1);
            callback2(WGPUErrorType_Validation, "Second error message", userdata2);
            FlushServer();

            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                        Call(WGPUErrorType_Validation, StrEq("First error message"), this))
                .Times(1);
            callback1(WGPUErrorType_Validation, "First error message", userdata1);
            FlushServer();
        }
    }

    // Test the return wire for error scopes in flight when the device is destroyed.
    TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
        FlushClient();

        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
        FlushClient();

        // Incomplete callback called in Device destructor. This is resolved after the end of this
        // test.
        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                    Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
            .Times(1);
    }

    // Test that registering a callback then wire disconnect calls the callback with
    // DeviceLost.
    TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) {
        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);

        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
        FlushClient();

        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                    Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
            .Times(1);
        GetWireClient()->Disconnect();
    }

    // Test that registering a callback after wire disconnect calls the callback with
    // DeviceLost.
    TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) {
        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
        FlushClient();

        GetWireClient()->Disconnect();

        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                    Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
            .Times(1);
        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
    }

    // Empty stack (We are emulating the errors that would be callback-ed from native).
    TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) {
        WGPUErrorCallback callback;
        void* userdata;
        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
        FlushClient();

        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
                    Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this))
            .Times(1);
        callback(WGPUErrorType_Validation, "No error scopes to pop", userdata);
        FlushServer();
    }

    // Test the return wire for device lost callback
    TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
        wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this);

        // Setting the error callback should stay on the client side and do nothing
        FlushClient();

        // Calling the callback on the server side will result in the callback being called on the
        // client side
        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
                                                    "Some error message");

        EXPECT_CALL(*mockDeviceLostCallback,
                    Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this))
            .Times(1);

        FlushServer();
    }

}  // namespace dawn::wire
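For context on what these tests exercise: across the wire, wgpuDevicePopErrorScope completes asynchronously, so the client keeps the callback pending until the server flushes the result back. From an application's point of view the pair is used roughly as follows (a hedged sketch against the webgpu.h C API of this period; the function names and callback body are illustrative, not part of this CL):

// Assumes webgpu.h and <cstdio>.
void OnScopePopped(WGPUErrorType type, const char* message, void* userdata) {
    // Runs once the result command has come back from the server side.
    if (type != WGPUErrorType_NoError) {
        fprintf(stderr, "captured error: %s\n", message);
    }
}

WGPUBuffer CreateBufferChecked(WGPUDevice device, const WGPUBufferDescriptor* desc) {
    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, desc);
    wgpuDevicePopErrorScope(device, OnScopePopped, /*userdata=*/nullptr);
    return buffer;
}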

View File

@ -14,76 +14,81 @@
#include "dawn/tests/unittests/wire/WireTest.h"

namespace dawn::wire {

    using testing::_;
    using testing::Invoke;
    using testing::NotNull;
    using testing::Return;
    using testing::Unused;

    class WireExtensionTests : public WireTest {
      public:
        WireExtensionTests() {
        }
        ~WireExtensionTests() override = default;
    };

    // Serialize/Deserializes a chained struct correctly.
    TEST_F(WireExtensionTests, ChainedStruct) {
        WGPUShaderModuleDescriptor shaderModuleDesc = {};
        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
        FlushClient();

        WGPUPrimitiveDepthClampingState clientExt = {};
        clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
        clientExt.chain.next = nullptr;
        clientExt.clampDepth = true;

        WGPURenderPipelineDescriptor renderPipelineDesc = {};
        renderPipelineDesc.vertex.module = shaderModule;
        renderPipelineDesc.vertex.entryPoint = "main";
        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke(
                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                        serverDesc->primitive.nextInChain);
                    EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
                    EXPECT_EQ(ext->clampDepth, true);
                    EXPECT_EQ(ext->chain.next, nullptr);

                    return api.GetNewRenderPipeline();
                }));
        FlushClient();
    }

    // Serialize/Deserializes multiple chained structs correctly.
    TEST_F(WireExtensionTests, MutlipleChainedStructs) {
        WGPUShaderModuleDescriptor shaderModuleDesc = {};
        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
        FlushClient();

        WGPUPrimitiveDepthClampingState clientExt2 = {};
        clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState;
        clientExt2.chain.next = nullptr;
        clientExt2.clampDepth = false;

        WGPUPrimitiveDepthClampingState clientExt1 = {};
        clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
        clientExt1.chain.next = &clientExt2.chain;
        clientExt1.clampDepth = true;

        WGPURenderPipelineDescriptor renderPipelineDesc = {};
        renderPipelineDesc.vertex.module = shaderModule;
        renderPipelineDesc.vertex.entryPoint = "main";
        renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
                                 -> WGPURenderPipeline {
                const auto* ext1 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                    serverDesc->primitive.nextInChain);
                EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType);
@ -97,17 +102,17 @@ TEST_F(WireExtensionTests, MutlipleChainedStructs) {
                return api.GetNewRenderPipeline();
            }));
        FlushClient();

        // Swap the order of the chained structs.
        renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
        clientExt2.chain.next = &clientExt1.chain;
        clientExt1.chain.next = nullptr;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
                                 -> WGPURenderPipeline {
                const auto* ext2 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                    serverDesc->primitive.nextInChain);
                EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType);
@ -121,121 +126,123 @@ TEST_F(WireExtensionTests, MutlipleChainedStructs) {
                return api.GetNewRenderPipeline();
            }));
        FlushClient();
    }

    // Test that a chained struct with Invalid sType passes through as Invalid.
    TEST_F(WireExtensionTests, InvalidSType) {
        WGPUShaderModuleDescriptor shaderModuleDesc = {};
        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
        FlushClient();

        WGPUPrimitiveDepthClampingState clientExt = {};
        clientExt.chain.sType = WGPUSType_Invalid;
        clientExt.chain.next = nullptr;

        WGPURenderPipelineDescriptor renderPipelineDesc = {};
        renderPipelineDesc.vertex.module = shaderModule;
        renderPipelineDesc.vertex.entryPoint = "main";
        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke(
                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
                    EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
                    return api.GetNewRenderPipeline();
                }));
        FlushClient();
    }

    // Test that a chained struct with unknown sType passes through as Invalid.
    TEST_F(WireExtensionTests, UnknownSType) {
        WGPUShaderModuleDescriptor shaderModuleDesc = {};
        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
        FlushClient();

        WGPUPrimitiveDepthClampingState clientExt = {};
        clientExt.chain.sType = static_cast<WGPUSType>(-1);
        clientExt.chain.next = nullptr;

        WGPURenderPipelineDescriptor renderPipelineDesc = {};
        renderPipelineDesc.vertex.module = shaderModule;
        renderPipelineDesc.vertex.entryPoint = "main";
        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke(
                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
                    EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
                    return api.GetNewRenderPipeline();
                }));
        FlushClient();
    }

    // Test that if both an invalid and valid sType are passed on the chain, only the invalid
    // sType passes through as Invalid.
    TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
        WGPUShaderModuleDescriptor shaderModuleDesc = {};
        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
        FlushClient();

        WGPUPrimitiveDepthClampingState clientExt2 = {};
        clientExt2.chain.sType = WGPUSType_Invalid;
        clientExt2.chain.next = nullptr;

        WGPUPrimitiveDepthClampingState clientExt1 = {};
        clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
        clientExt1.chain.next = &clientExt2.chain;
        clientExt1.clampDepth = true;

        WGPURenderPipelineDescriptor renderPipelineDesc = {};
        renderPipelineDesc.vertex.module = shaderModule;
        renderPipelineDesc.vertex.entryPoint = "main";
        renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke(
                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                        serverDesc->primitive.nextInChain);
                    EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
                    EXPECT_EQ(ext->clampDepth, true);
                    EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid);
                    EXPECT_EQ(ext->chain.next->next, nullptr);
                    return api.GetNewRenderPipeline();
                }));
        FlushClient();

        // Swap the order of the chained structs.
        renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
        clientExt2.chain.next = &clientExt1.chain;
        clientExt1.chain.next = nullptr;

        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
            .WillOnce(Invoke(
                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                        serverDesc->primitive.nextInChain->next);
                    EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
                    EXPECT_EQ(ext->clampDepth, true);
                    EXPECT_EQ(ext->chain.next, nullptr);
                    return api.GetNewRenderPipeline();
                }));
        FlushClient();
    }

}  // namespace dawn::wire
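The extension mechanism serialized above is the chained-struct pattern: an extension struct embeds a WGPUChainedStruct header whose sType identifies it, and the descriptor points at the head of the chain, which the wire walks and re-links on the server side. A minimal sketch using the same extension these tests use:

// Extension struct, tagged through its chain header.
WGPUPrimitiveDepthClampingState clamp = {};
clamp.chain.sType = WGPUSType_PrimitiveDepthClampingState;
clamp.chain.next = nullptr;
clamp.clampDepth = true;

// Attach the chain head to the descriptor being extended.
WGPURenderPipelineDescriptor desc = {};
desc.primitive.nextInChain = &clamp.chain;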

View File
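The injection tests below drive the reserve/inject handshake between the two wire endpoints: the client reserves an (id, generation) slot and hands back a proxy device, and the server later binds a real device to that slot. A rough sketch of the flow (the wireClient, wireServer, and backendDevice names are illustrative):

// Client side: allocate a wire slot and a client-side WGPUDevice proxy.
dawn::wire::ReservedDevice reservation = wireClient->ReserveDevice();

// Server side: attach an actual device to the reserved slot. This fails if
// the (id, generation) pair is already in use.
bool injected =
    wireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);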

@ -17,256 +17,273 @@
#include "dawn/wire/WireClient.h" #include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h" #include "dawn/wire/WireServer.h"
using namespace testing; namespace dawn::wire {
using namespace dawn::wire;
class WireInjectDeviceTests : public WireTest { using testing::_;
public: using testing::Exactly;
WireInjectDeviceTests() { using testing::Mock;
} using testing::Return;
~WireInjectDeviceTests() override = default;
};
// Test that reserving and injecting a device makes calls on the client object forward to the class WireInjectDeviceTests : public WireTest {
// server object correctly. public:
TEST_F(WireInjectDeviceTests, CallAfterReserveInject) { WireInjectDeviceTests() {
ReservedDevice reservation = GetWireClient()->ReserveDevice(); }
~WireInjectDeviceTests() override = default;
};
WGPUDevice serverDevice = api.GetNewDevice(); // Test that reserving and injecting a device makes calls on the client object forward to the
EXPECT_CALL(api, DeviceReference(serverDevice)); // server object correctly.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _)); TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
WGPUBufferDescriptor bufferDesc = {};
wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
WGPUBuffer serverBuffer = api.GetNewBuffer();
EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
// Test that reserve correctly returns different IDs each time.
TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
ASSERT_NE(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.device, reservation2.device);
}
// Test that injecting the same id without a destroy first fails.
TEST_F(WireInjectDeviceTests, InjectExistingID) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
// ID already in use, call fails.
ASSERT_FALSE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
// Test that the server only borrows the device and does a single reference-release
TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
// Injecting the device adds a reference
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
// Releasing the device removes a single reference and clears its error callbacks.
wgpuDeviceRelease(reservation.device);
EXPECT_CALL(api, DeviceRelease(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
FlushClient();
// Deleting the server doesn't release a second reference.
DeleteServer();
Mock::VerifyAndClearExpectations(&api);
}
// Test that it is an error to get the primary queue of a device before it has been
// injected on the server.
TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
wgpuDeviceGetQueue(reservation.device);
FlushClient(false);
}
// Test that it is valid to get the primary queue of a device after it has been
// injected on the server.
TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
wgpuDeviceGetQueue(reservation.device);
WGPUQueue apiQueue = api.GetNewQueue();
EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
// Test that the list of live devices can be reflected using GetDevice.
TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
// Reserve two devices.
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
// Inject both devices.
WGPUDevice serverDevice1 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
WGPUDevice serverDevice2 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice2));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
// Test that both devices can be reflected.
ASSERT_EQ(serverDevice1, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
// Release the first device
wgpuDeviceRelease(reservation1.device);
EXPECT_CALL(api, DeviceRelease(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
FlushClient();
// The first device should no longer reflect, but the second should
ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
}
// This is a regression test where a second device reservation invalidated pointers into the
// KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
// objects instead.
TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
// Reserve one device, inject it, and get the primary queue.
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice1 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
WGPUCommandEncoder commandEncoder =
wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
.WillOnce(Return(serverCommandEncoder));
FlushClient();
// Reserve a second device, and inject it.
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice2 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice2));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
// Release the encoder. This should work without error because it stores a stable
// pointer to its device's list of child objects. On destruction, it removes itself from the
// list.
wgpuCommandEncoderRelease(commandEncoder);
EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
}
// Test that a device reservation can be reclaimed. This is necessary to
// avoid leaking ObjectIDs for reservations that are never injected.
TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
// Test that doing a reservation and full release is an error.
{
ReservedDevice reservation = GetWireClient()->ReserveDevice(); ReservedDevice reservation = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
WGPUBufferDescriptor bufferDesc = {};
wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
WGPUBuffer serverBuffer = api.GetNewBuffer();
EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
// Test that reserve correctly returns different IDs each time.
TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
ASSERT_NE(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.device, reservation2.device);
}
// Test that injecting the same id without a destroy first fails.
TEST_F(WireInjectDeviceTests, InjectExistingID) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
// ID already in use, call fails.
ASSERT_FALSE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
// Test that the server only borrows the device and does a single reference-release
TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
// Injecting the device adds a reference
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
// Releasing the device removes a single reference and clears its error callbacks.
wgpuDeviceRelease(reservation.device);
EXPECT_CALL(api, DeviceRelease(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
FlushClient();
// Deleting the server doesn't release a second reference.
DeleteServer();
Mock::VerifyAndClearExpectations(&api);
}
// Test that it is an error to get the primary queue of a device before it has been
// injected on the server.
TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
wgpuDeviceGetQueue(reservation.device);
FlushClient(false);
}
// Test that it is valid to get the primary queue of a device after it has been
// injected on the server.
TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
ReservedDevice reservation = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
wgpuDeviceGetQueue(reservation.device);
WGPUQueue apiQueue = api.GetNewQueue();
EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
.Times(Exactly(1));
}
// Test that the list of live devices can be reflected using GetDevice.
TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
// Reserve two devices.
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
// Inject both devices.
WGPUDevice serverDevice1 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
WGPUDevice serverDevice2 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice2));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
// Test that both devices can be reflected.
ASSERT_EQ(serverDevice1,
GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2,
GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
// Release the first device
wgpuDeviceRelease(reservation1.device);
EXPECT_CALL(api, DeviceRelease(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
FlushClient();
// The first device should no longer reflect, but the second should
ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
ASSERT_EQ(serverDevice2,
GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
}
// This is a regression test where a second device reservation invalidated pointers into the
// KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
// objects instead.
TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
// Reserve one device, inject it, and get the primary queue.
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice1 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice1));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
WGPUCommandEncoder commandEncoder =
wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
.WillOnce(Return(serverCommandEncoder));
FlushClient();
// Reserve a second device, and inject it.
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
WGPUDevice serverDevice2 = api.GetNewDevice();
EXPECT_CALL(api, DeviceReference(serverDevice2));
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
ASSERT_TRUE(
GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
// Release the encoder. This should work without error because it stores a stable
// pointer to its device's list of child objects. On destruction, it removes itself from the
// list.
wgpuCommandEncoderRelease(commandEncoder);
EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
FlushClient();
// Called on shutdown.
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
.Times(1);
EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
}
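The failure mode behind this regression test is ordinary std::vector pointer invalidation. The following standalone sketch (hypothetical names; not Dawn code) shows why a second reservation could invalidate a child object's stored pointer when devices were held by value, and why heap-allocating each entry fixes it:

#include <memory>
#include <vector>

struct DeviceEntry {
    std::vector<int> childObjects;  // what a child object holds a pointer into
};

int main() {
    // By-value storage: push_back may reallocate, moving every DeviceEntry.
    std::vector<DeviceEntry> byValue;
    byValue.reserve(1);
    byValue.push_back({});
    std::vector<int>* children = &byValue[0].childObjects;
    byValue.push_back({});  // reallocation: `children` now dangles

    // Heap-allocated entries: the vector's buffer may still move, but each
    // DeviceEntry stays at a fixed address, so stored pointers stay valid.
    std::vector<std::unique_ptr<DeviceEntry>> byPointer;
    byPointer.push_back(std::make_unique<DeviceEntry>());
    std::vector<int>* stable = &byPointer[0]->childObjects;
    byPointer.push_back(std::make_unique<DeviceEntry>());  // `stable` still valid
    (void)children;
    (void)stable;
    return 0;
}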
// Test that a device reservation can be reclaimed. This is necessary to
// avoid leaking ObjectIDs for reservations that are never injected.
TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
// Test that doing a reservation and full release is an error.
{
ReservedDevice reservation = GetWireClient()->ReserveDevice();
wgpuDeviceRelease(reservation.device);
FlushClient(false);
}
// Test that doing a reservation and then reclaiming it recycles the ID.
{
ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
GetWireClient()->ReclaimDeviceReservation(reservation1);
ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
// The ID is the same, but the generation is still different.
ASSERT_EQ(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.generation, reservation2.generation);
// No errors should occur.
FlushClient();
}
}
} // namespace dawn::wire
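Taken together, these tests pin down the embedder-facing reservation flow. A minimal sketch of that flow, assuming `wireClient`/`wireServer` are an already-connected dawn::wire::WireClient/WireServer pair, and with `SendReservationToService` and `CreateBackendDevice` as hypothetical placeholders for the embedder's IPC and device creation:

// Client side: reserve an (id, generation) slot and a proxy WGPUDevice.
dawn::wire::ReservedDevice reservation = wireClient->ReserveDevice();
SendReservationToService(reservation.id, reservation.generation);

// Server side: bind a real device to the reservation. Injection takes a
// reference; the client's later wgpuDeviceRelease gives it back.
WGPUDevice backendDevice = CreateBackendDevice();
bool injected = wireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);

// If the device is never injected, reclaim the reservation so the ObjectID
// can be recycled (with a bumped generation) instead of leaking.
if (!injected) {
    wireClient->ReclaimDeviceReservation(reservation);
}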

View File

@@ -17,10 +17,11 @@
 #include "dawn/wire/WireClient.h"
 #include "dawn/wire/WireServer.h"
-using namespace testing;
-using namespace dawn::wire;
-namespace {
+namespace dawn::wire { namespace {
+using testing::Mock;
+using testing::NotNull;
+using testing::Return;
 class WireInjectInstanceTests : public WireTest {
 public:
@@ -116,4 +117,6 @@
 }
 }
-} // anonymous namespace
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}}  // namespace dawn::wire::

View File

@@ -17,100 +17,103 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
namespace dawn::wire {
using testing::Mock;
class WireInjectSwapChainTests : public WireTest {
public:
WireInjectSwapChainTests() {
}
~WireInjectSwapChainTests() override = default;
};
// Test that reserving and injecting a swapchain makes calls on the client object forward to the
// server object correctly.
TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) {
ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
EXPECT_CALL(api, SwapChainReference(apiSwapchain));
ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
wgpuSwapChainPresent(reservation.swapchain);
EXPECT_CALL(api, SwapChainPresent(apiSwapchain));
FlushClient();
}
// Test that reserve correctly returns different IDs each time.
TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) {
ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
ASSERT_NE(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.swapchain, reservation2.swapchain);
}
// Test that injecting the same id without a destroy first fails.
TEST_F(WireInjectSwapChainTests, InjectExistingID) {
ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
EXPECT_CALL(api, SwapChainReference(apiSwapchain));
ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
// ID already in use, call fails.
ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
}
// Test that the server only borrows the swapchain and does a single reference-release
TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) {
ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
// Injecting the swapchain adds a reference
WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
EXPECT_CALL(api, SwapChainReference(apiSwapchain));
ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
// Releasing the swapchain removes a single reference.
wgpuSwapChainRelease(reservation.swapchain);
EXPECT_CALL(api, SwapChainRelease(apiSwapchain));
FlushClient();
// Deleting the server doesn't release a second reference.
DeleteServer();
Mock::VerifyAndClearExpectations(&api);
}
// Test that a swapchain reservation can be reclaimed. This is necessary to
// avoid leaking ObjectIDs for reservations that are never injected.
TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
// Test that doing a reservation and full release is an error.
{
ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
wgpuSwapChainRelease(reservation.swapchain);
FlushClient(false);
}
// Test that doing a reservation and then reclaiming it recycles the ID.
{
ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
GetWireClient()->ReclaimSwapChainReservation(reservation1);
ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
// The ID is the same, but the generation is still different.
ASSERT_EQ(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.generation, reservation2.generation);
// No errors should occur.
FlushClient();
}
}
} // namespace dawn::wire
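The "same ID, different generation" assertions in ReclaimSwapChainReservation describe a generational-handle allocator. A self-contained sketch of that scheme (illustrative only; Dawn's wire allocator differs in detail):

#include <cstdint>
#include <vector>

struct Handle {
    uint32_t id;
    uint32_t generation;
};

class HandleAllocator {
  public:
    Handle Allocate() {
        if (!mFreeIds.empty()) {
            uint32_t id = mFreeIds.back();
            mFreeIds.pop_back();
            return {id, ++mGenerations[id]};  // reuse the id, bump the generation
        }
        mGenerations.push_back(0);
        return {static_cast<uint32_t>(mGenerations.size() - 1), 0};
    }
    void Free(Handle h) { mFreeIds.push_back(h.id); }
    // Stale handles from before a reuse fail this check.
    bool IsLive(Handle h) const { return mGenerations[h.id] == h.generation; }

  private:
    std::vector<uint32_t> mGenerations;
    std::vector<uint32_t> mFreeIds;
};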

View File

@@ -17,98 +17,106 @@
#include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h"
namespace dawn::wire {
using testing::Mock;
using testing::Return;
class WireInjectTextureTests : public WireTest {
public:
WireInjectTextureTests() {
}
~WireInjectTextureTests() override = default;
};
// Test that reserving and injecting a texture makes calls on the client object forward to the
// server object correctly.
TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
WGPUTexture apiTexture = api.GetNewTexture();
EXPECT_CALL(api, TextureReference(apiTexture));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
wgpuTextureCreateView(reservation.texture, nullptr);
WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr))
.WillOnce(Return(apiPlaceholderView));
FlushClient();
}
// Test that reserve correctly returns different IDs each time.
TEST_F(WireInjectTextureTests, ReserveDifferentIDs) {
ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
ASSERT_NE(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.texture, reservation2.texture);
}
// Test that injecting the same id without a destroy first fails.
TEST_F(WireInjectTextureTests, InjectExistingID) {
ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
WGPUTexture apiTexture = api.GetNewTexture();
EXPECT_CALL(api, TextureReference(apiTexture));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
// ID already in use, call fails.
ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
}
// Test that the server only borrows the texture and does a single reference-release
TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
// Injecting the texture adds a reference
WGPUTexture apiTexture = api.GetNewTexture();
EXPECT_CALL(api, TextureReference(apiTexture));
ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
reservation.generation, reservation.deviceId,
reservation.deviceGeneration));
// Releasing the texture removes a single reference.
wgpuTextureRelease(reservation.texture);
EXPECT_CALL(api, TextureRelease(apiTexture));
FlushClient();
// Deleting the server doesn't release a second reference.
DeleteServer();
Mock::VerifyAndClearExpectations(&api);
}
// Test that a texture reservation can be reclaimed. This is necessary to
// avoid leaking ObjectIDs for reservations that are never injected.
TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
// Test that doing a reservation and full release is an error.
{
ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
wgpuTextureRelease(reservation.texture);
FlushClient(false);
}
// Test that doing a reservation and then reclaiming it recycles the ID.
{
ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
GetWireClient()->ReclaimTextureReservation(reservation1);
ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
// The ID is the same, but the generation is still different.
ASSERT_EQ(reservation1.id, reservation2.id);
ASSERT_NE(reservation1.generation, reservation2.generation);
// No errors should occur.
FlushClient();
}
}
} // namespace dawn::wire
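InjectedTextureLifetime spells out the borrow contract: injection performs exactly one TextureReference, the client's release performs the matching TextureRelease, and tearing down the server must not release again. As a generic RAII sketch (illustrative, not Dawn's implementation):

// A borrowed handle: one AddRef when the borrow starts, one Release in total.
template <typename T, void (*AddRef)(T), void (*Release)(T)>
class Borrowed {
  public:
    explicit Borrowed(T handle) : mHandle(handle) { AddRef(mHandle); }
    Borrowed(const Borrowed&) = delete;
    Borrowed& operator=(const Borrowed&) = delete;
    ~Borrowed() {
        if (mHandle != nullptr) {
            Release(mHandle);  // server-teardown path: at most one release
        }
    }
    void ReleaseNow() {  // client-release path: disarm the destructor
        Release(mHandle);
        mHandle = nullptr;
    }

  private:
    T mHandle;
};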

View File

@@ -23,10 +23,16 @@
 #include "webgpu/webgpu_cpp.h"
-namespace {
-using namespace testing;
-using namespace dawn::wire;
+namespace dawn::wire { namespace {
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::Return;
+using testing::SetArgPointee;
+using testing::StrEq;
+using testing::WithArg;
 class WireInstanceBasicTest : public WireTest {};
 class WireInstanceTests : public WireTest {
@@ -284,4 +290,6 @@
 GetWireClient()->Disconnect();
 }
-} // anonymous namespace
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}}  // namespace dawn::wire::
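For context on the `}}` closer used in these instance-test files: an anonymous namespace cannot be written as part of a C++17 nested-namespace-definition (there is no `namespace dawn::wire::<anonymous>`), so the file opens both namespaces on one line and closes them together. The combined closer trips cpplint's readability/namespace check, which expects one commented brace per namespace, hence the NOLINTNEXTLINE suppression. In miniature:

namespace dawn::wire { namespace {

// Internal-linkage helpers that still participate in dawn::wire name lookup.
int Helper() {
    return 0;
}

// NOLINTNEXTLINE(readability/namespace)
}}  // namespace dawn::wire::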

View File

@@ -14,166 +14,173 @@
#include "dawn/tests/unittests/wire/WireTest.h"
namespace dawn::wire {
using testing::_;
using testing::Return;
class WireOptionalTests : public WireTest {
public:
WireOptionalTests() {
}
~WireOptionalTests() override = default;
};
// Test passing nullptr instead of objects - object as value version
TEST_F(WireOptionalTests, OptionalObjectValue) {
WGPUBindGroupLayoutDescriptor bglDesc = {};
bglDesc.entryCount = 0;
WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc);
WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout();
EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _))
.WillOnce(Return(apiBindGroupLayout));
// The `sampler`, `textureView` and `buffer` members of a binding are optional.
WGPUBindGroupEntry entry;
entry.binding = 0;
entry.sampler = nullptr;
entry.textureView = nullptr;
entry.buffer = nullptr;
entry.nextInChain = nullptr;
WGPUBindGroupDescriptor bgDesc = {};
bgDesc.layout = bgl;
bgDesc.entryCount = 1;
bgDesc.entries = &entry;
wgpuDeviceCreateBindGroup(device, &bgDesc);
WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
EXPECT_CALL(api,
DeviceCreateBindGroup(
apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && desc->entryCount == 1 &&
desc->entries[0].binding == 0 &&
desc->entries[0].sampler == nullptr &&
desc->entries[0].buffer == nullptr &&
desc->entries[0].textureView == nullptr;
})))
.WillOnce(Return(apiPlaceholderBindGroup));
FlushClient();
} }
// Test that the wire is able to send optional pointers to structures
TEST_F(WireOptionalTests, OptionalStructPointer) {
// Create shader module
WGPUShaderModuleDescriptor vertexDescriptor = {};
WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
WGPUShaderModule apiVsModule = api.GetNewShaderModule();
EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
// Create the color state descriptor
WGPUBlendComponent blendComponent = {};
blendComponent.operation = WGPUBlendOperation_Add;
blendComponent.srcFactor = WGPUBlendFactor_One;
blendComponent.dstFactor = WGPUBlendFactor_One;
WGPUBlendState blendState = {};
blendState.alpha = blendComponent;
blendState.color = blendComponent;
WGPUColorTargetState colorTargetState = {};
colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
colorTargetState.blend = &blendState;
colorTargetState.writeMask = WGPUColorWriteMask_All;
// Create the depth-stencil state
WGPUStencilFaceState stencilFace = {};
stencilFace.compare = WGPUCompareFunction_Always;
stencilFace.failOp = WGPUStencilOperation_Keep;
stencilFace.depthFailOp = WGPUStencilOperation_Keep;
stencilFace.passOp = WGPUStencilOperation_Keep;
WGPUDepthStencilState depthStencilState = {};
depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
depthStencilState.depthWriteEnabled = false;
depthStencilState.depthCompare = WGPUCompareFunction_Always;
depthStencilState.stencilBack = stencilFace;
depthStencilState.stencilFront = stencilFace;
depthStencilState.stencilReadMask = 0xff;
depthStencilState.stencilWriteMask = 0xff;
depthStencilState.depthBias = 0;
depthStencilState.depthBiasSlopeScale = 0.0;
depthStencilState.depthBiasClamp = 0.0;
// Create the pipeline layout
WGPUPipelineLayoutDescriptor layoutDescriptor = {};
layoutDescriptor.bindGroupLayoutCount = 0;
layoutDescriptor.bindGroupLayouts = nullptr;
WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
// Create pipeline
WGPURenderPipelineDescriptor pipelineDescriptor = {};
pipelineDescriptor.vertex.module = vsModule;
pipelineDescriptor.vertex.entryPoint = "main";
pipelineDescriptor.vertex.bufferCount = 0;
pipelineDescriptor.vertex.buffers = nullptr;
WGPUFragmentState fragment = {};
fragment.module = vsModule;
fragment.entryPoint = "main";
fragment.targetCount = 1;
fragment.targets = &colorTargetState;
pipelineDescriptor.fragment = &fragment;
pipelineDescriptor.multisample.count = 1;
pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
pipelineDescriptor.layout = layout;
pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
// First case: depthStencil is not null.
pipelineDescriptor.depthStencil = &depthStencilState;
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
EXPECT_CALL(
api,
DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->depthStencil != nullptr &&
desc->depthStencil->nextInChain == nullptr &&
desc->depthStencil->depthWriteEnabled == false &&
desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilBack.depthFailOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.depthFailOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
desc->depthStencil->stencilReadMask == 0xff &&
desc->depthStencil->stencilWriteMask == 0xff &&
desc->depthStencil->depthBias == 0 &&
desc->depthStencil->depthBiasSlopeScale == 0.0 &&
desc->depthStencil->depthBiasClamp == 0.0;
})))
.WillOnce(Return(apiPlaceholderPipeline));
FlushClient();
// Second case: depthStencil is null.
pipelineDescriptor.depthStencil = nullptr;
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
EXPECT_CALL(api,
DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->depthStencil == nullptr;
})))
.WillOnce(Return(apiPlaceholderPipeline));
FlushClient();
}
} // namespace dawn::wire
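OptionalStructPointer checks that a null `depthStencil` arrives on the server as a null pointer rather than as a stale or garbage payload. Wire protocols typically encode an optional struct pointer as a presence flag followed by a conditional payload; a simplified sketch of that idea (hypothetical types and flat memcpy serialization, not Dawn's generated code):

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for a wire-transportable struct (hypothetical).
struct DepthStencilStateWire {
    uint32_t format;
    uint32_t depthCompare;
};

void SerializeOptional(std::vector<uint8_t>* buffer, const DepthStencilStateWire* state) {
    buffer->push_back(state != nullptr ? 1 : 0);  // presence flag
    if (state != nullptr) {
        const uint8_t* bytes = reinterpret_cast<const uint8_t*>(state);
        buffer->insert(buffer->end(), bytes, bytes + sizeof(*state));
    }
}

// Returns `storage` if a value was present, nullptr otherwise, so the
// receiver reconstructs the original null/non-null distinction.
const DepthStencilStateWire* DeserializeOptional(const uint8_t** cursor,
                                                 DepthStencilStateWire* storage) {
    bool present = *(*cursor)++ != 0;
    if (!present) {
        return nullptr;
    }
    std::memcpy(storage, *cursor, sizeof(*storage));
    *cursor += sizeof(*storage);
    return storage;
}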

View File

@@ -17,125 +17,131 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
namespace dawn::wire {
using testing::_;
using testing::InvokeWithoutArgs;
using testing::Mock;
class MockQueueWorkDoneCallback {
public:
MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
};
static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
mockQueueWorkDoneCallback->Call(status, userdata);
}
class WireQueueTests : public WireTest {
protected:
void SetUp() override {
WireTest::SetUp();
mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
}
void TearDown() override {
WireTest::TearDown();
mockQueueWorkDoneCallback = nullptr;
}
void FlushServer() {
WireTest::FlushServer();
Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback);
}
};
// Test that a successful OnSubmittedWorkDone call is forwarded to the client.
TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success);
}));
FlushClient();
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this))
.Times(1);
FlushServer();
}
// Test that an error OnSubmittedWorkDone call is forwarded as an error to the client.
TEST_F(WireQueueTests, OnSubmittedWorkDoneError) {
wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
}));
FlushClient();
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
FlushServer();
}
// Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with
// device loss
TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) {
wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
}));
FlushClient();
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
.Times(1);
GetWireClient()->Disconnect();
}
// Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with
// device loss
TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) {
GetWireClient()->Disconnect();
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
.Times(1);
wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
}
// Hack to pass in test context into user callback
struct TestData {
WireQueueTests* pTest;
WGPUQueue* pTestQueue;
size_t numRequests;
};
static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) {
TestData* testData = reinterpret_cast<TestData*>(userdata);
// Mimic a user callback that issues new requests
ASSERT_NE(testData, nullptr);
ASSERT_NE(testData->pTest, nullptr);
ASSERT_NE(testData->pTestQueue, nullptr);
mockQueueWorkDoneCallback->Call(status, testData->pTest);
// Send the requests a number of times
for (size_t i = 0; i < testData->numRequests; i++) {
wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone,
testData->pTest);
}
}
// Test that requests inside user callbacks before disconnect are called
TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
TestData testData = {this, &queue, 10};
wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData);
EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
}));
FlushClient();
EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
.Times(1 + testData.numRequests);
GetWireClient()->Disconnect();
}
// Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
// since it is always destructed after the test TearDown, and we cannot create a new queue obj
// with wgpuDeviceGetQueue
} // namespace dawn::wire
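The disconnect tests above all rely on one client-side invariant: every registered callback fires exactly once, and Disconnect flushes anything still pending with DeviceLost, including requests re-issued from inside a callback (hence the `1 + testData.numRequests` expectation). A compact sketch of a registry with that behavior (illustrative; the generated Dawn client is more elaborate):

#include <cstdint>
#include <functional>
#include <map>

enum class WorkDoneStatus { Success, Error, DeviceLost };
using WorkDoneCallback = std::function<void(WorkDoneStatus)>;

class CallbackRegistry {
  public:
    uint64_t Register(WorkDoneCallback cb) {
        if (mDisconnected) {
            cb(WorkDoneStatus::DeviceLost);  // requests after disconnect fail immediately
            return 0;
        }
        uint64_t serial = mNextSerial++;
        mPending.emplace(serial, std::move(cb));
        return serial;
    }
    void Complete(uint64_t serial, WorkDoneStatus status) {
        auto it = mPending.find(serial);
        if (it == mPending.end()) {
            return;  // already flushed by Disconnect()
        }
        WorkDoneCallback cb = std::move(it->second);
        mPending.erase(it);
        cb(status);  // fires exactly once
    }
    void Disconnect() {
        mDisconnected = true;
        // Callbacks may try to re-register while we drain; with mDisconnected
        // set they are rejected with DeviceLost right away, which matches the
        // 1 + numRequests expectation in the tests above.
        while (!mPending.empty()) {
            auto it = mPending.begin();
            WorkDoneCallback cb = std::move(it->second);
            mPending.erase(it);
            cb(WorkDoneStatus::DeviceLost);
        }
    }

  private:
    bool mDisconnected = false;
    uint64_t mNextSerial = 0;
    std::map<uint64_t, WorkDoneCallback> mPending;
};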

View File

@@ -17,220 +17,228 @@
#include "dawn/tests/unittests/wire/WireTest.h"
#include "dawn/wire/WireClient.h"
namespace dawn::wire {
using testing::_;
using testing::InvokeWithoutArgs;
using testing::Mock;
using testing::Return;
using testing::StrictMock;
namespace {
// Mock class to add expectations on the wire calling callbacks
class MockCompilationInfoCallback {
public:
MOCK_METHOD(void,
Call,
(WGPUCompilationInfoRequestStatus status,
const WGPUCompilationInfo* info,
void* userdata));
};
std::unique_ptr<StrictMock<MockCompilationInfoCallback>> mockCompilationInfoCallback;
void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status,
const WGPUCompilationInfo* info,
void* userdata) {
mockCompilationInfoCallback->Call(status, info, userdata);
}
} // anonymous namespace
class WireShaderModuleTests : public WireTest {
public:
WireShaderModuleTests() {
}
~WireShaderModuleTests() override = default;
void SetUp() override {
WireTest::SetUp();
mockCompilationInfoCallback = std::make_unique<StrictMock<MockCompilationInfoCallback>>();
apiShaderModule = api.GetNewShaderModule();
WGPUShaderModuleDescriptor descriptor = {};
shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor);
EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _))
.WillOnce(Return(apiShaderModule))
.RetiresOnSaturation();
FlushClient();
}
void TearDown() override {
WireTest::TearDown();
// Delete mock so that expectations are checked
mockCompilationInfoCallback = nullptr;
}
void FlushClient() {
WireTest::FlushClient();
Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
}
void FlushServer() {
WireTest::FlushServer();
Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
}
protected:
WGPUShaderModule shaderModule;
WGPUShaderModule apiShaderModule;
};
// Check getting CompilationInfo for a successfully created shader module
TEST_F(WireShaderModuleTests, GetCompilationInfo) {
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
WGPUCompilationMessage message = {
nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
WGPUCompilationInfo compilationInfo;
compilationInfo.nextInChain = nullptr;
compilationInfo.messageCount = 1;
compilationInfo.messages = &message;
EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallShaderModuleGetCompilationInfoCallback(
apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
}));
FlushClient();
EXPECT_CALL(*mockCompilationInfoCallback,
Call(WGPUCompilationInfoRequestStatus_Success,
MatchesLambda([&](const WGPUCompilationInfo* info) -> bool {
if (info->messageCount != compilationInfo.messageCount) {
return false;
}
const WGPUCompilationMessage* infoMessage = &info->messages[0];
return strcmp(infoMessage->message, message.message) == 0 &&
infoMessage->nextInChain == message.nextInChain &&
infoMessage->type == message.type &&
infoMessage->lineNum == message.lineNum &&
infoMessage->linePos == message.linePos &&
infoMessage->offset == message.offset &&
infoMessage->length == message.length;
}),
_))
.Times(1);
FlushServer();
}
// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a
// device loss.
TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
WGPUCompilationMessage message = {
nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
WGPUCompilationInfo compilationInfo;
compilationInfo.nextInChain = nullptr;
compilationInfo.messageCount = 1;
compilationInfo.messages = &message;
EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallShaderModuleGetCompilationInfoCallback(
apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
}));
FlushClient();
EXPECT_CALL(*mockCompilationInfoCallback,
Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
GetWireClient()->Disconnect();
}
// Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a
// device loss.
TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) {
GetWireClient()->Disconnect();
EXPECT_CALL(*mockCompilationInfoCallback,
Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
}
// Hack to pass in test context into user callback
struct TestData {
WireShaderModuleTests* pTest;
WGPUShaderModule* pTestShaderModule;
size_t numRequests;
};
static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status,
const WGPUCompilationInfo* info,
void* userdata) {
TestData* testData = reinterpret_cast<TestData*>(userdata);
// Mimic a user callback that issues new requests
ASSERT_NE(testData, nullptr);
ASSERT_NE(testData->pTest, nullptr);
ASSERT_NE(testData->pTestShaderModule, nullptr);
mockCompilationInfoCallback->Call(status, info, testData->pTest);
// Send the requests a number of times
for (size_t i = 0; i < testData->numRequests; i++) {
wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule),
ToMockGetCompilationInfoCallback, nullptr);
}
}
// Test that requests inside user callbacks before disconnect are called
TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) {
TestData testData = {this, &shaderModule, 10};
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
&testData);
WGPUCompilationMessage message = {
nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
WGPUCompilationInfo compilationInfo;
compilationInfo.nextInChain = nullptr;
compilationInfo.messageCount = 1;
compilationInfo.messages = &message;
EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallShaderModuleGetCompilationInfoCallback(
apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
}));
FlushClient();
EXPECT_CALL(*mockCompilationInfoCallback,
Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _))
.Times(1 + testData.numRequests);
GetWireClient()->Disconnect();
}
// Test that requests inside user callbacks before object destruction are called
TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) {
TestData testData = {this, &shaderModule, 10};
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
&testData);
WGPUCompilationMessage message = {
nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
WGPUCompilationInfo compilationInfo;
compilationInfo.nextInChain = nullptr;
compilationInfo.messageCount = 1;
compilationInfo.messages = &message;
EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallShaderModuleGetCompilationInfoCallback(
apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
}));
FlushClient();
EXPECT_CALL(*mockCompilationInfoCallback,
Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
.Times(1 + testData.numRequests);
wgpuShaderModuleRelease(shaderModule);
}
} // namespace dawn::wire
wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
&testData);
WGPUCompilationMessage message = {
nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
WGPUCompilationInfo compilationInfo;
compilationInfo.nextInChain = nullptr;
compilationInfo.messageCount = 1;
compilationInfo.messages = &message;
EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
.WillOnce(InvokeWithoutArgs([&]() {
api.CallShaderModuleGetCompilationInfoCallback(
apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
}));
FlushClient();
EXPECT_CALL(*mockCompilationInfoCallback,
Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
.Times(1 + testData.numRequests);
wgpuShaderModuleRelease(shaderModule);
}
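The two "inside callback" tests above rely on the classic C-callback context pattern: because a C-style callback cannot capture C++ state, the test smuggles a context struct through the void* userdata parameter, and the callback issues new re-entrant requests from inside itself. Each test sends one initial request plus numRequests re-entrant ones, so the forced completion (disconnect or release) must fire the mock 1 + numRequests times. The following minimal sketch, with illustrative names that are not part of this CL, shows the same userdata pattern in isolation:

#include <cstddef>

// A C-style callback type: no captures allowed, so state travels via userdata.
using StatusCallback = void (*)(int status, void* userdata);

struct CallbackContext {
    int observedStatus = -1;
    size_t callCount = 0;
};

void OnStatus(int status, void* userdata) {
    // The callee trusts that userdata points at the struct the caller registered.
    CallbackContext* ctx = static_cast<CallbackContext*>(userdata);
    ctx->observedStatus = status;
    ctx->callCount++;
}

void FireCallback(StatusCallback cb, void* userdata) {
    cb(/*status=*/0, userdata);
}

int main() {
    CallbackContext ctx;
    FireCallback(OnStatus, &ctx);  // ctx.callCount == 1, ctx.observedStatus == 0
    return 0;
}

The cast in OnStatus is only as safe as the registration site; the Dawn tests guard against mismatches with the ASSERT_NE checks on the unpacked pointers.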

View File

@ -19,8 +19,11 @@
#include "dawn/wire/WireClient.h" #include "dawn/wire/WireClient.h"
#include "dawn/wire/WireServer.h" #include "dawn/wire/WireServer.h"
using namespace testing; using testing::_;
using namespace dawn::wire; using testing::AnyNumber;
using testing::Exactly;
using testing::Mock;
using testing::Return;
WireTest::WireTest() { WireTest::WireTest() {
} }
@ -28,11 +31,11 @@ WireTest::WireTest() {
WireTest::~WireTest() { WireTest::~WireTest() {
} }
client::MemoryTransferService* WireTest::GetClientMemoryTransferService() { dawn::wire::client::MemoryTransferService* WireTest::GetClientMemoryTransferService() {
return nullptr; return nullptr;
} }
server::MemoryTransferService* WireTest::GetServerMemoryTransferService() { dawn::wire::server::MemoryTransferService* WireTest::GetServerMemoryTransferService() {
return nullptr; return nullptr;
} }
@ -50,19 +53,19 @@ void WireTest::SetUp() {
mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>(); mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>(mWireServer.get()); mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>(mWireServer.get());
WireServerDescriptor serverDesc = {}; dawn::wire::WireServerDescriptor serverDesc = {};
serverDesc.procs = &mockProcs; serverDesc.procs = &mockProcs;
serverDesc.serializer = mS2cBuf.get(); serverDesc.serializer = mS2cBuf.get();
serverDesc.memoryTransferService = GetServerMemoryTransferService(); serverDesc.memoryTransferService = GetServerMemoryTransferService();
mWireServer.reset(new WireServer(serverDesc)); mWireServer.reset(new dawn::wire::WireServer(serverDesc));
mC2sBuf->SetHandler(mWireServer.get()); mC2sBuf->SetHandler(mWireServer.get());
WireClientDescriptor clientDesc = {}; dawn::wire::WireClientDescriptor clientDesc = {};
clientDesc.serializer = mC2sBuf.get(); clientDesc.serializer = mC2sBuf.get();
clientDesc.memoryTransferService = GetClientMemoryTransferService(); clientDesc.memoryTransferService = GetClientMemoryTransferService();
mWireClient.reset(new WireClient(clientDesc)); mWireClient.reset(new dawn::wire::WireClient(clientDesc));
mS2cBuf->SetHandler(mWireClient.get()); mS2cBuf->SetHandler(mWireClient.get());
dawnProcSetProcs(&dawn::wire::client::GetProcs()); dawnProcSetProcs(&dawn::wire::client::GetProcs());
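The hunk above is the core pattern of the whole CL: cpplint's build/namespaces check rejects using-directives (`using namespace testing;`), which silently import every current and future name in a namespace, in favor of using-declarations that import exactly one name each. A self-contained, hedged illustration of the rule, with generic names that are not from Dawn:

#include <gmock/gmock.h>

// Rejected by the build/namespaces lint check (the form this CL removes):
//   using namespace testing;
//
// Preferred: one using-declaration per symbol the file actually needs.
using testing::_;
using testing::Return;

class MockAdder {
  public:
    MOCK_METHOD(int, Add, (int, int));
};

int main() {
    MockAdder adder;
    // `_` and `Return` resolve through the using-declarations above.
    EXPECT_CALL(adder, Add(_, _)).WillOnce(Return(3));
    return adder.Add(1, 2) == 3 ? 0 : 1;
}

Beyond lint compliance, the explicit form documents which matchers and actions a test file depends on and avoids accidental collisions when two namespaces export the same name.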

File diff suppressed because it is too large

View File

@ -20,100 +20,101 @@
#include "dawn/tests/DawnTest.h" #include "dawn/tests/DawnTest.h"
#include "dawn/utils/WGPUHelpers.h" #include "dawn/utils/WGPUHelpers.h"
namespace { namespace dawn::native::d3d12 {
class ExpectBetweenTimestamps : public detail::Expectation { namespace {
public: class ExpectBetweenTimestamps : public ::detail::Expectation {
~ExpectBetweenTimestamps() override = default; public:
~ExpectBetweenTimestamps() override = default;
ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) { ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) {
mValue0 = value0; mValue0 = value0;
mValue1 = value1; mValue1 = value1;
}
// Expect the actual results are between mValue0 and mValue1.
testing::AssertionResult Check(const void* data, size_t size) override {
const uint64_t* actual = static_cast<const uint64_t*>(data);
for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
if (actual[i] < mValue0 || actual[i] > mValue1) {
return testing::AssertionFailure()
<< "Expected data[" << i << "] to be between " << mValue0 << " and "
<< mValue1 << ", actual " << actual[i] << std::endl;
}
} }
return testing::AssertionSuccess(); // Expect the actual results are between mValue0 and mValue1.
testing::AssertionResult Check(const void* data, size_t size) override {
const uint64_t* actual = static_cast<const uint64_t*>(data);
for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
if (actual[i] < mValue0 || actual[i] > mValue1) {
return testing::AssertionFailure()
<< "Expected data[" << i << "] to be between " << mValue0 << " and "
<< mValue1 << ", actual " << actual[i] << std::endl;
}
}
return testing::AssertionSuccess();
}
private:
uint64_t mValue0;
uint64_t mValue1;
};
} // anonymous namespace
class D3D12GPUTimestampCalibrationTests : public DawnTest {
protected:
void SetUp() override {
DawnTest::SetUp();
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
// Requires that timestamp query feature is enabled and timestamp query conversion is
// disabled.
DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) ||
!HasToggleEnabled("disable_timestamp_query_conversion"));
} }
private: std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
uint64_t mValue0; std::vector<wgpu::FeatureName> requiredFeatures = {};
uint64_t mValue1; if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
}
return requiredFeatures;
}
}; };
} // anonymous namespace // Check that the timestamps got by timestamp query are between the two timestamps from
// GetClockCalibration() after the timestamp conversion is disabled.
TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
constexpr uint32_t kQueryCount = 2;
using namespace dawn::native::d3d12; wgpu::QuerySetDescriptor querySetDescriptor;
querySetDescriptor.count = kQueryCount;
querySetDescriptor.type = wgpu::QueryType::Timestamp;
wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
class D3D12GPUTimestampCalibrationTests : public DawnTest { wgpu::BufferDescriptor bufferDescriptor;
protected: bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
void SetUp() override { bufferDescriptor.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc |
DawnTest::SetUp(); wgpu::BufferUsage::CopyDst;
wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
DAWN_TEST_UNSUPPORTED_IF(UsesWire()); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
// Requires that timestamp query feature is enabled and timestamp query conversion is encoder.WriteTimestamp(querySet, 0);
// disabled. encoder.WriteTimestamp(querySet, 1);
DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) || wgpu::CommandBuffer commands = encoder.Finish();
!HasToggleEnabled("disable_timestamp_query_conversion"));
Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
uint64_t gpuTimestamp0, gpuTimestamp1;
uint64_t cpuTimestamp0, cpuTimestamp1;
d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
queue.Submit(1, &commands);
WaitForAllOperations();
d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
// Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
// so that the timestamp in the querySet will be closer to both gpuTimestamps from
// GetClockCalibration.
wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
queue.Submit(1, &resolveCommands);
EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
} }
std::vector<wgpu::FeatureName> GetRequiredFeatures() override { DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
std::vector<wgpu::FeatureName> requiredFeatures = {}; D3D12Backend({"disable_timestamp_query_conversion"}));
if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
}
return requiredFeatures;
}
};
// Check that the timestamps got by timestamp query are between the two timestamps from } // namespace dawn::native::d3d12
// GetClockCalibration() after the timestamp conversion is disabled.
TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
constexpr uint32_t kQueryCount = 2;
wgpu::QuerySetDescriptor querySetDescriptor;
querySetDescriptor.count = kQueryCount;
querySetDescriptor.type = wgpu::QueryType::Timestamp;
wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
wgpu::BufferDescriptor bufferDescriptor;
bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
bufferDescriptor.usage =
wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
encoder.WriteTimestamp(querySet, 0);
encoder.WriteTimestamp(querySet, 1);
wgpu::CommandBuffer commands = encoder.Finish();
Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
uint64_t gpuTimestamp0, gpuTimestamp1;
uint64_t cpuTimestamp0, cpuTimestamp1;
d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
queue.Submit(1, &commands);
WaitForAllOperations();
d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
// Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
// so that the timestamp in the querySet will be closer to both gpuTimestamps from
// GetClockCalibration.
wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
queue.Submit(1, &resolveCommands);
EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
}
DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
D3D12Backend({"disable_timestamp_query_conversion"}));
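For context on why the test disables conversion: ID3D12CommandQueue::GetClockCalibration() returns a simultaneously sampled GPU/CPU timestamp pair in raw ticks, and with the "disable_timestamp_query_conversion" toggle the resolved query results also stay in raw ticks, so the bounds check can compare tick values directly. The conversion Dawn would otherwise perform divides by the tick rate from ID3D12CommandQueue::GetTimestampFrequency(). A hypothetical helper, not from this CL, sketches that conversion:

#include <cstdint>

// Converts raw GPU timestamp ticks to nanoseconds, given the queue's tick rate
// (ticks per second) from ID3D12CommandQueue::GetTimestampFrequency().
// Splitting out the whole seconds first avoids 64-bit overflow for large tick
// counts, since ticks * 1e9 would overflow long before ticks itself does.
uint64_t GpuTicksToNanoseconds(uint64_t ticks, uint64_t ticksPerSecond) {
    const uint64_t seconds = ticks / ticksPerSecond;
    const uint64_t remainderTicks = ticks % ticksPerSecond;
    return seconds * 1'000'000'000ull + (remainderTicks * 1'000'000'000ull) / ticksPerSecond;
}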

View File

@ -18,91 +18,93 @@
#include "dawn/native/d3d12/TextureD3D12.h" #include "dawn/native/d3d12/TextureD3D12.h"
#include "dawn/tests/DawnTest.h" #include "dawn/tests/DawnTest.h"
using namespace dawn::native::d3d12; namespace dawn::native::d3d12 {
class D3D12ResourceHeapTests : public DawnTest { class D3D12ResourceHeapTests : public DawnTest {
protected: protected:
void SetUp() override { void SetUp() override {
DawnTest::SetUp(); DawnTest::SetUp();
DAWN_TEST_UNSUPPORTED_IF(UsesWire()); DAWN_TEST_UNSUPPORTED_IF(UsesWire());
}
std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
if (!mIsBCFormatSupported) {
return {};
} }
return {wgpu::FeatureName::TextureCompressionBC}; std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
if (!mIsBCFormatSupported) {
return {};
}
return {wgpu::FeatureName::TextureCompressionBC};
}
bool IsBCFormatSupported() const {
return mIsBCFormatSupported;
}
private:
bool mIsBCFormatSupported = false;
};
// Verify that creating a small compressed textures will be 4KB aligned.
TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
// TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
wgpu::TextureDescriptor descriptor;
descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = 8;
descriptor.size.height = 8;
descriptor.size.depthOrArrayLayers = 1;
descriptor.sampleCount = 1;
descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
descriptor.mipLevelCount = 1;
descriptor.usage = wgpu::TextureUsage::TextureBinding;
// Create a smaller one that allows use of the smaller alignment.
wgpu::Texture texture = device.CreateTexture(&descriptor);
Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
// Create a larger one (>64KB) that forbids use the smaller alignment.
descriptor.size.width = 4096;
descriptor.size.height = 4096;
texture = device.CreateTexture(&descriptor);
d3dTexture = reinterpret_cast<Texture*>(texture.Get());
EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
} }
bool IsBCFormatSupported() const { // Verify creating a UBO will always be 256B aligned.
return mIsBCFormatSupported; TEST_P(D3D12ResourceHeapTests, AlignUBO) {
// Create a small UBO
wgpu::BufferDescriptor descriptor;
descriptor.size = 4 * 1024;
descriptor.usage = wgpu::BufferUsage::Uniform;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
0u);
// Create a larger UBO
descriptor.size = (4 * 1024 * 1024) + 255;
descriptor.usage = wgpu::BufferUsage::Uniform;
buffer = device.CreateBuffer(&descriptor);
d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
0u);
} }
private: DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
bool mIsBCFormatSupported = false;
};
// Verify that creating a small compressed textures will be 4KB aligned. } // namespace dawn::native::d3d12
TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
// TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
wgpu::TextureDescriptor descriptor;
descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = 8;
descriptor.size.height = 8;
descriptor.size.depthOrArrayLayers = 1;
descriptor.sampleCount = 1;
descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
descriptor.mipLevelCount = 1;
descriptor.usage = wgpu::TextureUsage::TextureBinding;
// Create a smaller one that allows use of the smaller alignment.
wgpu::Texture texture = device.CreateTexture(&descriptor);
Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
// Create a larger one (>64KB) that forbids use the smaller alignment.
descriptor.size.width = 4096;
descriptor.size.height = 4096;
texture = device.CreateTexture(&descriptor);
d3dTexture = reinterpret_cast<Texture*>(texture.Get());
EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
}
// Verify creating a UBO will always be 256B aligned.
TEST_P(D3D12ResourceHeapTests, AlignUBO) {
// Create a small UBO
wgpu::BufferDescriptor descriptor;
descriptor.size = 4 * 1024;
descriptor.usage = wgpu::BufferUsage::Uniform;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
0u);
// Create a larger UBO
descriptor.size = (4 * 1024 * 1024) + 255;
descriptor.usage = wgpu::BufferUsage::Uniform;
buffer = device.CreateBuffer(&descriptor);
d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
0u);
}
DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
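The alignments these tests check come straight from the D3D12 headers: D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT is 4KB, D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT is 64KB, and D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT is 256 bytes. An illustrative sketch, not from this CL, of the power-of-two round-up behind the AlignUBO expectations:

#include <cstdint>

// Rounds value up to the next multiple of alignment. Valid only for
// power-of-two alignments, which all D3D12 placement alignments are.
constexpr uint64_t AlignUp(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

// The larger AlignUBO buffer: (4MiB + 255) bytes rounds up to 4MiB + 256.
static_assert(AlignUp(4u * 1024 * 1024 + 255, 256) == 4u * 1024 * 1024 + 256, "");
// The 4KiB buffer is already a multiple of 256 and is left unchanged.
static_assert(AlignUp(4u * 1024, 256) == 4u * 1024, "");

This is why the test asserts `Width % 256 == 0` rather than an exact size: the runtime may pad the resource, but any padding preserves the 256-byte multiple.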