mirror of
https://github.com/encounter/dawn-cmake.git
synced 2025-12-14 15:46:28 +00:00
Fix the initialization of the buffers with MapRead and MapAtCreation
This patch fixes two issues in the initialization of buffers created with MapRead usage and mappedAtCreation == true:

1. Such a buffer can be read from the CPU side immediately after creation, but at that point the internal pending command buffers may not have been executed yet, so the buffer is not cleared as expected.
2. On D3D12, a buffer with MapRead usage and mappedAtCreation == true is created on the READBACK heap, so data written from the CPU side cannot be uploaded to GPU memory. When the buffer is mapped again, everything written through the CPU pointer is overwritten by the contents of GPU memory (which means it is also cleared to 0).

This patch fixes these issues by:

1. clearing buffers with mappedAtCreation == true on the CPU side, and
2. on D3D12, backing a buffer with MapRead usage and mappedAtCreation == true with a staging buffer instead of mapping the buffer itself.

Note that this change only affects the code path with the toggle "nonzero_clear_resources_on_creation_for_testing" enabled; we currently do not plan to make a similar change when Dawn wire is enabled.

BUG=dawn:414
TEST=dawn_end2end_tests

Change-Id: I2b3d0840333e8d99759800ab9fc141d0a7cf2f8d
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/27220
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
committed by Commit Bot service account
parent 0b89b27263
commit 1c4a7f780f
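As context for the diff below, here is a minimal, self-contained sketch of the first guarantee the patch establishes, written against the public webgpu.h C++ API rather than Dawn's internal test harness. The helper name and the assumption that a wgpu::Device has already been created (with the relevant clear toggle enabled in a test configuration) are illustrative, not part of this commit; the authoritative coverage is in the tests changed below.

// Sketch only: checks that a MapRead + mappedAtCreation buffer is already
// zero-initialized through its CPU-visible mapping, before any GPU command
// has executed. Assumes a wgpu::Device created elsewhere.
#include <webgpu/webgpu_cpp.h>

#include <cassert>
#include <cstdint>

void CheckZeroInitializedAtCreation(const wgpu::Device& device) {
    constexpr uint64_t kSize = 16;

    // The combination this patch is about: mappable for reading and mapped
    // at creation.
    wgpu::BufferDescriptor descriptor;
    descriptor.size = kSize;
    descriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
    descriptor.mappedAtCreation = true;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    // With the fix, the mapped range must already read back as zeros even
    // though no internal clear command has been submitted to the GPU yet.
    const uint8_t* mapped = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
    for (uint64_t i = 0; i < kSize; ++i) {
        assert(mapped[i] == 0);
    }
    buffer.Unmap();
}

The second fix (CPU writes to a MapRead + mappedAtCreation buffer surviving a later map on D3D12) is exercised by the new MappedAtCreation test in the BufferZeroInitTest hunk below.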
@@ -683,12 +683,20 @@ TEST_P(BufferMappedAtCreationTests, GetMappedRangeZeroSized) {
    buffer.Unmap();
}

// TODO(jiawei.shao@intel.com): remove "lazy_clear_buffer_on_first_use" when we complete the
// support of buffer lazy initialization.
DAWN_INSTANTIATE_TEST(BufferMappedAtCreationTests,
                      D3D12Backend(),
                      D3D12Backend({}, {"use_d3d12_resource_heap_tier2"}),
                      D3D12Backend({"lazy_clear_buffer_on_first_use"}),
                      D3D12Backend({"lazy_clear_buffer_on_first_use"},
                                   {"use_d3d12_resource_heap_tier2"}),
                      MetalBackend(),
                      MetalBackend({"lazy_clear_buffer_on_first_use"}),
                      OpenGLBackend(),
                      VulkanBackend());
                      OpenGLBackend({"lazy_clear_buffer_on_first_use"}),
                      VulkanBackend(),
                      VulkanBackend({"lazy_clear_buffer_on_first_use"}));

class BufferTests : public DawnTest {};

@@ -762,19 +762,91 @@ TEST_P(BufferZeroInitTest, MapAsync_Write) {

// Test that the code path of creating a buffer with BufferDescriptor.mappedAtCreation == true
// clears the buffer correctly at the creation of the buffer.
TEST_P(BufferZeroInitTest, MapAtCreation) {
TEST_P(BufferZeroInitTest, MappedAtCreation) {
    constexpr uint32_t kBufferSize = 16u;
    constexpr wgpu::BufferUsage kBufferUsage =
        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;

    wgpu::Buffer buffer;
    EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));
    buffer.Unmap();

    constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedData = {{0, 0, 0, 0}};
    EXPECT_LAZY_CLEAR(
        0u, EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(kExpectedData.data()),
                                       buffer, 0, kExpectedData.size()));

    // Buffer with MapRead usage
    {
        constexpr wgpu::BufferUsage kBufferUsage = wgpu::BufferUsage::MapRead;

        wgpu::Buffer buffer;
        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));
        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
        buffer.Unmap();

        MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, kBufferSize);
        mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
        buffer.Unmap();
    }

    // Buffer with MapRead usage and upload the buffer (from CPU and GPU)
    {
        constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedFinalData = {
            {10, 20, 30, 40}};

        constexpr wgpu::BufferUsage kBufferUsage =
            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;

        wgpu::Buffer buffer;
        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));

        // Update data from the CPU side.
        uint32_t* mappedData = static_cast<uint32_t*>(buffer.GetMappedRange());
        mappedData[2] = kExpectedFinalData[2];
        mappedData[3] = kExpectedFinalData[3];
        buffer.Unmap();

        // Update data from the GPU side.
        wgpu::Buffer uploadBuffer = utils::CreateBufferFromData(
            device, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst,
            {kExpectedFinalData[0], kExpectedFinalData[1]});

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(uploadBuffer, 0, buffer, 0, 2 * sizeof(uint32_t));
        wgpu::CommandBuffer commandBuffer = encoder.Finish();
        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));

        // Check the content of the buffer on the CPU side
        MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, kBufferSize);
        const uint32_t* constMappedData =
            static_cast<const uint32_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(kExpectedFinalData.data(), constMappedData, kBufferSize));
    }

    // Buffer with MapWrite usage
    {
        constexpr wgpu::BufferUsage kBufferUsage =
            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;

        wgpu::Buffer buffer;
        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));

        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
        buffer.Unmap();

        EXPECT_LAZY_CLEAR(
            0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), buffer, 0, kExpectedData.size()));
    }

    // Buffer with neither MapRead nor MapWrite usage
    {
        constexpr wgpu::BufferUsage kBufferUsage = wgpu::BufferUsage::CopySrc;

        wgpu::Buffer buffer;
        EXPECT_LAZY_CLEAR(1u, buffer = CreateBuffer(kBufferSize, kBufferUsage, true));

        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, kExpectedData.data(), kBufferSize));
        buffer.Unmap();

        EXPECT_LAZY_CLEAR(
            0u, EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), buffer, 0, kExpectedData.size()));
    }
}

// Test that the code path of CopyBufferToTexture clears the source buffer correctly when it is the

@@ -14,9 +14,26 @@

#include "tests/DawnTest.h"

#include <array>
#include <vector>

class NonzeroBufferCreationTests : public DawnTest {};
class NonzeroBufferCreationTests : public DawnTest {
  public:
    void MapReadAsyncAndWait(wgpu::Buffer buffer, uint64_t offset, uint64_t size) {
        bool done = false;
        buffer.MapAsync(
            wgpu::MapMode::Read, offset, size,
            [](WGPUBufferMapAsyncStatus status, void* userdata) {
                ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
                *static_cast<bool*>(userdata) = true;
            },
            &done);

        while (!done) {
            WaitABit();
        }
    }
};

// Verify that each byte of the buffer has all been initialized to 1 with the toggle enabled when it
// is created with CopyDst usage.

@@ -29,7 +46,7 @@ TEST_P(NonzeroBufferCreationTests, BufferCreationWithCopyDstUsage) {

    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    std::vector<uint8_t> expectedData(kSize, static_cast<uint8_t>(1u));
    std::vector<uint8_t> expectedData(kSize, uint8_t(1u));
    EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), buffer, 0,
                               kSize / sizeof(uint32_t));
}

@@ -45,11 +62,72 @@ TEST_P(NonzeroBufferCreationTests, BufferCreationWithMapWriteWithoutCopyDstUsage

    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    std::vector<uint8_t> expectedData(kSize, static_cast<uint8_t>(1u));
    std::vector<uint8_t> expectedData(kSize, uint8_t(1u));
    EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), buffer, 0,
                               kSize / sizeof(uint32_t));
}

// Verify that each byte of the buffer has all been initialized to 1 with the toggle enabled when
// it is created with mappedAtCreation == true.
TEST_P(NonzeroBufferCreationTests, BufferCreationWithMappedAtCreation) {
    // When we use Dawn wire, the lazy initialization of the buffers with mappedAtCreation == true
    // are done in the Dawn wire and we don't plan to get it work with the toggle
    // "nonzero_clear_resources_on_creation_for_testing" (we will have more tests on it in the
    // BufferZeroInitTests.
    DAWN_SKIP_TEST_IF(UsesWire());

    constexpr uint32_t kSize = 32u;

    wgpu::BufferDescriptor defaultDescriptor;
    defaultDescriptor.size = kSize;
    defaultDescriptor.mappedAtCreation = true;

    const std::vector<uint8_t> expectedData(kSize, uint8_t(1u));
    const uint32_t* expectedDataPtr = reinterpret_cast<const uint32_t*>(expectedData.data());

    // Buffer with MapRead usage
    {
        wgpu::BufferDescriptor descriptor = defaultDescriptor;
        descriptor.usage = wgpu::BufferUsage::MapRead;
        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
        buffer.Unmap();

        MapReadAsyncAndWait(buffer, 0, kSize);
        mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
        buffer.Unmap();
    }

    // Buffer with MapWrite usage
    {
        wgpu::BufferDescriptor descriptor = defaultDescriptor;
        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
        buffer.Unmap();

        EXPECT_BUFFER_U32_RANGE_EQ(expectedDataPtr, buffer, 0, kSize / sizeof(uint32_t));
    }

    // Buffer with neither MapRead nor MapWrite usage
    {
        wgpu::BufferDescriptor descriptor = defaultDescriptor;
        descriptor.usage = wgpu::BufferUsage::CopySrc;
        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

        const uint8_t* mappedData = static_cast<const uint8_t*>(buffer.GetConstMappedRange());
        EXPECT_EQ(0, memcmp(mappedData, expectedData.data(), kSize));
        buffer.Unmap();

        EXPECT_BUFFER_U32_RANGE_EQ(expectedDataPtr, buffer, 0, kSize / sizeof(uint32_t));
    }
}

DAWN_INSTANTIATE_TEST(NonzeroBufferCreationTests,
                      D3D12Backend({"nonzero_clear_resources_on_creation_for_testing"}),
                      MetalBackend({"nonzero_clear_resources_on_creation_for_testing"}),