Add memory synchronization tests - render to compute and vice versa
This change adds GPU memory synchronization tests for buffers. The tests cover data flow from a render pass to a compute pass via a storage buffer, and vice versa. It is the last change in the series of memory sync tests for buffers.

BUG=dawn:275
Change-Id: Ic866161cadc0fa9df4c441f3970783077f3a9bd0
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/13921
Commit-Queue: Yunchao He <yunchao.he@intel.com>
Reviewed-by: Austin Eng <enga@chromium.org>
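For orientation, the shape of the render-to-compute flow under test is sketched below. WebGPU exposes no explicit barrier between passes, so Dawn itself must make the render pass's storage-buffer write visible to the compute pass's read. This is a minimal sketch, not code from this CL: the function name and all parameters are placeholders, the calls match the Dawn C++ API of this era (EndPass, explicit Draw/Dispatch arguments), and the header path may differ by checkout.

```cpp
#include <dawn/webgpu_cpp.h>  // assumed header path for the generated C++ API

// Encode a render pass that writes a shared storage buffer (via the bound
// pipeline and bind group), then a compute pass that reads it back.
void EncodeRenderThenCompute(const wgpu::Device& device,
                             const wgpu::Queue& queue,
                             const wgpu::RenderPassDescriptor& renderPassInfo,
                             const wgpu::RenderPipeline& renderPipeline,
                             const wgpu::BindGroup& renderBindGroup,
                             const wgpu::ComputePipeline& computePipeline,
                             const wgpu::BindGroup& computeBindGroup) {
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

    // Pass 0: the fragment shader writes into the storage buffer.
    wgpu::RenderPassEncoder render = encoder.BeginRenderPass(&renderPassInfo);
    render.SetPipeline(renderPipeline);
    render.SetBindGroup(0, renderBindGroup);
    render.Draw(1, 1, 0, 0);
    render.EndPass();

    // Pass 1: the compute shader reads what the render pass just wrote.
    // No barrier is recorded here; Dawn must synchronize the passes itself.
    wgpu::ComputePassEncoder compute = encoder.BeginComputePass();
    compute.SetPipeline(computePipeline);
    compute.SetBindGroup(0, computeBindGroup);
    compute.Dispatch(1, 1, 1);
    compute.EndPass();

    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);
}
```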
@@ -32,15 +32,9 @@ class GpuMemorySyncTests : public DawnTest {
         buffer.SetSubData(0, sizeof(myData), &myData);
         return buffer;
     }
-};
 
-// Clear storage buffer with zero. Then read data, add one, and write the result to storage buffer
-// in compute pass. Iterate this read-add-write steps per compute pass a few time. The successive
-// iteration reads the result in buffer from last iteration, which makes the iterations a data
-// dependency chain. The test verifies that data in buffer among iterations in compute passes is
-// correctly synchronized.
-TEST_P(GpuMemorySyncTests, ComputePass) {
-    // Create pipeline, bind group, and buffer for compute pass.
+    std::tuple<wgpu::ComputePipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForCompute(
+        const wgpu::Buffer& buffer) {
         wgpu::ShaderModule csModule =
             utils::CreateShaderModule(device, utils::SingleShaderStage::Compute, R"(
         #version 450
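The new helper returns the pipeline and bind group together as a std::tuple (which requires the \<tuple\> header); the call sites in the later hunks unpack it with std::tie. A usage excerpt, taken from those hunks:

```cpp
wgpu::Buffer buffer = CreateBuffer();
wgpu::ComputePipeline compute;
wgpu::BindGroup bindGroup;
std::tie(compute, bindGroup) = CreatePipelineAndBindGroupForCompute(buffer);
```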
@@ -55,45 +49,22 @@ TEST_P(GpuMemorySyncTests, ComputePass) {
             device, {
                         {0, wgpu::ShaderStage::Compute, wgpu::BindingType::StorageBuffer},
                     });
-    wgpu::PipelineLayout pipelineLayout = utils::MakeBasicPipelineLayout(device, &bgl);
+        wgpu::PipelineLayout pipelineLayout0 = utils::MakeBasicPipelineLayout(device, &bgl);
 
         wgpu::ComputePipelineDescriptor cpDesc;
-    cpDesc.layout = pipelineLayout;
+        cpDesc.layout = pipelineLayout0;
         cpDesc.computeStage.module = csModule;
         cpDesc.computeStage.entryPoint = "main";
-    wgpu::ComputePipeline compute = device.CreateComputePipeline(&cpDesc);
+        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
 
-    wgpu::Buffer buffer = CreateBuffer();
-    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}});
-
-    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-    // Iterate the read-add-write operations in compute pass a few times.
-    int iteration = 3;
-    for (int i = 0; i < iteration; ++i) {
-        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-        pass.SetPipeline(compute);
-        pass.SetBindGroup(0, bindGroup);
-        pass.Dispatch(1, 1, 1);
-        pass.EndPass();
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(int)}});
+        return std::make_tuple(pipeline, bindGroup);
     }
 
-    // Verify the result.
-    wgpu::CommandBuffer commands = encoder.Finish();
-    queue.Submit(1, &commands);
-
-    EXPECT_BUFFER_U32_EQ(iteration, buffer, 0);
-}
-
-// Clear storage buffer with zero. Then read data, add one, and write the result to storage buffer
-// in render pass. Iterate this read-add-write steps per render pass a few time. The successive
-// iteration reads the result in buffer from last iteration, which makes the iterations a data
-// dependency chain. In addition, color output by fragment shader depends on the data in storage
-// buffer, so we can check color in render target to verify that data in buffer among iterations in
-// render passes is correctly synchronized.
-TEST_P(GpuMemorySyncTests, RenderPass) {
-    // Create pipeline, bind group, and buffer for render pass.
+    std::tuple<wgpu::RenderPipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForRender(
+        const wgpu::Buffer& buffer,
+        wgpu::TextureFormat colorFormat) {
         wgpu::ShaderModule vsModule =
             utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
         #version 450
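Two details in this hunk are easy to miss: the bind group's buffer binding size changes from a hard-coded 4 to sizeof(int), and the render-side helper whose signature starts at the end of the hunk takes the color format as a parameter, so each test owns its render target. Its call shape, as the later hunks show:

```cpp
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
wgpu::RenderPipeline render;
wgpu::BindGroup bindGroup;
std::tie(render, bindGroup) =
    CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
```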
@@ -120,21 +91,65 @@ TEST_P(GpuMemorySyncTests, RenderPass) {
                     });
         wgpu::PipelineLayout pipelineLayout = utils::MakeBasicPipelineLayout(device, &bgl);
 
-    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
-
         utils::ComboRenderPipelineDescriptor rpDesc(device);
         rpDesc.layout = pipelineLayout;
         rpDesc.vertexStage.module = vsModule;
         rpDesc.cFragmentStage.module = fsModule;
         rpDesc.primitiveTopology = wgpu::PrimitiveTopology::PointList;
-    rpDesc.cColorStates[0].format = renderPass.colorFormat;
+        rpDesc.cColorStates[0].format = colorFormat;
 
-    wgpu::RenderPipeline render = device.CreateRenderPipeline(&rpDesc);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
 
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(int)}});
+        return std::make_tuple(pipeline, bindGroup);
+    }
+};
+
+// Clear the storage buffer to zero. Then, in a compute pass, read the data, add one, and write
+// the result back to the storage buffer. Iterate these read-add-write steps a few times, one
+// compute pass per iteration. Each successive iteration reads the result written by the previous
+// one, which makes the iterations a data dependency chain. The test verifies that the buffer
+// contents are correctly synchronized between iterations across compute passes.
+TEST_P(GpuMemorySyncTests, ComputePass) {
+    // Create pipeline, bind group, and buffer for compute pass.
     wgpu::Buffer buffer = CreateBuffer();
-    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}});
+    wgpu::ComputePipeline compute;
+    wgpu::BindGroup bindGroup;
+    std::tie(compute, bindGroup) = CreatePipelineAndBindGroupForCompute(buffer);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Iterate the read-add-write operations in compute pass a few times.
+    int iteration = 3;
+    for (int i = 0; i < iteration; ++i) {
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(compute);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Dispatch(1, 1, 1);
+        pass.EndPass();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_BUFFER_U32_EQ(iteration, buffer, 0);
+}
+
+// Clear the storage buffer to zero. Then, in a render pass, read the data, add one, and write the
+// result back to the storage buffer. Iterate these read-add-write steps a few times, one render
+// pass per iteration. Each successive iteration reads the result written by the previous one,
+// which makes the iterations a data dependency chain. In addition, the color output by the
+// fragment shader depends on the data in the storage buffer, so checking the color in the render
+// target verifies that the buffer contents are correctly synchronized across render passes.
+TEST_P(GpuMemorySyncTests, RenderPass) {
+    // Create pipeline, bind group, and buffer for render pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    wgpu::RenderPipeline render;
+    wgpu::BindGroup bindGroup;
+    std::tie(render, bindGroup) =
+        CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
     wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
 
     // Iterate the read-add-write operations in render pass a few times.
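The assertions the two re-added tests end with follow directly from the read-add-write chain: per the test comments, CreateBuffer initializes the buffer to zero, and each of the three passes increments it once.

```cpp
// Worked expectation: pass 1 writes 0 + 1 = 1, pass 2 writes 2, pass 3 writes 3.
EXPECT_BUFFER_U32_EQ(iteration, buffer, 0);          // ComputePass: buffer holds 3
EXPECT_PIXEL_RGBA8_EQ(RGBA8(iteration, 0, 0, 255),   // RenderPass: red channel is 3
                      renderPass.color, 0, 0);
```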
@@ -154,6 +169,82 @@ TEST_P(GpuMemorySyncTests, RenderPass) {
     EXPECT_PIXEL_RGBA8_EQ(RGBA8(iteration, 0, 0, 255), renderPass.color, 0, 0);
 }
 
+// Write into a storage buffer in a render pass, then read that data in a compute
+// pass, and verify that the data flow is correctly synchronized.
+TEST_P(GpuMemorySyncTests, RenderPassToComputePass) {
+    // Create pipeline, bind group, and buffer for render pass and compute pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    wgpu::RenderPipeline render;
+    wgpu::BindGroup bindGroup0;
+    std::tie(render, bindGroup0) =
+        CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
+
+    wgpu::ComputePipeline compute;
+    wgpu::BindGroup bindGroup1;
+    std::tie(compute, bindGroup1) = CreatePipelineAndBindGroupForCompute(buffer);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Write data into the storage buffer in a render pass.
+    wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass0.SetPipeline(render);
+    pass0.SetBindGroup(0, bindGroup0);
+    pass0.Draw(1, 1, 0, 0);
+    pass0.EndPass();
+
+    // Read that data in a compute pass.
+    wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+    pass1.SetPipeline(compute);
+    pass1.SetBindGroup(0, bindGroup1);
+    pass1.Dispatch(1, 1, 1);
+    pass1.EndPass();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_BUFFER_U32_EQ(2, buffer, 0);
+}
+
+// Write into a storage buffer in a compute pass, then read that data in a render
+// pass, and verify that the data flow is correctly synchronized.
+TEST_P(GpuMemorySyncTests, ComputePassToRenderPass) {
+    // Create pipeline, bind group, and buffer for compute pass and render pass.
+    wgpu::Buffer buffer = CreateBuffer();
+    wgpu::ComputePipeline compute;
+    wgpu::BindGroup bindGroup1;
+    std::tie(compute, bindGroup1) = CreatePipelineAndBindGroupForCompute(buffer);
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
+    wgpu::RenderPipeline render;
+    wgpu::BindGroup bindGroup0;
+    std::tie(render, bindGroup0) =
+        CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+    // Write data into the storage buffer in a compute pass.
+    wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+    pass0.SetPipeline(compute);
+    pass0.SetBindGroup(0, bindGroup1);
+    pass0.Dispatch(1, 1, 1);
+    pass0.EndPass();
+
+    // Read that data in a render pass.
+    wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    pass1.SetPipeline(render);
+    pass1.SetBindGroup(0, bindGroup0);
+    pass1.Draw(1, 1, 0, 0);
+    pass1.EndPass();
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify the result.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(2, 0, 0, 255), renderPass.color, 0, 0);
+}
+
 class StorageToUniformSyncTests : public DawnTest {
   protected:
     void CreateBuffer() {
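The expected value 2 in both new tests encodes the cross-pass dependency: the first pass increments the zero-initialized buffer to 1, and the second pass reads that 1 and increments it to 2. A missed barrier between the passes would surface here as a stale value instead.

```cpp
// RenderPassToComputePass: render writes 0 + 1 = 1, compute reads 1, writes 2.
EXPECT_BUFFER_U32_EQ(2, buffer, 0);
// ComputePassToRenderPass: compute writes 1; the fragment shader reads 1, adds 1,
// and outputs the sum as the red channel of the 1x1 render target.
EXPECT_PIXEL_RGBA8_EQ(RGBA8(2, 0, 0, 255), renderPass.color, 0, 0);
```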