Add memory synchronization tests - render to compute and vice versa
This change adds GPU memory synchronization tests for buffers. The tests cover data flow from a render pass to a compute pass via a storage buffer, and vice versa. It is the last one in the series of memory sync tests for buffers. BUG=dawn:275 Change-Id: Ic866161cadc0fa9df4c441f3970783077f3a9bd0 Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/13921 Commit-Queue: Yunchao He <yunchao.he@intel.com> Reviewed-by: Austin Eng <enga@chromium.org>
This commit is contained in:
parent
e568fe138e
commit
733842c59d
|
@ -32,6 +32,78 @@ class GpuMemorySyncTests : public DawnTest {
|
|||
buffer.SetSubData(0, sizeof(myData), &myData);
|
||||
return buffer;
|
||||
}
|
||||
|
||||
std::tuple<wgpu::ComputePipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForCompute(
|
||||
const wgpu::Buffer& buffer) {
|
||||
wgpu::ShaderModule csModule =
|
||||
utils::CreateShaderModule(device, utils::SingleShaderStage::Compute, R"(
|
||||
#version 450
|
||||
layout(std140, set = 0, binding = 0) buffer Data {
|
||||
int a;
|
||||
} data;
|
||||
void main() {
|
||||
data.a += 1;
|
||||
})");
|
||||
|
||||
wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
|
||||
device, {
|
||||
{0, wgpu::ShaderStage::Compute, wgpu::BindingType::StorageBuffer},
|
||||
});
|
||||
wgpu::PipelineLayout pipelineLayout0 = utils::MakeBasicPipelineLayout(device, &bgl);
|
||||
|
||||
wgpu::ComputePipelineDescriptor cpDesc;
|
||||
cpDesc.layout = pipelineLayout0;
|
||||
cpDesc.computeStage.module = csModule;
|
||||
cpDesc.computeStage.entryPoint = "main";
|
||||
wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
|
||||
|
||||
wgpu::BindGroup bindGroup =
|
||||
utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(int)}});
|
||||
return std::make_tuple(pipeline, bindGroup);
|
||||
}
|
||||
|
||||
std::tuple<wgpu::RenderPipeline, wgpu::BindGroup> CreatePipelineAndBindGroupForRender(
|
||||
const wgpu::Buffer& buffer,
|
||||
wgpu::TextureFormat colorFormat) {
|
||||
wgpu::ShaderModule vsModule =
|
||||
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
|
||||
#version 450
|
||||
void main() {
|
||||
gl_Position = vec4(0.f, 0.f, 0.f, 1.f);
|
||||
gl_PointSize = 1.0;
|
||||
})");
|
||||
|
||||
wgpu::ShaderModule fsModule =
|
||||
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
|
||||
#version 450
|
||||
layout (set = 0, binding = 0) buffer Data {
|
||||
int i;
|
||||
} data;
|
||||
layout(location = 0) out vec4 fragColor;
|
||||
void main() {
|
||||
data.i += 1;
|
||||
fragColor = vec4(data.i / 255.f, 0.f, 0.f, 1.f);
|
||||
})");
|
||||
|
||||
wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
|
||||
device, {
|
||||
{0, wgpu::ShaderStage::Fragment, wgpu::BindingType::StorageBuffer},
|
||||
});
|
||||
wgpu::PipelineLayout pipelineLayout = utils::MakeBasicPipelineLayout(device, &bgl);
|
||||
|
||||
utils::ComboRenderPipelineDescriptor rpDesc(device);
|
||||
rpDesc.layout = pipelineLayout;
|
||||
rpDesc.vertexStage.module = vsModule;
|
||||
rpDesc.cFragmentStage.module = fsModule;
|
||||
rpDesc.primitiveTopology = wgpu::PrimitiveTopology::PointList;
|
||||
rpDesc.cColorStates[0].format = colorFormat;
|
||||
|
||||
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&rpDesc);
|
||||
|
||||
wgpu::BindGroup bindGroup =
|
||||
utils::MakeBindGroup(device, bgl, {{0, buffer, 0, sizeof(int)}});
|
||||
return std::make_tuple(pipeline, bindGroup);
|
||||
}
|
||||
};
|
||||
|
||||
// Clear storage buffer with zero. Then read data, add one, and write the result to storage buffer
|
||||
|
@ -41,32 +113,10 @@ class GpuMemorySyncTests : public DawnTest {
|
|||
// correctly synchronized.
|
||||
TEST_P(GpuMemorySyncTests, ComputePass) {
|
||||
// Create pipeline, bind group, and buffer for compute pass.
|
||||
wgpu::ShaderModule csModule =
|
||||
utils::CreateShaderModule(device, utils::SingleShaderStage::Compute, R"(
|
||||
#version 450
|
||||
layout(std140, set = 0, binding = 0) buffer Data {
|
||||
int a;
|
||||
} data;
|
||||
void main() {
|
||||
data.a += 1;
|
||||
})");
|
||||
|
||||
wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
|
||||
device, {
|
||||
{0, wgpu::ShaderStage::Compute, wgpu::BindingType::StorageBuffer},
|
||||
});
|
||||
wgpu::PipelineLayout pipelineLayout = utils::MakeBasicPipelineLayout(device, &bgl);
|
||||
|
||||
wgpu::ComputePipelineDescriptor cpDesc;
|
||||
cpDesc.layout = pipelineLayout;
|
||||
cpDesc.computeStage.module = csModule;
|
||||
cpDesc.computeStage.entryPoint = "main";
|
||||
wgpu::ComputePipeline compute = device.CreateComputePipeline(&cpDesc);
|
||||
|
||||
wgpu::Buffer buffer = CreateBuffer();
|
||||
|
||||
wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}});
|
||||
|
||||
wgpu::ComputePipeline compute;
|
||||
wgpu::BindGroup bindGroup;
|
||||
std::tie(compute, bindGroup) = CreatePipelineAndBindGroupForCompute(buffer);
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
||||
// Iterate the read-add-write operations in compute pass a few times.
|
||||
|
@ -79,10 +129,10 @@ TEST_P(GpuMemorySyncTests, ComputePass) {
|
|||
pass.EndPass();
|
||||
}
|
||||
|
||||
// Verify the result.
|
||||
wgpu::CommandBuffer commands = encoder.Finish();
|
||||
queue.Submit(1, &commands);
|
||||
|
||||
// Verify the result.
|
||||
EXPECT_BUFFER_U32_EQ(iteration, buffer, 0);
|
||||
}
|
||||
|
||||
|
@ -94,47 +144,12 @@ TEST_P(GpuMemorySyncTests, ComputePass) {
|
|||
// render passes is correctly synchronized.
|
||||
TEST_P(GpuMemorySyncTests, RenderPass) {
|
||||
// Create pipeline, bind group, and buffer for render pass.
|
||||
wgpu::ShaderModule vsModule =
|
||||
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
|
||||
#version 450
|
||||
void main() {
|
||||
gl_Position = vec4(0.f, 0.f, 0.f, 1.f);
|
||||
gl_PointSize = 1.0;
|
||||
})");
|
||||
|
||||
wgpu::ShaderModule fsModule =
|
||||
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
|
||||
#version 450
|
||||
layout (set = 0, binding = 0) buffer Data {
|
||||
int i;
|
||||
} data;
|
||||
layout(location = 0) out vec4 fragColor;
|
||||
void main() {
|
||||
data.i += 1;
|
||||
fragColor = vec4(data.i / 255.f, 0.f, 0.f, 1.f);
|
||||
})");
|
||||
|
||||
wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
|
||||
device, {
|
||||
{0, wgpu::ShaderStage::Fragment, wgpu::BindingType::StorageBuffer},
|
||||
});
|
||||
wgpu::PipelineLayout pipelineLayout = utils::MakeBasicPipelineLayout(device, &bgl);
|
||||
|
||||
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
|
||||
|
||||
utils::ComboRenderPipelineDescriptor rpDesc(device);
|
||||
rpDesc.layout = pipelineLayout;
|
||||
rpDesc.vertexStage.module = vsModule;
|
||||
rpDesc.cFragmentStage.module = fsModule;
|
||||
rpDesc.primitiveTopology = wgpu::PrimitiveTopology::PointList;
|
||||
rpDesc.cColorStates[0].format = renderPass.colorFormat;
|
||||
|
||||
wgpu::RenderPipeline render = device.CreateRenderPipeline(&rpDesc);
|
||||
|
||||
wgpu::Buffer buffer = CreateBuffer();
|
||||
|
||||
wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}});
|
||||
|
||||
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
|
||||
wgpu::RenderPipeline render;
|
||||
wgpu::BindGroup bindGroup;
|
||||
std::tie(render, bindGroup) =
|
||||
CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
||||
// Iterate the read-add-write operations in render pass a few times.
|
||||
|
@ -154,6 +169,82 @@ TEST_P(GpuMemorySyncTests, RenderPass) {
|
|||
EXPECT_PIXEL_RGBA8_EQ(RGBA8(iteration, 0, 0, 255), renderPass.color, 0, 0);
|
||||
}
|
||||
|
||||
// Write into a storage buffer in a render pass. Then read that data in a compute
|
||||
// pass. And verify the data flow is correctly synchronized.
|
||||
// Write into a storage buffer in a render pass, then read (and increment)
// that data in a compute pass within the same submit, and verify the
// render->compute data flow is correctly synchronized. The fragment shader
// bumps the buffer from 0 to 1; the compute shader bumps it from 1 to 2.
TEST_P(GpuMemorySyncTests, RenderPassToComputePass) {
    // Create pipeline, bind group, and buffer for render pass and compute pass.
    wgpu::Buffer buffer = CreateBuffer();
    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
    wgpu::RenderPipeline render;
    wgpu::BindGroup bindGroup0;
    std::tie(render, bindGroup0) =
        CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);

    wgpu::ComputePipeline compute;
    wgpu::BindGroup bindGroup1;
    std::tie(compute, bindGroup1) = CreatePipelineAndBindGroupForCompute(buffer);

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

    // Write data into the storage buffer in the render pass.
    wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
    pass0.SetPipeline(render);
    pass0.SetBindGroup(0, bindGroup0);
    pass0.Draw(1, 1, 0, 0);
    pass0.EndPass();

    // Read that data in the compute pass.
    wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
    pass1.SetPipeline(compute);
    pass1.SetBindGroup(0, bindGroup1);
    pass1.Dispatch(1, 1, 1);
    pass1.EndPass();

    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    // Verify the result: two increments on an initially-zero buffer.
    EXPECT_BUFFER_U32_EQ(2, buffer, 0);
}
|
||||
|
||||
// Write into a storage buffer in a compute pass. Then read that data in a render
|
||||
// pass. And verify the data flow is correctly synchronized.
|
||||
// Write into a storage buffer in a compute pass, then read (and increment)
// that data in a render pass within the same submit, and verify the
// compute->render data flow is correctly synchronized. The compute shader
// bumps the buffer from 0 to 1; the fragment shader bumps it to 2 and writes
// that value into the red channel of the 1x1 color attachment.
TEST_P(GpuMemorySyncTests, ComputePassToRenderPass) {
    // Create pipeline, bind group, and buffer for compute pass and render pass.
    wgpu::Buffer buffer = CreateBuffer();
    wgpu::ComputePipeline compute;
    wgpu::BindGroup bindGroup1;
    std::tie(compute, bindGroup1) = CreatePipelineAndBindGroupForCompute(buffer);

    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, 1, 1);
    wgpu::RenderPipeline render;
    wgpu::BindGroup bindGroup0;
    std::tie(render, bindGroup0) =
        CreatePipelineAndBindGroupForRender(buffer, renderPass.colorFormat);

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

    // Write data into the storage buffer in the compute pass.
    wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
    pass0.SetPipeline(compute);
    pass0.SetBindGroup(0, bindGroup1);
    pass0.Dispatch(1, 1, 1);
    pass0.EndPass();

    // Read that data in the render pass.
    wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass.renderPassInfo);
    pass1.SetPipeline(render);
    pass1.SetBindGroup(0, bindGroup0);
    pass1.Draw(1, 1, 0, 0);
    pass1.EndPass();

    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    // Verify the result: the fragment shader saw the compute-written value (1),
    // incremented it to 2, and wrote it to the red channel.
    EXPECT_PIXEL_RGBA8_EQ(RGBA8(2, 0, 0, 255), renderPass.color, 0, 0);
}
|
||||
|
||||
class StorageToUniformSyncTests : public DawnTest {
|
||||
protected:
|
||||
void CreateBuffer() {
|
||||
|
|
Loading…
Reference in New Issue