Mirror of https://github.com/encounter/dawn-cmake.git, synced 2025-12-18 09:25:25 +00:00
Properly implement per-dispatch synchronization scopes.
Below is the list of all the individual changes, in a good order in which to review this CL.

Core changes:
- Change the tracking in the frontend to produce a synchronization scope per dispatch instead of per compute pass. Some bindgroups might not be part of any synchronization scope, so we also track all the referenced resources on the side so they can be checked during Queue::Submit validation.
- Fix clearing in the GL and Metal backends to use the per-dispatch synchronization scopes.
- Fix the Vulkan backend to use the per-dispatch synchronization scopes to produce the correct pipeline barriers. This allows the removal of previous logic that was subtly incorrect for Indirect buffers, and the merging of the Compute and Render DescriptorSetTracker into a single small helper class.
- D3D12 changes are similar to Vulkan, but the simplification is just the suppression of a branch with a lot of code in BindGroupStateTracker.

Test changes:
- Fix up all the ResourceUsageTracking tests to follow the WebGPU spec for synchronization scopes (fixing a lot of TODOs).
- Add additional tests checking that Indirect buffers are not allowed to be used as writable storage in the same synchronization scope.
- Add tests for Queue::Submit validation correctly taking into account resources that are bound but unused in compute passes.
- Add an end2end test for using a buffer as Indirect and Storage at the same time in a DispatchIndirect, which would previously produce incorrect barriers in the Vulkan and D3D12 backends.

Other small changes (that I was too lazy to put in a different CL):
- Add the utils::MakePipelineLayout helper function.
- Fix Indirect not being in the list of readonly buffer usages (caught by a test added in this CL).

Bug: dawn:632
Change-Id: I77263c3535a4ba995faccbf26255da9a2f6ed3b5
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/49887
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
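To illustrate the rule this CL makes the frontend enforce, here is a minimal sketch (not part of the CL itself) using the same wgpu:: C++ API as the tests in the diff below. It assumes readPipeline/writePipeline and readBG/writeBG were created so that the same buffer is bound as read-only storage in one layout and as writable storage in the other; those names are illustrative only.

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();

    // Valid: each dispatch is its own synchronization scope, so the buffer may be
    // read-only in the first scope and writable in the second one, even within a
    // single compute pass.
    pass.SetPipeline(readPipeline);
    pass.SetBindGroup(0, readBG);
    pass.Dispatch(1);

    pass.SetPipeline(writePipeline);
    pass.SetBindGroup(0, writeBG);
    pass.Dispatch(1);
    pass.EndPass();
    encoder.Finish();

    // Invalid: if a single dispatch's layout bound the same buffer as both read-only
    // and writable storage, the conflicting usages would share one synchronization
    // scope and CommandEncoder::Finish() would be expected to fail validation.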
Committed by: Commit Bot service account
Parent: d0ebcbac7a
Commit: 76732abfe5
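For context while reading the diff: utils::MakePipelineLayout is the helper this CL introduces, and its implementation is not shown in the hunks below. A plausible shape, inferred only from the call sites in the diff (it takes the device and a std::vector<wgpu::BindGroupLayout>, e.g. utils::MakePipelineLayout(device, {emptyBGL, testBGL})), could look like the following sketch; the real helper lives in Dawn's utils library.

    #include <vector>
    // Header name varies by Dawn checkout, e.g. <webgpu/webgpu_cpp.h>.

    // Hypothetical sketch: builds a pipeline layout from a list of bind group layouts.
    wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
                                            std::vector<wgpu::BindGroupLayout> bgls) {
        wgpu::PipelineLayoutDescriptor descriptor;
        descriptor.bindGroupLayoutCount = static_cast<uint32_t>(bgls.size());
        descriptor.bindGroupLayouts = bgls.data();
        return device.CreatePipelineLayout(&descriptor);
    }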
@@ -219,4 +219,147 @@ namespace {
    WaitForAllOperations(device);
}

+// Test that buffers in unused compute pass bindgroups are still checked for in
+// Queue::Submit validation.
+TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
+    wgpu::Queue queue = device.GetQueue();
+
+    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+
+    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+    // In this test we check that BindGroup 1 is checked, the texture test will check
+    // BindGroup 2. This is to provide coverage of for loops in validation code.
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
+    cpDesc.computeStage.entryPoint = "main";
+    cpDesc.computeStage.module =
+        utils::CreateShaderModule(device, "[[stage(compute)]] fn main() {}");
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+    wgpu::BufferDescriptor bufDesc;
+    bufDesc.size = 4;
+    bufDesc.usage = wgpu::BufferUsage::Storage;
+
+    // Test that completely unused bindgroups still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(1, unusedBG);
+        pass.EndPass();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedBuffer.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+
+    // Test that unused bindgroups because they were replaced still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+
+        wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, emptyBG);
+        pass.SetBindGroup(1, unusedBG);
+        pass.SetBindGroup(1, usedBG);
+        pass.SetPipeline(pipeline);
+        pass.Dispatch(1);
+        pass.EndPass();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedBuffer.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+}
+
+// Test that textures in unused compute pass bindgroups are still checked for in
+// Queue::Submit validation.
+TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
+    wgpu::Queue queue = device.GetQueue();
+
+    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+
+    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
+    cpDesc.computeStage.entryPoint = "main";
+    cpDesc.computeStage.module =
+        utils::CreateShaderModule(device, "[[stage(compute)]] fn main() {}");
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+    wgpu::TextureDescriptor texDesc;
+    texDesc.size = {1, 1, 1};
+    texDesc.usage = wgpu::TextureUsage::Sampled;
+    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Test that completely unused bindgroups still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup unusedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(2, unusedBG);
+        pass.EndPass();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedTexture.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+
+    // Test that unused bindgroups because they were replaced still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup unusedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup usedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, emptyBG);
+        pass.SetBindGroup(1, emptyBG);
+        pass.SetBindGroup(2, unusedBG);
+        pass.SetBindGroup(2, usedBG);
+        pass.SetPipeline(pipeline);
+        pass.Dispatch(1);
+        pass.EndPass();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedTexture.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+}
+
} // anonymous namespace
@@ -61,12 +61,12 @@ namespace {
        return device.CreateRenderPipeline2(&pipelineDescriptor);
    }

-   wgpu::ComputePipeline CreateNoOpComputePipeline() {
+   wgpu::ComputePipeline CreateNoOpComputePipeline(std::vector<wgpu::BindGroupLayout> bgls) {
        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
            [[stage(compute)]] fn main() {
            })");
        wgpu::ComputePipelineDescriptor pipelineDescriptor;
-       pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, nullptr);
+       pipelineDescriptor.layout = utils::MakePipelineLayout(device, std::move(bgls));
        pipelineDescriptor.computeStage.module = csModule;
        pipelineDescriptor.computeStage.entryPoint = "main";
        return device.CreateComputePipeline(&pipelineDescriptor);

@@ -149,7 +149,7 @@ namespace {
            utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});

        // Create a no-op compute pipeline
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

        // It is valid to use the buffer as both storage and readonly storage in a single
        // compute pass if dispatch command is not called.
@@ -170,15 +170,14 @@ namespace {
            pass.SetBindGroup(0, bg);
            pass.Dispatch(1);
            pass.EndPass();
-           // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
-           encoder.Finish();
+           ASSERT_DEVICE_ERROR(encoder.Finish());
        }
    }
}

-// Test using multiple writable usages on the same buffer in a single pass/dispatch
-TEST_F(ResourceUsageTrackingTest, BufferWithMultipleWriteUsage) {
+// Test the use of a buffer as a storage buffer multiple times in the same synchronization
+// scope.
+TEST_F(ResourceUsageTrackingTest, BufferUsedAsStorageMultipleTimes) {
    // Create buffer and bind group
    wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);

@@ -203,32 +202,16 @@ namespace {

    // test compute pass
    {
-       // Create a no-op compute pipeline
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       // It is valid to use multiple storage usages on the same buffer in a dispatch
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

-       // It is valid to use the same buffer as multiple writeable usages in a single compute
-       // pass if dispatch command is not called.
-       {
-           wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-           wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-           pass.SetBindGroup(0, bg);
-           pass.EndPass();
-           encoder.Finish();
-       }
-
-       // It is invalid to use the same buffer as multiple writeable usages in a single
-       // dispatch
-       {
-           wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-           wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-           pass.SetPipeline(cp);
-           pass.SetBindGroup(0, bg);
-           pass.Dispatch(1);
-           pass.EndPass();
-           // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
-           encoder.Finish();
-       }
+       wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+       wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+       pass.SetPipeline(cp);
+       pass.SetBindGroup(0, bg);
+       pass.Dispatch(1);
+       pass.EndPass();
+       encoder.Finish();
    }
}

@@ -368,17 +351,19 @@ namespace {
        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp0 = CreateNoOpComputePipeline({bgl0});
+       wgpu::ComputePipeline cp1 = CreateNoOpComputePipeline({bgl1});

        // It is valid to use the same buffer as both readable and writable in different
        // dispatches within the same compute pass.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-       pass.SetPipeline(cp);

+       pass.SetPipeline(cp0);
        pass.SetBindGroup(0, bg0);
        pass.Dispatch(1);

+       pass.SetPipeline(cp1);
        pass.SetBindGroup(0, bg1);
        pass.Dispatch(1);

@@ -431,7 +416,7 @@ namespace {
        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});

        // It is invalid to use the same buffer as both readable and writable usages in a single
        // dispatch

@@ -444,9 +429,7 @@ namespace {
        pass.Dispatch(1);

        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
-       encoder.Finish();
+       ASSERT_DEVICE_ERROR(encoder.Finish());
    }
}

@@ -479,11 +462,17 @@ namespace {

        // Use the buffer as both copy dst and readonly storage in compute pass
        {
+           wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl1});
+
            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+
            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
            pass.SetBindGroup(0, bg1);
+           pass.SetPipeline(cp);
+           pass.Dispatch(1);
            pass.EndPass();
+
            encoder.Finish();
        }
    }

@@ -601,7 +590,7 @@ namespace {

    // test compute pass
    {
-       // Create buffers that will be used as index and storage buffers
+       // Create buffers that will be used as readonly and writable storage buffers
        wgpu::Buffer buffer0 = CreateBuffer(512, wgpu::BufferUsage::Storage);
        wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Storage);

@@ -616,11 +605,11 @@ namespace {
        wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, buffer1, 0, 4}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});

        // Set bind group against the same index twice. The second one overwrites the first one.
-       // Then no buffer is used as both read and write in the same pass. But the overwritten
-       // bind group still take effect.
+       // Then no buffer is used as both read and write in the same dispatch. But the
+       // overwritten bind group still take effect.
        {
            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();

@@ -630,13 +619,11 @@ namespace {
            pass.SetPipeline(cp);
            pass.Dispatch(1);
            pass.EndPass();
-           // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
            encoder.Finish();
        }

        // Set bind group against the same index twice. The second one overwrites the first one.
-       // Then buffer0 is used as both read and write in the same pass
+       // Then buffer0 is used as both read and write in the same dispatch
        {
            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();

@@ -646,9 +633,7 @@ namespace {
            pass.SetPipeline(cp);
            pass.Dispatch(1);
            pass.EndPass();
-           // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
-           encoder.Finish();
+           ASSERT_DEVICE_ERROR(encoder.Finish());
        }
    }
}
@@ -686,18 +671,16 @@ namespace {
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

-       // These two bindings are invisible in compute pass. But we still track these bindings.
+       // These two bindings are invisible in the dispatch. But we still track these bindings.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetPipeline(cp);
        pass.SetBindGroup(0, bg);
        pass.Dispatch(1);
        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
-       encoder.Finish();
+       ASSERT_DEVICE_ERROR(encoder.Finish());
    }
}

@@ -727,7 +710,7 @@ namespace {

    // Test compute pass for bind group. The conflict of readonly storage and storage buffer
    // usage resides between compute stage and fragment stage. But the fragment stage binding is
-   // not visible in compute pass.
+   // not visible in the dispatch.
    {
        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(

@@ -736,10 +719,10 @@ namespace {
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

        // Buffer usage in compute stage conflicts with buffer usage in fragment stage. And
-       // binding for fragment stage is not visible in compute pass. But we still track this
+       // binding for fragment stage is not visible in the dispatch. But we still track this
        // invisible binding.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();

@@ -747,9 +730,7 @@ namespace {
        pass.SetBindGroup(0, bg);
        pass.Dispatch(1);
        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add buffer usage tracking for compute
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
-       encoder.Finish();
+       ASSERT_DEVICE_ERROR(encoder.Finish());
    }
}

@@ -803,9 +784,8 @@ namespace {
        ASSERT_DEVICE_ERROR(encoder.Finish());
    }

-   // Test compute pass for bind groups with unused bindings. The conflict of readonly storage
-   // and storage usages resides in different bind groups, although some bindings may not be
-   // used because its bind group layout is not designated in pipeline layout.
+   // Test that an unused bind group is not used to detect conflicts between bindings in
+   // compute passes.
    {
        // Create bind groups. The bindings are visible for compute pass.
        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(

@@ -816,23 +796,11 @@ namespace {
        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});

-       // Create a passthrough compute pipeline with a readonly buffer
-       wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
-           [[block]] struct RBuffer {
-               value : f32;
-           };
-           [[group(0), binding(0)]] var<storage> rBuffer : [[access(read)]] RBuffer;
-           [[stage(compute)]] fn main() {
-           })");
-       wgpu::ComputePipelineDescriptor pipelineDescriptor;
-       pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl0);
-       pipelineDescriptor.computeStage.module = csModule;
-       pipelineDescriptor.computeStage.entryPoint = "main";
-       wgpu::ComputePipeline cp = device.CreateComputePipeline(&pipelineDescriptor);
+       // Create a compute pipeline with only one of the two BGLs.
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl0});

        // Resource in bg1 conflicts with resources used in bg0. However, the binding in bg1 is
-       // not used in pipeline. But we still track this binding and read/write usage in one
-       // dispatch is not allowed.
+       // not used in pipeline so no error is produced in the dispatch.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(0, bg0);

@@ -840,8 +808,6 @@ namespace {
        pass.SetPipeline(cp);
        pass.Dispatch(1);
        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add resource tracking per dispatch for compute pass
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
        encoder.Finish();
    }
}
@@ -875,9 +841,13 @@ namespace {
    // Test compute pass
    {
        // Use the texture as both sampled and readonly storage in the same compute pass
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(0, bg);
+       pass.SetPipeline(cp);
+       pass.Dispatch(1);
        pass.EndPass();
        encoder.Finish();
    }

@@ -924,7 +894,7 @@ namespace {
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});

        // Create a no-op compute pipeline
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

        // It is valid to use the texture as both sampled and writeonly storage in a single
        // compute pass if dispatch command is not called.

@@ -945,9 +915,7 @@ namespace {
            pass.SetBindGroup(0, bg);
            pass.Dispatch(1);
            pass.EndPass();
-           // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
-           encoder.Finish();
+           ASSERT_DEVICE_ERROR(encoder.Finish());
        }
    }
}
@@ -1008,31 +976,17 @@ namespace {
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});

        // Create a no-op compute pipeline
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

-       // It is valid to use the texture as multiple writeonly storage usages in a single
-       // compute pass if dispatch command is not called.
-       {
-           wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-           wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-           pass.SetBindGroup(0, bg);
-           pass.EndPass();
-           encoder.Finish();
-       }
-
        // It is invalid to use the texture as multiple writeonly storage usages in a single
        // dispatch
-       {
-           wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-           wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-           pass.SetPipeline(cp);
-           pass.SetBindGroup(0, bg);
-           pass.Dispatch(1);
-           pass.EndPass();
-           // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
-           encoder.Finish();
-       }
+       wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+       wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+       pass.SetPipeline(cp);
+       pass.SetBindGroup(0, bg);
+       pass.Dispatch(1);
+       pass.EndPass();
+       encoder.Finish();
    }
}

@@ -1189,17 +1143,19 @@ namespace {
        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+       wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});

        // It is valid to use the same texture as both readable and writable in different
        // dispatches within the same compute pass.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-       pass.SetPipeline(cp);

+       pass.SetPipeline(readCp);
        pass.SetBindGroup(0, readBG);
        pass.Dispatch(1);

+       pass.SetPipeline(writeCp);
        pass.SetBindGroup(0, writeBG);
        pass.Dispatch(1);

@@ -1258,7 +1214,7 @@ namespace {
        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});

        // It is invalid to use the same texture as both readable and writable usages in a
        // single dispatch

@@ -1271,9 +1227,7 @@ namespace {
        pass.Dispatch(1);

        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
-       encoder.Finish();
+       ASSERT_DEVICE_ERROR(encoder.Finish());
    }
}

@@ -1309,10 +1263,14 @@ namespace {
            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view1}});

+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(0, bg);
+       pass.SetPipeline(cp);
+       pass.Dispatch(1);
        pass.EndPass();
        encoder.Finish();
    }

@@ -1386,11 +1344,11 @@ namespace {
        wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, view1}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});

        // Set bind group on the same index twice. The second one overwrites the first one.
-       // No texture is used as both readonly and writeonly storage in the same pass. But the
-       // overwritten texture still take effect during resource tracking.
+       // No texture is used as both readonly and writeonly storage in the same dispatch so
+       // there are no errors.
        {
            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
@@ -1400,13 +1358,12 @@ namespace {
            pass.SetPipeline(cp);
            pass.Dispatch(1);
            pass.EndPass();
-           // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
            encoder.Finish();
        }

        // Set bind group on the same index twice. The second one overwrites the first one.
-       // texture0 is used as both writeonly and readonly storage in the same pass.
+       // texture0 is used as both writeonly and readonly storage in the same dispatch, which
+       // is an error.
        {
            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();

@@ -1416,9 +1373,7 @@ namespace {
            pass.SetPipeline(cp);
            pass.Dispatch(1);
            pass.EndPass();
-           // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-           // ASSERT_DEVICE_ERROR(encoder.Finish());
-           encoder.Finish();
+           ASSERT_DEVICE_ERROR(encoder.Finish());
        }
    }
}
@@ -1460,7 +1415,7 @@ namespace {
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

        // These two bindings are invisible in compute pass. But we still track these bindings.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

@@ -1469,9 +1424,7 @@ namespace {
        pass.SetBindGroup(0, bg);
        pass.Dispatch(1);
        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
-       encoder.Finish();
+       ASSERT_DEVICE_ERROR(encoder.Finish());
    }
}

@@ -1514,7 +1467,7 @@ namespace {
        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});

        // Create a no-op compute pipeline.
-       wgpu::ComputePipeline cp = CreateNoOpComputePipeline();
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});

        // Texture usage in compute stage conflicts with texture usage in fragment stage. And
        // binding for fragment stage is not visible in compute pass. But we still track this

@@ -1525,9 +1478,7 @@ namespace {
        pass.SetBindGroup(0, bg);
        pass.Dispatch(1);
        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add texture usage tracking for compute
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
-       encoder.Finish();
+       ASSERT_DEVICE_ERROR(encoder.Finish());
    }
}

@@ -1581,19 +1532,10 @@ namespace {

    // Test compute pass
    {
-       // Create a passthrough compute pipeline with a readonly storage texture
-       wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
-           [[group(0), binding(0)]] var tex : [[access(read)]] texture_storage_2d<rgba8unorm>;
-           [[stage(compute)]] fn main() {
-           })");
-       wgpu::ComputePipelineDescriptor pipelineDescriptor;
-       pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &readBGL);
-       pipelineDescriptor.computeStage.module = csModule;
-       pipelineDescriptor.computeStage.entryPoint = "main";
-       wgpu::ComputePipeline cp = device.CreateComputePipeline(&pipelineDescriptor);
+       wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL});

        // Texture binding in readBG conflicts with texture binding in writeBG. The binding
-       // in writeBG is not used in pipeline. But we still track this binding.
+       // in writeBG is not used in pipeline's layout so it isn't an error.
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetBindGroup(0, readBG);
@@ -1601,19 +1543,81 @@ namespace {
        pass.SetPipeline(cp);
        pass.Dispatch(1);
        pass.EndPass();
-       // TODO (yunchao.he@intel.com): add resource tracking per dispatch for compute pass
-       // ASSERT_DEVICE_ERROR(encoder.Finish());
        encoder.Finish();
    }
}

+// Test that using an indirect buffer is disallowed with a writable usage (like storage) but
+// allowed with a readable usage (like readonly storage).
+TEST_F(ResourceUsageTrackingTest, IndirectBufferWithReadOrWriteStorage) {
+    wgpu::Buffer buffer =
+        CreateBuffer(20, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+    wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+    wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+    // Test pipelines
+    wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+    wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+    wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+    // Test that indirect + readonly is allowed in the same render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        DummyRenderPass dummyRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, readBG);
+        pass.DrawIndirect(buffer, 0);
+        pass.EndPass();
+        encoder.Finish();
+    }
+
+    // Test that indirect + writable is disallowed in the same render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        DummyRenderPass dummyRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DrawIndirect(buffer, 0);
+        pass.EndPass();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test that indirect + readonly is allowed in the same dispatch
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(readCp);
+        pass.SetBindGroup(0, readBG);
+        pass.DispatchIndirect(buffer, 0);
+        pass.EndPass();
+        encoder.Finish();
+    }
+
+    // Test that indirect + writable is disallowed in the same dispatch
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(writeCp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DispatchIndirect(buffer, 0);
+        pass.EndPass();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
// TODO (yunchao.he@intel.com):
//
// * Add tests for multiple encoders upon the same resource simultaneously. This situation fits
// some cases like VR, multi-threading, etc.
//
-// * Add tests for indirect buffer
-//
// * Add tests for bundle

} // anonymous namespace