Reuse BGL validation in default layout computations.

Instead of calling individual validation helpers directly in
PipelineLayoutBase::CreateDefault, use ValidateBindGroupLayoutDescriptor and
ValidatePipelineLayoutDescriptor.

Also makes the visibility of the default layout match the aggregation rules
in the WebGPU spec.

Also makes refcounting of BGLs a bit less manual at the bottom of
CreateDefault.

Also adds tests for minBufferBindingSize and visibility aggregation in
the default layout computations.

Bug: dawn:527
Change-Id: I6bbd5f3de8b235dddf6cbd2bedfd34a094fcb277
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/28560
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
Corentin Wallez 2020-09-22 11:04:45 +00:00 committed by Commit Bot service account
parent 52a588f3c9
commit aeda49ba50
4 changed files with 194 additions and 101 deletions
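
A minimal sketch of the aggregation rule described in the message above, using simplified hypothetical types rather than Dawn's real BindGroupLayoutEntry: visibility is OR'd across the stages that declare a binding, and minBufferBindingSize takes the maximum seen.

    #include <algorithm>
    #include <cstdint>

    // Hypothetical, simplified stand-ins for Dawn's types (illustration only).
    using ShaderStageBits = uint32_t;
    struct Entry {
        ShaderStageBits visibility = 0;
        uint64_t minBufferBindingSize = 0;
    };

    // Merge one stage's view of a binding into the accumulated default-layout
    // entry: visibility is the OR across stages, minBufferBindingSize the max.
    void MergeStage(Entry* accumulated, ShaderStageBits stageBit, uint64_t minSize) {
        accumulated->visibility |= stageBit;
        accumulated->minBufferBindingSize =
            std::max(accumulated->minBufferBindingSize, minSize);
    }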

View File

@@ -32,23 +32,9 @@
namespace dawn_native {
-MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase*,
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor);
-MaybeError ValidateBindingTypeWithShaderStageVisibility(
-wgpu::BindingType bindingType,
-wgpu::ShaderStage shaderStageVisibility);
-MaybeError ValidateStorageTextureFormat(DeviceBase* device,
-wgpu::BindingType bindingType,
-wgpu::TextureFormat storageTextureFormat);
-MaybeError ValidateStorageTextureViewDimension(wgpu::BindingType bindingType,
-wgpu::TextureViewDimension dimension);
-MaybeError ValidateBindingCanBeMultisampled(wgpu::BindingType bindingType,
-wgpu::TextureViewDimension viewDimension);
// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
// into a packed range of |BindingIndex| integers.

View File

@@ -57,10 +57,6 @@ namespace dawn_native {
case wgpu::BindingType::WriteonlyStorageTexture:
perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
break;
-default:
-UNREACHABLE();
-break;
}
ASSERT(perStageBindingCountMember != nullptr);
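
Removing the default case here follows a common exhaustiveness pattern: when a switch over an enum class covers every enumerator, the compiler (under -Wswitch or equivalent) can warn if a newly added enumerator goes unhandled, instead of deferring the failure to UNREACHABLE() at runtime. A minimal sketch with a hypothetical enum, not Dawn's actual types:

    #include <cassert>

    enum class BindingKind { Buffer, Sampler, Texture };

    // No default case: adding a BindingKind enumerator without a case below
    // produces a compile-time -Wswitch warning rather than a runtime abort.
    int SlotCost(BindingKind kind) {
        switch (kind) {
            case BindingKind::Buffer:
                return 1;
            case BindingKind::Sampler:
                return 1;
            case BindingKind::Texture:
                return 2;
        }
        assert(false);  // Unreachable when all cases are covered above.
        return 0;
    }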

View File

@@ -28,33 +28,14 @@ namespace dawn_native {
bool InferredBindGroupLayoutEntriesCompatible(const BindGroupLayoutEntry& lhs,
const BindGroupLayoutEntry& rhs) {
-// Minimum buffer binding size excluded because we take the maximum seen across stages
-return lhs.binding == rhs.binding && lhs.visibility == rhs.visibility &&
-lhs.type == rhs.type && lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
+// Minimum buffer binding size excluded because we take the maximum seen across stages.
+// Visibility is excluded because we take the OR across stages.
+return lhs.binding == rhs.binding && lhs.type == rhs.type &&
+lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
lhs.multisampled == rhs.multisampled && lhs.viewDimension == rhs.viewDimension &&
lhs.textureComponentType == rhs.textureComponentType;
}
-wgpu::ShaderStage GetShaderStageVisibilityWithBindingType(wgpu::BindingType bindingType) {
-// TODO(jiawei.shao@intel.com): support read-only and read-write storage textures.
-switch (bindingType) {
-case wgpu::BindingType::StorageBuffer:
-case wgpu::BindingType::WriteonlyStorageTexture:
-return wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute;
-case wgpu::BindingType::UniformBuffer:
-case wgpu::BindingType::ReadonlyStorageBuffer:
-case wgpu::BindingType::Sampler:
-case wgpu::BindingType::ComparisonSampler:
-case wgpu::BindingType::SampledTexture:
-case wgpu::BindingType::ReadonlyStorageTexture:
-return wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment |
-wgpu::ShaderStage::Compute;
-}
-return {};
-}
} // anonymous namespace
MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
@@ -142,22 +123,7 @@ namespace dawn_native {
BindGroupLayoutEntry bindingSlot;
bindingSlot.binding = static_cast<uint32_t>(bindingNumber);
-DAWN_TRY(ValidateBindingTypeWithShaderStageVisibility(bindingInfo.type,
-StageBit(shaderStage)));
-DAWN_TRY(ValidateStorageTextureFormat(device, bindingInfo.type,
-bindingInfo.storageTextureFormat));
-DAWN_TRY(ValidateStorageTextureViewDimension(bindingInfo.type,
-bindingInfo.viewDimension));
-if (bindingInfo.multisampled) {
-DAWN_TRY(ValidateBindingCanBeMultisampled(bindingInfo.type,
-bindingInfo.viewDimension));
-}
-bindingSlot.visibility =
-GetShaderStageVisibilityWithBindingType(bindingInfo.type);
+bindingSlot.visibility = StageBit(shaderStage);
bindingSlot.type = bindingInfo.type;
bindingSlot.hasDynamicOffset = false;
bindingSlot.multisampled = bindingInfo.multisampled;
@@ -181,11 +147,14 @@ namespace dawn_native {
"not compatible with previous declaration");
}
-// Use the max |minBufferBindingSize| we find
+// Use the max |minBufferBindingSize| we find.
existingEntry->minBufferBindingSize =
std::max(existingEntry->minBufferBindingSize,
bindingSlot.minBufferBindingSize);
+// Use the OR of all the stages at which we find this binding.
+existingEntry->visibility |= bindingSlot.visibility;
// Already used slot, continue
continue;
}
@@ -206,36 +175,38 @@ namespace dawn_native {
}
}
DAWN_TRY(ValidateBindingCounts(bindingCounts));
-ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bindGroupLayouts = {};
+// Create the deduced BGLs, validating if they are valid.
+ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
BindGroupLayoutDescriptor desc = {};
desc.entries = entryData[group].data();
desc.entryCount = static_cast<uint32_t>(entryCounts[group]);
-// We should never produce a bad descriptor.
-ASSERT(!ValidateBindGroupLayoutDescriptor(device, &desc).IsError());
+DAWN_TRY(ValidateBindGroupLayoutDescriptor(device, &desc));
+DAWN_TRY_ASSIGN(bindGroupLayouts[group], device->GetOrCreateBindGroupLayout(&desc));
-Ref<BindGroupLayoutBase> bgl;
-DAWN_TRY_ASSIGN(bgl, device->GetOrCreateBindGroupLayout(&desc));
-bindGroupLayouts[group] = bgl.Detach();
ASSERT(!bindGroupLayouts[group]->IsError());
}
+// Create the deduced pipeline layout, validating if it is valid.
+PipelineLayoutBase* pipelineLayout = nullptr;
+{
+ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
+bgls[group] = bindGroupLayouts[group].Get();
+}
PipelineLayoutDescriptor desc = {};
-desc.bindGroupLayouts = bindGroupLayouts.data();
+desc.bindGroupLayouts = bgls.data();
desc.bindGroupLayoutCount = static_cast<uint32_t>(bindGroupLayoutCount);
-PipelineLayoutBase* pipelineLayout = device->CreatePipelineLayout(&desc);
+DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc));
+DAWN_TRY_ASSIGN(pipelineLayout, device->GetOrCreatePipelineLayout(&desc));
ASSERT(!pipelineLayout->IsError());
-// These bind group layouts are created internally and referenced by the pipeline layout.
-// Release the external refcount.
-for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
-if (bindGroupLayouts[group] != nullptr) {
-bindGroupLayouts[group]->Release();
-}
-}
+}
// Sanity check in debug that the pipeline layout is compatible with the current pipeline.
for (const StageAndDescriptor& stage : stages) {
const EntryPointMetadata& metadata =
stage.second->module->GetEntryPoint(stage.second->entryPoint, stage.first);
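
The deleted Release() loop is the "manual refcounting" the commit message mentions; storing the layouts as Ref<T> makes the ownership drop automatic. A minimal, self-contained sketch of such an intrusive smart pointer, hypothetical and only loosely modeled on Dawn's Ref<T>:

    #include <utility>

    // Hypothetical minimal intrusive smart pointer. Holding
    // Ref<BindGroupLayoutBase> in the array instead of raw pointers is what
    // removes the manual Release() loop at the bottom of CreateDefault.
    template <typename T>
    class Ref {
      public:
        Ref() = default;
        explicit Ref(T* ptr) : mPtr(ptr) {
            if (mPtr != nullptr) {
                mPtr->AddRef();
            }
        }
        Ref(Ref&& other) : mPtr(std::exchange(other.mPtr, nullptr)) {}
        Ref(const Ref&) = delete;
        ~Ref() {
            if (mPtr != nullptr) {
                mPtr->Release();  // Reference dropped automatically.
            }
        }
        T* Get() const { return mPtr; }
        T* Detach() { return std::exchange(mPtr, nullptr); }  // Caller takes ownership.
      private:
        T* mPtr = nullptr;
    };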

View File

@@ -19,9 +19,6 @@
class GetBindGroupLayoutTests : public ValidationTest {
protected:
-static constexpr wgpu::ShaderStage kVisibilityAll =
-wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
wgpu::RenderPipeline RenderPipelineFromFragmentShader(const char* shader) {
wgpu::ShaderModule vsModule =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
@@ -78,19 +75,21 @@ TEST_F(GetBindGroupLayoutTests, SameObject) {
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
// The same value is returned for the same index.
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(0).Get());
EXPECT_EQ(pipeline.GetBindGroupLayout(1).Get(), pipeline.GetBindGroupLayout(1).Get());
// Matching bind group layouts at different indices are the same object.
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(1).Get());
-EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(2).Get());
// BGLs with different bindings types are different objects.
EXPECT_NE(pipeline.GetBindGroupLayout(2).Get(), pipeline.GetBindGroupLayout(3).Get());
EXPECT_NE(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(3).Get());
+// BGLs with different visibilities are different objects.
+EXPECT_NE(pipeline.GetBindGroupLayout(0).Get(), pipeline.GetBindGroupLayout(2).Get());
}
// Test that getBindGroupLayout defaults are correct
-// - shader stage visibility is All
+// - shader stage visibility is the stage that adds the binding.
// - dynamic offsets is false
TEST_F(GetBindGroupLayoutTests, DefaultShaderStageAndDynamicOffsets) {
wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
@@ -114,22 +113,19 @@ TEST_F(GetBindGroupLayoutTests, DefaultShaderStageAndDynamicOffsets) {
// Check that visibility and dynamic offsets match
binding.hasDynamicOffset = false;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Fragment;
EXPECT_EQ(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
// Check that any change in visibility doesn't match.
binding.visibility = wgpu::ShaderStage::Vertex;
EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
-binding.visibility = wgpu::ShaderStage::Fragment;
-EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
binding.visibility = wgpu::ShaderStage::Compute;
EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
// Check that any change in hasDynamicOffsets doesn't match.
binding.hasDynamicOffset = true;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Fragment;
EXPECT_NE(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
}
@@ -154,7 +150,7 @@ TEST_F(GetBindGroupLayoutTests, ComputePipeline) {
wgpu::BindGroupLayoutEntry binding = {};
binding.binding = 0;
binding.type = wgpu::BindingType::UniformBuffer;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Compute;
binding.hasDynamicOffset = false;
binding.minBufferBindingSize = 4 * sizeof(float);
@@ -172,6 +168,7 @@ TEST_F(GetBindGroupLayoutTests, BindingType) {
binding.hasDynamicOffset = false;
binding.multisampled = false;
binding.minBufferBindingSize = 4 * sizeof(float);
+binding.visibility = wgpu::ShaderStage::Fragment;
wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;
@@ -179,7 +176,7 @@ TEST_F(GetBindGroupLayoutTests, BindingType) {
{
// Storage buffer binding is not supported in vertex shader.
-binding.visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
+binding.visibility = wgpu::ShaderStage::Fragment;
binding.type = wgpu::BindingType::StorageBuffer;
wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
#version 450
@@ -190,8 +187,6 @@ TEST_F(GetBindGroupLayoutTests, BindingType) {
void main() {})");
EXPECT_EQ(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
}
-binding.visibility = kVisibilityAll;
{
binding.type = wgpu::BindingType::UniformBuffer;
wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(
@@ -243,7 +238,7 @@ TEST_F(GetBindGroupLayoutTests, Multisampled) {
wgpu::BindGroupLayoutEntry binding = {};
binding.binding = 0;
binding.type = wgpu::BindingType::SampledTexture;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Fragment;
binding.hasDynamicOffset = false;
wgpu::BindGroupLayoutDescriptor desc = {};
@@ -276,7 +271,7 @@ TEST_F(GetBindGroupLayoutTests, ViewDimension) {
wgpu::BindGroupLayoutEntry binding = {};
binding.binding = 0;
binding.type = wgpu::BindingType::SampledTexture;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Fragment;
binding.hasDynamicOffset = false;
binding.multisampled = false;
@@ -350,7 +345,7 @@ TEST_F(GetBindGroupLayoutTests, TextureComponentType) {
wgpu::BindGroupLayoutEntry binding = {};
binding.binding = 0;
binding.type = wgpu::BindingType::SampledTexture;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Fragment;
binding.hasDynamicOffset = false;
binding.multisampled = false;
@@ -393,7 +388,7 @@ TEST_F(GetBindGroupLayoutTests, TextureComponentType) {
TEST_F(GetBindGroupLayoutTests, BindingIndices) {
wgpu::BindGroupLayoutEntry binding = {};
binding.type = wgpu::BindingType::UniformBuffer;
-binding.visibility = kVisibilityAll;
+binding.visibility = wgpu::ShaderStage::Fragment;
binding.hasDynamicOffset = false;
binding.multisampled = false;
binding.minBufferBindingSize = 4 * sizeof(float);
@@ -471,6 +466,152 @@ TEST_F(GetBindGroupLayoutTests, DuplicateBinding) {
device.CreateRenderPipeline(&descriptor);
}
// Test that minBufferSize is set on the BGL and that the max of the min buffer sizes is used.
TEST_F(GetBindGroupLayoutTests, MinBufferSize) {
wgpu::ShaderModule vsModule4 =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
#version 450
layout(set = 0, binding = 0) uniform UniformBuffer {
float pos;
};
void main() {})");
wgpu::ShaderModule vsModule64 =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
#version 450
layout(set = 0, binding = 0) uniform UniformBuffer1 {
mat4 pos;
};
void main() {})");
wgpu::ShaderModule fsModule4 =
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 0) uniform UniformBuffer {
float pos;
};
void main() {})");
wgpu::ShaderModule fsModule64 =
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 0) uniform UniformBuffer {
mat4 pos;
};
void main() {})");
// Create BGLs with minBufferBindingSize 4 and 64.
wgpu::BindGroupLayoutEntry binding = {};
binding.binding = 0;
binding.type = wgpu::BindingType::UniformBuffer;
binding.visibility = wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;
desc.entries = &binding;
binding.minBufferBindingSize = 4;
wgpu::BindGroupLayout bgl4 = device.CreateBindGroupLayout(&desc);
binding.minBufferBindingSize = 64;
wgpu::BindGroupLayout bgl64 = device.CreateBindGroupLayout(&desc);
utils::ComboRenderPipelineDescriptor descriptor(device);
descriptor.layout = nullptr;
// Check with both stages using 4 bytes.
{
descriptor.vertexStage.module = vsModule4;
descriptor.cFragmentStage.module = fsModule4;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bgl4.Get());
}
// Check that the max is taken between 4 and 64.
{
descriptor.vertexStage.module = vsModule64;
descriptor.cFragmentStage.module = fsModule4;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bgl64.Get());
}
// Check that the order doesn't change that the max is taken.
{
descriptor.vertexStage.module = vsModule4;
descriptor.cFragmentStage.module = fsModule64;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), bgl64.Get());
}
}
// Test that the visibility is correctly aggregated if two stages have the exact same binding.
TEST_F(GetBindGroupLayoutTests, StageAggregation) {
wgpu::ShaderModule vsModuleNoSampler =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
#version 450
void main() {})");
wgpu::ShaderModule vsModuleSampler =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
#version 450
layout(set = 0, binding = 0) uniform sampler mySampler;
void main() {})");
wgpu::ShaderModule fsModuleNoSampler =
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
#version 450
void main() {})");
wgpu::ShaderModule fsModuleSampler =
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 0) uniform sampler mySampler;
void main() {})");
// Create a BGL with a sampler binding.
wgpu::BindGroupLayoutEntry binding = {};
binding.binding = 0;
binding.type = wgpu::BindingType::Sampler;
wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;
desc.entries = &binding;
utils::ComboRenderPipelineDescriptor descriptor(device);
descriptor.layout = nullptr;
// Check with only the vertex shader using the sampler
{
descriptor.vertexStage.module = vsModuleSampler;
descriptor.cFragmentStage.module = fsModuleNoSampler;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
binding.visibility = wgpu::ShaderStage::Vertex;
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get());
}
// Check with only the fragment shader using the sampler
{
descriptor.vertexStage.module = vsModuleNoSampler;
descriptor.cFragmentStage.module = fsModuleSampler;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
binding.visibility = wgpu::ShaderStage::Fragment;
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get());
}
// Check with both shaders using the sampler
{
descriptor.vertexStage.module = vsModuleSampler;
descriptor.cFragmentStage.module = fsModuleSampler;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&descriptor);
binding.visibility = wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Vertex;
EXPECT_EQ(pipeline.GetBindGroupLayout(0).Get(), device.CreateBindGroupLayout(&desc).Get());
}
}
// Test it is invalid to have conflicting binding types in the shaders.
TEST_F(GetBindGroupLayoutTests, ConflictingBindingType) {
wgpu::ShaderModule vsModule =
@@ -500,8 +641,7 @@ TEST_F(GetBindGroupLayoutTests, ConflictingBindingType) {
}
// Test it is invalid to have conflicting binding texture multisampling in the shaders.
-// TODO: Support multisampling
-TEST_F(GetBindGroupLayoutTests, DISABLED_ConflictingBindingTextureMultisampling) {
+TEST_F(GetBindGroupLayoutTests, ConflictingBindingTextureMultisampling) {
wgpu::ShaderModule vsModule =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
#version 450
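
A closing note on the pointer-equality assertions used throughout these tests: EXPECT_EQ on GetBindGroupLayout(...).Get() is meaningful because Dawn deduplicates layout objects, with GetOrCreateBindGroupLayout returning the cached object for an equal descriptor. A minimal sketch of such a cache, hypothetical and much simplified:

    #include <cstdint>
    #include <map>
    #include <memory>

    // Hypothetical simplified dedup cache keyed by a descriptor fingerprint.
    // Equal descriptors yield the same object, so pointer equality implies
    // layout equality (and inequality implies a differing descriptor).
    struct Layout {
        uint64_t fingerprint;
    };

    class LayoutCache {
      public:
        std::shared_ptr<Layout> GetOrCreate(uint64_t fingerprint) {
            auto it = mCache.find(fingerprint);
            if (it != mCache.end()) {
                return it->second;  // Reuse the existing identical layout.
            }
            auto layout = std::make_shared<Layout>(Layout{fingerprint});
            mCache.emplace(fingerprint, layout);
            return layout;
        }

      private:
        std::map<uint64_t, std::shared_ptr<Layout>> mCache;
    };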