Add minimum buffer size validation
Bug: dawn:459
Change-Id: I755cc0ada7be7b1cb71724cb410ab0c3a88cea24
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/22421
Commit-Queue: Idan Raiter <idanr@google.com>
Reviewed-by: Austin Eng <enga@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
commit f434fdc6ed
parent b75075ac7b
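For context, a minimal sketch (not taken from this change) of how the new minBufferBindingSize member is meant to be used from the C++ API. The field and descriptor names follow the dawn.json and test changes below; "device" is assumed to be an existing wgpu::Device, and the concrete sizes are illustrative only.

    // Hypothetical usage sketch: declare a 16-byte minimum for binding 0 in the layout.
    wgpu::BindGroupLayoutEntry entry = {};
    entry.binding = 0;
    entry.visibility = wgpu::ShaderStage::Compute;
    entry.type = wgpu::BindingType::StorageBuffer;
    entry.minBufferBindingSize = 16;  // new member added by this change; 0 keeps the old behavior

    wgpu::BindGroupLayoutDescriptor bglDesc = {};
    bglDesc.entryCount = 1;
    bglDesc.entries = &entry;
    wgpu::BindGroupLayout bgl = device.CreateBindGroupLayout(&bglDesc);

    // Binding a region smaller than the declared minimum is expected to fail at
    // CreateBindGroup time; with minBufferBindingSize == 0 the check instead happens
    // lazily at draw/dispatch time against the shader-reflected sizes.
    wgpu::BufferDescriptor bufDesc = {};
    bufDesc.size = 256;
    bufDesc.usage = wgpu::BufferUsage::Storage;
    wgpu::Buffer buffer = device.CreateBuffer(&bufDesc);

    wgpu::BindGroupEntry binding = {};
    binding.binding = 0;
    binding.buffer = buffer;
    binding.offset = 0;
    binding.size = 8;  // smaller than the 16-byte minimum
    wgpu::BindGroupDescriptor bgDesc = {};
    bgDesc.layout = bgl;
    bgDesc.entryCount = 1;
    bgDesc.entries = &binding;
    device.CreateBindGroup(&bgDesc);  // expected to produce a validation error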
@@ -95,7 +95,8 @@
{"name": "multisampled", "type": "bool", "default": "false"},
{"name": "view dimension", "type": "texture view dimension", "default": "undefined"},
{"name": "texture component type", "type": "texture component type", "default": "float"},
{"name": "storage texture format", "type": "texture format", "default": "undefined"}
{"name": "storage texture format", "type": "texture format", "default": "undefined"},
{"name": "min buffer binding size", "type": "uint64_t", "default": "0"}
]
},
"bind group layout descriptor": {
@@ -31,7 +31,8 @@ namespace dawn_native {

MaybeError ValidateBufferBinding(const DeviceBase* device,
const BindGroupEntry& entry,
wgpu::BufferUsage requiredUsage) {
wgpu::BufferUsage requiredUsage,
const BindingInfo& bindingInfo) {
if (entry.buffer == nullptr || entry.sampler != nullptr ||
entry.textureView != nullptr) {
return DAWN_VALIDATION_ERROR("expected buffer binding");

@@ -70,6 +71,14 @@
return DAWN_VALIDATION_ERROR("buffer binding usage mismatch");
}

if (bindingSize < bindingInfo.minBufferBindingSize) {
return DAWN_VALIDATION_ERROR(
"Binding size smaller than minimum buffer size: binding " +
std::to_string(entry.binding) + " given " + std::to_string(bindingSize) +
" bytes, required " + std::to_string(bindingInfo.minBufferBindingSize) +
" bytes");
}

return {};
}

@@ -182,11 +191,13 @@ namespace dawn_native {
// Perform binding-type specific validation.
switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Uniform));
DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Uniform,
bindingInfo));
break;
case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Storage));
DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Storage,
bindingInfo));
break;
case wgpu::BindingType::SampledTexture:
DAWN_TRY(ValidateTextureBinding(device, entry, wgpu::TextureUsage::Sampled,

@@ -264,6 +275,16 @@ namespace dawn_native {
continue;
}
}

uint32_t packedIdx = 0;
for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
++bindingIndex) {
if (descriptor->layout->GetBindingInfo(bindingIndex).minBufferBindingSize == 0) {
mBindingData.unverifiedBufferSizes[packedIdx] =
mBindingData.bufferData[bindingIndex].size;
++packedIdx;
}
}
}

BindGroupBase::~BindGroupBase() {

@@ -302,6 +323,11 @@ namespace dawn_native {
return mLayout.Get();
}

const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
ASSERT(!IsError());
return mBindingData.unverifiedBufferSizes;
}

BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
@@ -48,6 +48,7 @@ namespace dawn_native {
BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;

protected:
// To save memory, the size of a bind group is dynamically determined and the bind group is
@@ -269,7 +269,8 @@ namespace dawn_native {

void HashCombineBindingInfo(size_t* hash, const BindingInfo& info) {
HashCombine(hash, info.hasDynamicOffset, info.multisampled, info.visibility, info.type,
info.textureComponentType, info.viewDimension, info.storageTextureFormat);
info.textureComponentType, info.viewDimension, info.storageTextureFormat,
info.minBufferBindingSize);
}

bool operator!=(const BindingInfo& a, const BindingInfo& b) {

@@ -279,7 +280,8 @@ namespace dawn_native {
a.type != b.type ||                                 //
a.textureComponentType != b.textureComponentType || //
a.viewDimension != b.viewDimension ||               //
a.storageTextureFormat != b.storageTextureFormat;
a.storageTextureFormat != b.storageTextureFormat || //
a.minBufferBindingSize != b.minBufferBindingSize;
}

bool IsBufferBinding(wgpu::BindingType bindingType) {

@@ -345,6 +347,9 @@ namespace dawn_native {
if (a.storageTextureFormat != b.storageTextureFormat) {
return a.storageTextureFormat < b.storageTextureFormat;
}
if (a.minBufferBindingSize != b.minBufferBindingSize) {
return a.minBufferBindingSize < b.minBufferBindingSize;
}
return false;
}

@@ -382,11 +387,13 @@ namespace dawn_native {

for (BindingIndex i{0}; i < mBindingCount; ++i) {
const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
mBindingInfo[i].binding = BindingNumber(binding.binding);
mBindingInfo[i].type = binding.type;
mBindingInfo[i].visibility = binding.visibility;
mBindingInfo[i].textureComponentType =
Format::TextureComponentTypeToFormatType(binding.textureComponentType);
mBindingInfo[i].storageTextureFormat = binding.storageTextureFormat;
mBindingInfo[i].minBufferBindingSize = binding.minBufferBindingSize;

switch (binding.type) {
case wgpu::BindingType::UniformBuffer:

@@ -395,6 +402,9 @@ namespace dawn_native {
// Buffers must be contiguously packed at the start of the binding info.
ASSERT(mBufferCount == i);
++mBufferCount;
if (binding.minBufferBindingSize == 0) {
++mUnverifiedBufferCount;
}
break;
default:
break;

@@ -490,6 +500,10 @@ namespace dawn_native {
return mBindingCount;
}

BindingIndex BindGroupLayoutBase::GetBufferCount() const {
return mBufferCount;
}

BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
// This is a binding index because dynamic buffers are packed at the front of the binding
// info.

@@ -504,12 +518,23 @@ namespace dawn_native {
return mDynamicStorageBufferCount;
}

uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
return mUnverifiedBufferCount;
}

size_t BindGroupLayoutBase::GetBindingDataSize() const {
// | ------ buffer-specific ----------| ------------ object pointers -------------|
// | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
// Followed by:
// |---------buffer size array--------|
// |-uint64_t[mUnverifiedBufferCount]-|
size_t objectPointerStart = static_cast<uint32_t>(mBufferCount) * sizeof(BufferBindingData);
ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
return objectPointerStart + static_cast<uint32_t>(mBindingCount) * sizeof(Ref<ObjectBase>);
size_t bufferSizeArrayStart = Align(
objectPointerStart + static_cast<uint32_t>(mBindingCount) * sizeof(Ref<ObjectBase>),
sizeof(uint64_t));
ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
return bufferSizeArrayStart + mUnverifiedBufferCount * sizeof(uint64_t);
}

BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(

@@ -517,11 +542,17 @@ namespace dawn_native {
BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
auto bindings =
reinterpret_cast<Ref<ObjectBase>*>(bufferData + static_cast<uint32_t>(mBufferCount));
uint64_t* unverifiedBufferSizes =
AlignPtr(reinterpret_cast<uint64_t*>(bindings + static_cast<uint32_t>(mBindingCount)),
sizeof(uint64_t));

ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));

return {{bufferData, mBufferCount}, {bindings, mBindingCount}};
return {{bufferData, mBufferCount},
{bindings, mBindingCount},
{unverifiedBufferSizes, mUnverifiedBufferCount}};
}

} // namespace dawn_native
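As a side note, a small self-contained sketch of the size arithmetic in GetBindingDataSize() above, using plain integers and example counts. The 16-byte BufferBindingData and 8-byte Ref<ObjectBase> sizes are assumptions for illustration (offset + size, and one pointer, respectively), not values taken from the source.

    // Rough model of the per-bind-group binding-data allocation after this change.
    #include <cstdint>
    #include <cstdio>

    static uint64_t AlignUp(uint64_t v, uint64_t a) {
        // a must be a power of two.
        return (v + a - 1) & ~(a - 1);
    }

    int main() {
        const uint64_t kBufferBindingData = 16;  // assumed: uint64_t offset + uint64_t size
        const uint64_t kRefPtr = 8;              // assumed: Ref<ObjectBase> holds one pointer
        uint64_t bufferCount = 3;                // buffer bindings, packed at the front
        uint64_t bindingCount = 5;               // all bindings
        uint64_t unverifiedBufferCount = 2;      // buffers whose layout minimum size is 0

        uint64_t objectPointerStart = bufferCount * kBufferBindingData;             // 48
        uint64_t bufferSizeArrayStart =
            AlignUp(objectPointerStart + bindingCount * kRefPtr, sizeof(uint64_t)); // 88
        uint64_t total = bufferSizeArrayStart + unverifiedBufferCount * sizeof(uint64_t); // 104
        std::printf("binding data bytes: %llu\n", (unsigned long long)total);
        return 0;
    }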
@@ -79,10 +79,12 @@ namespace dawn_native {
};

BindingIndex GetBindingCount() const;
BindingIndex GetBufferCount() const;
// Returns |BindingIndex| because dynamic buffers are packed at the front.
BindingIndex GetDynamicBufferCount() const;
uint32_t GetDynamicUniformBufferCount() const;
uint32_t GetDynamicStorageBufferCount() const;
uint32_t GetUnverifiedBufferCount() const;

struct BufferBindingData {
uint64_t offset;

@@ -92,6 +94,7 @@ namespace dawn_native {
struct BindingDataPointers {
ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
};

// Compute the amount of space / alignment required to store bindings for a bind group of

@@ -119,6 +122,7 @@ namespace dawn_native {

BindingIndex mBindingCount;
BindingIndex mBufferCount{0}; // |BindingIndex| because buffers are packed at the front.
uint32_t mUnverifiedBufferCount = 0; // Buffers with minimum buffer size unspecified
uint32_t mDynamicUniformBufferCount = 0;
uint32_t mDynamicStorageBufferCount = 0;
@@ -33,6 +33,7 @@ namespace dawn_native {
static constexpr BindingIndex kMaxBindingsPerGroupTyped = BindingIndex(kMaxBindingsPerGroup);

struct BindingInfo {
BindingNumber binding;
wgpu::ShaderStage visibility;
wgpu::BindingType type;
Format::Type textureComponentType = Format::Type::Float;

@@ -40,8 +41,12 @@ namespace dawn_native {
wgpu::TextureFormat storageTextureFormat = wgpu::TextureFormat::Undefined;
bool hasDynamicOffset = false;
bool multisampled = false;
uint64_t minBufferBindingSize = 0;
};

// For buffer size validation
using RequiredBufferSizes = std::array<std::vector<uint64_t>, kMaxBindGroups>;

} // namespace dawn_native

#endif // DAWNNATIVE_BINDINGINFO_H_
@@ -24,6 +24,21 @@

namespace dawn_native {

namespace {
bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
const std::vector<uint64_t>& pipelineMinimumBufferSizes) {
ASSERT(unverifiedBufferSizes.size() == pipelineMinimumBufferSizes.size());

for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
if (unverifiedBufferSizes[i] < pipelineMinimumBufferSizes[i]) {
return false;
}
}

return true;
}
} // namespace

enum ValidationAspect {
VALIDATION_ASPECT_PIPELINE,
VALIDATION_ASPECT_BIND_GROUPS,

@@ -87,7 +102,9 @@ namespace dawn_native {

for (uint32_t i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
if (mBindgroups[i] == nullptr ||
mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout()) {
mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
(*mMinimumBufferSizes)[i])) {
matches = false;
break;
}

@@ -123,7 +140,27 @@ namespace dawn_native {
}

if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
return DAWN_VALIDATION_ERROR("Missing bind group");
for (uint32_t i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
if (mBindgroups[i] == nullptr) {
return DAWN_VALIDATION_ERROR("Missing bind group " + std::to_string(i));
} else if (mLastPipelineLayout->GetBindGroupLayout(i) !=
mBindgroups[i]->GetLayout()) {
return DAWN_VALIDATION_ERROR(
"Pipeline and bind group layout doesn't match for bind group " +
std::to_string(i));
} else if (!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
(*mMinimumBufferSizes)[i])) {
return DAWN_VALIDATION_ERROR("Binding sizes too small for bind group " +
std::to_string(i));
}
}

// The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
// It returns the first invalid state found. We shouldn't be able to reach this line
// because to have invalid aspects one of the above conditions must have failed earlier.
// If this is reached, make sure lazy aspects and the error checks above are consistent.
UNREACHABLE();
return DAWN_VALIDATION_ERROR("Bind groups invalid");
}

if (aspects[VALIDATION_ASPECT_PIPELINE]) {

@@ -157,6 +194,7 @@ namespace dawn_native {

void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
mLastPipelineLayout = pipeline->GetLayout();
mMinimumBufferSizes = &pipeline->GetMinimumBufferSizes();

mAspects.set(VALIDATION_ASPECT_PIPELINE);
@@ -16,6 +16,7 @@
#define DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H

#include "common/Constants.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"

@@ -57,6 +58,8 @@ namespace dawn_native {

PipelineLayoutBase* mLastPipelineLayout = nullptr;
RenderPipelineBase* mLastRenderPipeline = nullptr;

const RequiredBufferSizes* mMinimumBufferSizes = nullptr;
};

} // namespace dawn_native
@@ -19,6 +19,13 @@

namespace dawn_native {

namespace {
RequiredBufferSizes ComputeMinBufferSizes(const ComputePipelineDescriptor* descriptor) {
return descriptor->computeStage.module->ComputeRequiredBufferSizesForLayout(
descriptor->layout);
}
} // anonymous namespace

MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
const ComputePipelineDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {

@@ -38,7 +45,10 @@ namespace dawn_native {

ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
const ComputePipelineDescriptor* descriptor)
: PipelineBase(device, descriptor->layout, wgpu::ShaderStage::Compute),
: PipelineBase(device,
descriptor->layout,
wgpu::ShaderStage::Compute,
ComputeMinBufferSizes(descriptor)),
mModule(descriptor->computeStage.module),
mEntryPoint(descriptor->computeStage.entryPoint) {
}
@@ -43,8 +43,12 @@ namespace dawn_native {

PipelineBase::PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
wgpu::ShaderStage stages)
: CachedObject(device), mStageMask(stages), mLayout(layout) {
wgpu::ShaderStage stages,
RequiredBufferSizes minimumBufferSizes)
: CachedObject(device),
mStageMask(stages),
mLayout(layout),
mMinimumBufferSizes(std::move(minimumBufferSizes)) {
}

PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)

@@ -66,6 +70,11 @@ namespace dawn_native {
return mLayout.Get();
}

const RequiredBufferSizes& PipelineBase::GetMinimumBufferSizes() const {
ASSERT(!IsError());
return mMinimumBufferSizes;
}

MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
@@ -39,9 +39,13 @@ namespace dawn_native {
PipelineLayoutBase* GetLayout();
const PipelineLayoutBase* GetLayout() const;
BindGroupLayoutBase* GetBindGroupLayout(uint32_t groupIndex);
const RequiredBufferSizes& GetMinimumBufferSizes() const;

protected:
PipelineBase(DeviceBase* device, PipelineLayoutBase* layout, wgpu::ShaderStage stages);
PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
wgpu::ShaderStage stages,
RequiredBufferSizes bufferSizes);
PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);

private:

@@ -49,6 +53,7 @@ namespace dawn_native {

wgpu::ShaderStage mStageMask;
Ref<PipelineLayoutBase> mLayout;
RequiredBufferSizes mMinimumBufferSizes;
};

} // namespace dawn_native
@@ -25,7 +25,9 @@ namespace dawn_native {

namespace {

bool operator==(const BindGroupLayoutEntry& lhs, const BindGroupLayoutEntry& rhs) {
bool InferredBindGroupLayoutEntriesCompatible(const BindGroupLayoutEntry& lhs,
const BindGroupLayoutEntry& rhs) {
// Minimum buffer binding size excluded because we take the maximum seen across stages
return lhs.binding == rhs.binding && lhs.visibility == rhs.visibility &&
lhs.type == rhs.type && lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
lhs.multisampled == rhs.multisampled && lhs.viewDimension == rhs.viewDimension &&

@@ -170,18 +172,29 @@ namespace dawn_native {
bindingSlot.textureComponentType =
Format::FormatTypeToTextureComponentType(bindingInfo.textureComponentType);
bindingSlot.storageTextureFormat = bindingInfo.storageTextureFormat;
bindingSlot.minBufferBindingSize = bindingInfo.minBufferBindingSize;

{
const auto& it = usedBindingsMap[group].find(bindingNumber);
if (it != usedBindingsMap[group].end()) {
if (bindingSlot == entryData[group][it->second]) {
// Already used and the data is the same. Continue.
continue;
} else {
BindGroupLayoutEntry* existingEntry = &entryData[group][it->second];

// Check if any properties are incompatible with existing entry
// If compatible, we will merge some properties
if (!InferredBindGroupLayoutEntriesCompatible(*existingEntry,
bindingSlot)) {
return DAWN_VALIDATION_ERROR(
"Duplicate binding in default pipeline layout initialization "
"not compatible with previous declaration");
}

// Use the max |minBufferBindingSize| we find
existingEntry->minBufferBindingSize =
std::max(existingEntry->minBufferBindingSize,
bindingSlot.minBufferBindingSize);

// Already used slot, continue
continue;
}
}
@@ -183,6 +183,29 @@ namespace dawn_native {
return {};
}

RequiredBufferSizes ComputeMinBufferSizes(const RenderPipelineDescriptor* descriptor) {
RequiredBufferSizes bufferSizes =
descriptor->vertexStage.module->ComputeRequiredBufferSizesForLayout(
descriptor->layout);

// Merge the two buffer size requirements by taking the larger element from each
if (descriptor->fragmentStage != nullptr) {
RequiredBufferSizes fragmentSizes =
descriptor->fragmentStage->module->ComputeRequiredBufferSizesForLayout(
descriptor->layout);

for (uint32_t group = 0; group < bufferSizes.size(); ++group) {
ASSERT(bufferSizes[group].size() == fragmentSizes[group].size());
for (size_t i = 0; i < bufferSizes[group].size(); ++i) {
bufferSizes[group][i] =
std::max(bufferSizes[group][i], fragmentSizes[group][i]);
}
}
}

return bufferSizes;
}

} // anonymous namespace

// Helper functions

@@ -380,7 +403,8 @@ namespace dawn_native {
const RenderPipelineDescriptor* descriptor)
: PipelineBase(device,
descriptor->layout,
wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment),
wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
ComputeMinBufferSizes(descriptor)),
mAttachmentState(device->GetOrCreateAttachmentState(descriptor)),
mPrimitiveTopology(descriptor->primitiveTopology),
mSampleMask(descriptor->sampleMask),
@@ -558,6 +558,11 @@ namespace dawn_native {
info->viewDimension = ToWGPUTextureViewDimension(binding.texture_dimension);
break;
}
case wgpu::BindingType::UniformBuffer:
case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
info->minBufferBindingSize = binding.minimum_buffer_size;
break;
default:
break;
}

@@ -712,6 +717,15 @@ namespace dawn_native {
info->id = resource.id;
info->base_type_id = resource.base_type_id;

if (bindingType == wgpu::BindingType::UniformBuffer ||
bindingType == wgpu::BindingType::StorageBuffer ||
bindingType == wgpu::BindingType::ReadonlyStorageBuffer) {
// Determine buffer size, with a minimum of 1 element in the runtime array
spirv_cross::SPIRType type = compiler.get_type(info->base_type_id);
info->minBufferBindingSize =
compiler.get_declared_struct_size_runtime_array(type, 1);
}

switch (bindingType) {
case wgpu::BindingType::SampledTexture: {
spirv_cross::SPIRType::ImageType imageType =

@@ -867,6 +881,47 @@ namespace dawn_native {
return mExecutionModel;
}

RequiredBufferSizes ShaderModuleBase::ComputeRequiredBufferSizesForLayout(
const PipelineLayoutBase* layout) const {
RequiredBufferSizes bufferSizes;
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
bufferSizes[group] =
GetBindGroupMinBufferSizes(mBindingInfo[group], layout->GetBindGroupLayout(group));
}

return bufferSizes;
}

std::vector<uint64_t> ShaderModuleBase::GetBindGroupMinBufferSizes(
const BindingInfoMap& shaderMap,
const BindGroupLayoutBase* layout) const {
std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
uint32_t packedIdx = 0;

for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
++bindingIndex) {
const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
if (bindingInfo.minBufferBindingSize != 0) {
// Skip bindings that have minimum buffer size set in the layout
continue;
}

ASSERT(packedIdx < requiredBufferSizes.size());
const auto& shaderInfo = shaderMap.find(bindingInfo.binding);
if (shaderInfo != shaderMap.end()) {
requiredBufferSizes[packedIdx] = shaderInfo->second.minBufferBindingSize;
} else {
// We have to include buffers if they are included in the bind group's
// packed vector. We don't actually need to check these at draw time, so
// if this is a problem in the future we can optimize it further.
requiredBufferSizes[packedIdx] = 0;
}
++packedIdx;
}

return requiredBufferSizes;
}

MaybeError ShaderModuleBase::ValidateCompatibilityWithPipelineLayout(
const PipelineLayoutBase* layout) const {
ASSERT(!IsError());

@@ -889,7 +944,7 @@ namespace dawn_native {
}

MaybeError ShaderModuleBase::ValidateCompatibilityWithBindGroupLayout(
size_t group,
uint32_t group,
const BindGroupLayoutBase* layout) const {
ASSERT(!IsError());

@@ -980,7 +1035,16 @@ namespace dawn_native {

case wgpu::BindingType::UniformBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::StorageBuffer: {
if (bindingInfo.minBufferBindingSize != 0 &&
moduleInfo.minBufferBindingSize > bindingInfo.minBufferBindingSize) {
return DAWN_VALIDATION_ERROR(
"The minimum buffer size of the bind group layout entry is smaller "
"than " +
GetShaderDeclarationString(group, bindingNumber));
}
break;
}
case wgpu::BindingType::Sampler:
case wgpu::BindingType::ComparisonSampler:
break;
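For reference, the minimum sizes this reflection is expected to infer can be read off the expectations in the new MinimumBufferSizeValidationTests further down. The snippet below only restates a few of those expected values next to shader text written in the same style as the tests; it is an illustration, not output of the code above.

    #include <string>

    // Per-binding minimum sizes in the comments are the values the std140/std430 tests expect.
    const std::string kStd140Example = R"(
        struct ThreeFloats { float f1; float f2; float f3; };
        layout(std140, set = 0, binding = 0) uniform B0 { float a; };           // min size  4
        layout(std140, set = 0, binding = 1) buffer  B1 { float b[]; };         // min size 16
        layout(std140, set = 0, binding = 2) uniform B2 { mat2 c; };            // min size 32
        layout(std140, set = 0, binding = 3) buffer  B3 { int d; float e[]; };  // min size 32
    )";

    const std::string kStd430Example = R"(
        layout(std430, set = 0, binding = 0) buffer B0 { float b[]; };          // min size 4
        layout(std430, set = 0, binding = 1) buffer B1 { int d; float e[]; };   // min size 8
    )";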
@@ -63,8 +63,8 @@ namespace dawn_native {
using BindingInfo::visibility;
};

using ModuleBindingInfo =
std::array<std::map<BindingNumber, ShaderBindingInfo>, kMaxBindGroups>;
using BindingInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
using ModuleBindingInfo = std::array<BindingInfoMap, kMaxBindGroups>;

const ModuleBindingInfo& GetBindingInfo() const;
const std::bitset<kMaxVertexAttributes>& GetUsedVertexAttributes() const;

@@ -77,6 +77,9 @@ namespace dawn_native {

MaybeError ValidateCompatibilityWithPipelineLayout(const PipelineLayoutBase* layout) const;

RequiredBufferSizes ComputeRequiredBufferSizesForLayout(
const PipelineLayoutBase* layout) const;

// Functors necessary for the unordered_set<ShaderModuleBase*>-based cache.
struct HashFunc {
size_t operator()(const ShaderModuleBase* module) const;

@@ -99,9 +102,12 @@ namespace dawn_native {
ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);

MaybeError ValidateCompatibilityWithBindGroupLayout(
size_t group,
uint32_t group,
const BindGroupLayoutBase* layout) const;

std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingInfoMap& shaderMap,
const BindGroupLayoutBase* layout) const;

// Different implementations reflection into the shader depending on
// whether using spvc, or directly accessing spirv-cross.
MaybeError ExtractSpirvInfoWithSpvc();
@@ -189,6 +189,7 @@ test("dawn_unittests") {
"unittests/validation/FenceValidationTests.cpp",
"unittests/validation/GetBindGroupLayoutValidationTests.cpp",
"unittests/validation/IndexBufferValidationTests.cpp",
"unittests/validation/MinimumBufferSizeValidationTests.cpp",
"unittests/validation/QuerySetValidationTests.cpp",
"unittests/validation/QueueSubmitValidationTests.cpp",
"unittests/validation/RenderBundleValidationTests.cpp",
@@ -877,7 +877,9 @@ class SetBindGroupValidationTest : public ValidationTest {
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
renderPassEncoder.SetPipeline(renderPipeline);
renderPassEncoder.SetBindGroup(0, bindGroup, count, offsets);
if (bindGroup != nullptr) {
renderPassEncoder.SetBindGroup(0, bindGroup, count, offsets);
}
renderPassEncoder.Draw(3);
renderPassEncoder.EndPass();
if (!expectation) {

@@ -896,7 +898,9 @@ class SetBindGroupValidationTest : public ValidationTest {
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
computePassEncoder.SetPipeline(computePipeline);
computePassEncoder.SetBindGroup(0, bindGroup, count, offsets);
if (bindGroup != nullptr) {
computePassEncoder.SetBindGroup(0, bindGroup, count, offsets);
}
computePassEncoder.Dispatch(1);
computePassEncoder.EndPass();
if (!expectation) {

@@ -926,6 +930,12 @@ TEST_F(SetBindGroupValidationTest, Basic) {
TestComputePassBindGroup(bindGroup, offsets.data(), 3, true);
}

// Draw/dispatch with a bind group missing is invalid
TEST_F(SetBindGroupValidationTest, MissingBindGroup) {
TestRenderPassBindGroup(nullptr, nullptr, 0, false);
TestComputePassBindGroup(nullptr, nullptr, 0, false);
}

// Setting bind group after a draw / dispatch should re-verify the layout is compatible
TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) {
// Set up the bind group
@@ -106,6 +106,7 @@ TEST_F(GetBindGroupLayoutTests, DefaultShaderStageAndDynamicOffsets) {
binding.binding = 0;
binding.type = wgpu::BindingType::UniformBuffer;
binding.multisampled = false;
binding.minBufferBindingSize = 4 * sizeof(float);

wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;

@@ -155,6 +156,7 @@ TEST_F(GetBindGroupLayoutTests, ComputePipeline) {
binding.type = wgpu::BindingType::UniformBuffer;
binding.visibility = kVisibilityAll;
binding.hasDynamicOffset = false;
binding.minBufferBindingSize = 4 * sizeof(float);

wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;

@@ -169,6 +171,7 @@ TEST_F(GetBindGroupLayoutTests, BindingType) {
binding.binding = 0;
binding.hasDynamicOffset = false;
binding.multisampled = false;
binding.minBufferBindingSize = 4 * sizeof(float);

wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;

@@ -213,6 +216,7 @@ TEST_F(GetBindGroupLayoutTests, BindingType) {
EXPECT_EQ(device.CreateBindGroupLayout(&desc).Get(), pipeline.GetBindGroupLayout(0).Get());
}

binding.minBufferBindingSize = 0;
{
binding.type = wgpu::BindingType::SampledTexture;
wgpu::RenderPipeline pipeline = RenderPipelineFromFragmentShader(R"(

@@ -392,6 +396,7 @@ TEST_F(GetBindGroupLayoutTests, BindingIndices) {
binding.visibility = kVisibilityAll;
binding.hasDynamicOffset = false;
binding.multisampled = false;
binding.minBufferBindingSize = 4 * sizeof(float);

wgpu::BindGroupLayoutDescriptor desc = {};
desc.entryCount = 1;
@@ -0,0 +1,587 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "tests/unittests/validation/ValidationTest.h"

#include "common/Assert.h"
#include "common/Constants.h"
#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"

namespace {
// Helper for describing bindings throughout the tests
struct BindingDescriptor {
uint32_t set;
uint32_t binding;
std::string text;
uint64_t size;
wgpu::BindingType type = wgpu::BindingType::StorageBuffer;
wgpu::ShaderStage visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
};

// Runs |func| with a modified version of |originalSizes| as an argument, adding |offset| to
// each element one at a time. This is useful to verify some behavior happens if any element is
// offset from original
template <typename F>
void WithEachSizeOffsetBy(int64_t offset, const std::vector<uint64_t>& originalSizes, F func) {
std::vector<uint64_t> modifiedSizes = originalSizes;
for (size_t i = 0; i < originalSizes.size(); ++i) {
if (offset < 0) {
ASSERT(originalSizes[i] >= static_cast<uint64_t>(-offset));
}
// Run the function with an element offset, and restore element afterwards
modifiedSizes[i] += offset;
func(modifiedSizes);
modifiedSizes[i] -= offset;
}
}

// Runs |func| with |correctSizes|, and an expectation of success and failure
template <typename F>
void CheckSizeBounds(const std::vector<uint64_t>& correctSizes, F func) {
// To validate size:
// Check invalid with bind group with one less
// Check valid with bind group with correct size

// Make sure (every size - 1) produces an error
WithEachSizeOffsetBy(-1, correctSizes,
[&](const std::vector<uint64_t>& sizes) { func(sizes, false); });

// Make sure correct sizes work
func(correctSizes, true);

// Make sure (every size + 1) works
WithEachSizeOffsetBy(1, correctSizes,
[&](const std::vector<uint64_t>& sizes) { func(sizes, true); });
}

// Convert binding type to a glsl string
std::string BindingTypeToStr(wgpu::BindingType type) {
switch (type) {
case wgpu::BindingType::UniformBuffer:
return "uniform";
case wgpu::BindingType::StorageBuffer:
return "buffer";
case wgpu::BindingType::ReadonlyStorageBuffer:
return "readonly buffer";
default:
UNREACHABLE();
return "";
}
}

// Creates a bind group with given bindings for shader text
std::string GenerateBindingString(const std::string& layout,
const std::vector<BindingDescriptor>& bindings) {
std::ostringstream ostream;
size_t ctr = 0;
for (const BindingDescriptor& b : bindings) {
ostream << "layout(" << layout << ", set = " << b.set << ", binding = " << b.binding
<< ") " << BindingTypeToStr(b.type) << " b" << ctr++ << "{\n"
<< b.text << ";\n};\n";
}
return ostream.str();
}

// Used for adding custom types available throughout the tests
static const std::string kStructs = "struct ThreeFloats{float f1; float f2; float f3;};\n";

// Creates a compute shader with given bindings
std::string CreateComputeShaderWithBindings(const std::string& layoutType,
const std::vector<BindingDescriptor>& bindings) {
return R"(
#version 450
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
)" +
kStructs + GenerateBindingString(layoutType, bindings) + "void main() {}";
}

// Creates a vertex shader with given bindings
std::string CreateVertexShaderWithBindings(const std::string& layoutType,
const std::vector<BindingDescriptor>& bindings) {
return "#version 450\n" + kStructs + GenerateBindingString(layoutType, bindings) +
"void main() {}";
}

// Creates a fragment shader with given bindings
std::string CreateFragmentShaderWithBindings(const std::string& layoutType,
const std::vector<BindingDescriptor>& bindings) {
return R"(
#version 450
layout(location = 0) out vec4 fragColor;
)" +
kStructs + GenerateBindingString(layoutType, bindings) + "void main() {}";
}

// Concatenates vectors containing BindingDescriptor
std::vector<BindingDescriptor> CombineBindings(
std::initializer_list<std::vector<BindingDescriptor>> bindings) {
std::vector<BindingDescriptor> result;
for (const std::vector<BindingDescriptor>& b : bindings) {
result.insert(result.end(), b.begin(), b.end());
}
return result;
}
} // namespace

class MinBufferSizeTestsBase : public ValidationTest {
public:
void SetUp() override {
ValidationTest::SetUp();
}

wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
wgpu::BufferDescriptor bufferDescriptor;
bufferDescriptor.size = bufferSize;
bufferDescriptor.usage = usage;

return device.CreateBuffer(&bufferDescriptor);
}

// Creates compute pipeline given a layout and shader
wgpu::ComputePipeline CreateComputePipeline(const std::vector<wgpu::BindGroupLayout>& layouts,
const std::string& shader) {
wgpu::ShaderModule csModule =
utils::CreateShaderModule(device, utils::SingleShaderStage::Compute, shader.c_str());

wgpu::ComputePipelineDescriptor csDesc;
csDesc.layout = nullptr;
if (!layouts.empty()) {
wgpu::PipelineLayoutDescriptor descriptor;
descriptor.bindGroupLayoutCount = layouts.size();
descriptor.bindGroupLayouts = layouts.data();
csDesc.layout = device.CreatePipelineLayout(&descriptor);
}
csDesc.computeStage.module = csModule;
csDesc.computeStage.entryPoint = "main";

return device.CreateComputePipeline(&csDesc);
}

// Creates compute pipeline with default layout
wgpu::ComputePipeline CreateComputePipelineWithDefaultLayout(const std::string& shader) {
return CreateComputePipeline({}, shader);
}

// Creates render pipeline given a layout and shaders
wgpu::RenderPipeline CreateRenderPipeline(const std::vector<wgpu::BindGroupLayout>& layouts,
const std::string& vertexShader,
const std::string& fragShader) {
wgpu::ShaderModule vsModule = utils::CreateShaderModule(
device, utils::SingleShaderStage::Vertex, vertexShader.c_str());

wgpu::ShaderModule fsModule = utils::CreateShaderModule(
device, utils::SingleShaderStage::Fragment, fragShader.c_str());

utils::ComboRenderPipelineDescriptor pipelineDescriptor(device);
pipelineDescriptor.vertexStage.module = vsModule;
pipelineDescriptor.cFragmentStage.module = fsModule;
pipelineDescriptor.layout = nullptr;
if (!layouts.empty()) {
wgpu::PipelineLayoutDescriptor descriptor;
descriptor.bindGroupLayoutCount = layouts.size();
descriptor.bindGroupLayouts = layouts.data();
pipelineDescriptor.layout = device.CreatePipelineLayout(&descriptor);
}

return device.CreateRenderPipeline(&pipelineDescriptor);
}

// Creates render pipeline with default layout
wgpu::RenderPipeline CreateRenderPipelineWithDefaultLayout(const std::string& vertexShader,
const std::string& fragShader) {
return CreateRenderPipeline({}, vertexShader, fragShader);
}

// Creates bind group layout with given minimum sizes for each binding
wgpu::BindGroupLayout CreateBindGroupLayout(const std::vector<BindingDescriptor>& bindings,
const std::vector<uint64_t>& minimumSizes) {
ASSERT(bindings.size() == minimumSizes.size());
std::vector<wgpu::BindGroupLayoutEntry> entries;

for (size_t i = 0; i < bindings.size(); ++i) {
const BindingDescriptor& b = bindings[i];
wgpu::BindGroupLayoutEntry e = {};
e.binding = b.binding;
e.type = b.type;
e.visibility = b.visibility;
e.minBufferBindingSize = minimumSizes[i];
entries.push_back(e);
}

wgpu::BindGroupLayoutDescriptor descriptor;
descriptor.entryCount = static_cast<uint32_t>(entries.size());
descriptor.entries = entries.data();
return device.CreateBindGroupLayout(&descriptor);
}

// Extract the first bind group from a compute shader
wgpu::BindGroupLayout GetBGLFromComputeShader(const std::string& shader, uint32_t index) {
wgpu::ComputePipeline pipeline = CreateComputePipelineWithDefaultLayout(shader);
return pipeline.GetBindGroupLayout(index);
}

// Extract the first bind group from a render pass
wgpu::BindGroupLayout GetBGLFromRenderShaders(const std::string& vertexShader,
const std::string& fragShader,
uint32_t index) {
wgpu::RenderPipeline pipeline =
CreateRenderPipelineWithDefaultLayout(vertexShader, fragShader);
return pipeline.GetBindGroupLayout(index);
}

// Create a bind group with given binding sizes for each entry (backed by the same buffer)
wgpu::BindGroup CreateBindGroup(wgpu::BindGroupLayout layout,
const std::vector<BindingDescriptor>& bindings,
const std::vector<uint64_t>& bindingSizes) {
ASSERT(bindings.size() == bindingSizes.size());
wgpu::Buffer buffer =
CreateBuffer(1024, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);

std::vector<wgpu::BindGroupEntry> entries;
entries.reserve(bindingSizes.size());

for (uint32_t i = 0; i < bindingSizes.size(); ++i) {
wgpu::BindGroupEntry entry = {};
entry.binding = bindings[i].binding;
entry.buffer = buffer;
ASSERT(bindingSizes[i] < 1024);
entry.size = bindingSizes[i];
entries.push_back(entry);
}

wgpu::BindGroupDescriptor descriptor;
descriptor.layout = layout;
descriptor.entryCount = entries.size();
descriptor.entries = entries.data();

return device.CreateBindGroup(&descriptor);
}

// Runs a single dispatch with given pipeline and bind group (to test lazy validation during
// dispatch)
void TestDispatch(const wgpu::ComputePipeline& computePipeline,
const std::vector<wgpu::BindGroup>& bindGroups,
bool expectation) {
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
computePassEncoder.SetPipeline(computePipeline);
for (size_t i = 0; i < bindGroups.size(); ++i) {
computePassEncoder.SetBindGroup(i, bindGroups[i]);
}
computePassEncoder.Dispatch(1);
computePassEncoder.EndPass();
if (!expectation) {
ASSERT_DEVICE_ERROR(commandEncoder.Finish());
} else {
commandEncoder.Finish();
}
}

// Runs a single draw with given pipeline and bind group (to test lazy validation during draw)
void TestDraw(const wgpu::RenderPipeline& renderPipeline,
const std::vector<wgpu::BindGroup>& bindGroups,
bool expectation) {
DummyRenderPass renderPass(device);

wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
renderPassEncoder.SetPipeline(renderPipeline);
for (size_t i = 0; i < bindGroups.size(); ++i) {
renderPassEncoder.SetBindGroup(i, bindGroups[i]);
}
renderPassEncoder.Draw(3);
renderPassEncoder.EndPass();
if (!expectation) {
ASSERT_DEVICE_ERROR(commandEncoder.Finish());
} else {
commandEncoder.Finish();
}
}
};

// The check between BGL and pipeline at pipeline creation time
class MinBufferSizePipelineCreationTests : public MinBufferSizeTestsBase {};

// Pipeline can be created if minimum buffer size in layout is specified as 0
TEST_F(MinBufferSizePipelineCreationTests, ZeroMinBufferSize) {
std::vector<BindingDescriptor> bindings = {{0, 0, "float a; float b", 8}, {0, 1, "float c", 4}};

std::string computeShader = CreateComputeShaderWithBindings("std140", bindings);
std::string vertexShader = CreateVertexShaderWithBindings("std140", {});
std::string fragShader = CreateFragmentShaderWithBindings("std140", bindings);

wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {0, 0});
CreateRenderPipeline({layout}, vertexShader, fragShader);
CreateComputePipeline({layout}, computeShader);
}

// Fail if layout given has non-zero minimum sizes smaller than shader requirements
TEST_F(MinBufferSizePipelineCreationTests, LayoutSizesTooSmall) {
std::vector<BindingDescriptor> bindings = {{0, 0, "float a; float b", 8}, {0, 1, "float c", 4}};

std::string computeShader = CreateComputeShaderWithBindings("std140", bindings);
std::string vertexShader = CreateVertexShaderWithBindings("std140", {});
std::string fragShader = CreateFragmentShaderWithBindings("std140", bindings);

CheckSizeBounds({8, 4}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, sizes);
if (expectation) {
CreateRenderPipeline({layout}, vertexShader, fragShader);
CreateComputePipeline({layout}, computeShader);
} else {
ASSERT_DEVICE_ERROR(CreateRenderPipeline({layout}, vertexShader, fragShader));
ASSERT_DEVICE_ERROR(CreateComputePipeline({layout}, computeShader));
}
});
}

// Fail if layout given has non-zero minimum sizes smaller than shader requirements
TEST_F(MinBufferSizePipelineCreationTests, LayoutSizesTooSmallMultipleGroups) {
std::vector<BindingDescriptor> bg0Bindings = {{0, 0, "float a; float b", 8},
{0, 1, "float c", 4}};
std::vector<BindingDescriptor> bg1Bindings = {{1, 0, "float d; float e; float f", 12},
{1, 1, "mat2 g", 32}};
std::vector<BindingDescriptor> bindings = CombineBindings({bg0Bindings, bg1Bindings});

std::string computeShader = CreateComputeShaderWithBindings("std140", bindings);
std::string vertexShader = CreateVertexShaderWithBindings("std140", {});
std::string fragShader = CreateFragmentShaderWithBindings("std140", bindings);

CheckSizeBounds({8, 4, 12, 32}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
wgpu::BindGroupLayout layout0 = CreateBindGroupLayout(bg0Bindings, {sizes[0], sizes[1]});
wgpu::BindGroupLayout layout1 = CreateBindGroupLayout(bg1Bindings, {sizes[2], sizes[3]});
if (expectation) {
CreateRenderPipeline({layout0, layout1}, vertexShader, fragShader);
CreateComputePipeline({layout0, layout1}, computeShader);
} else {
ASSERT_DEVICE_ERROR(CreateRenderPipeline({layout0, layout1}, vertexShader, fragShader));
ASSERT_DEVICE_ERROR(CreateComputePipeline({layout0, layout1}, computeShader));
}
});
}

// The check between the BGL and the bindings at bindgroup creation time
class MinBufferSizeBindGroupCreationTests : public MinBufferSizeTestsBase {};

// Fail if a binding is smaller than minimum buffer size
TEST_F(MinBufferSizeBindGroupCreationTests, BindingTooSmall) {
std::vector<BindingDescriptor> bindings = {{0, 0, "float a; float b", 8}, {0, 1, "float c", 4}};
wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {8, 4});

CheckSizeBounds({8, 4}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
if (expectation) {
CreateBindGroup(layout, bindings, sizes);
} else {
ASSERT_DEVICE_ERROR(CreateBindGroup(layout, bindings, sizes));
}
});
}

// Check two layouts with different minimum size are unequal
TEST_F(MinBufferSizeBindGroupCreationTests, LayoutEquality) {
auto MakeLayout = [&](uint64_t size) {
return utils::MakeBindGroupLayout(
device, {{0, wgpu::ShaderStage::Compute, wgpu::BindingType::UniformBuffer, false, false,
wgpu::TextureViewDimension::Undefined, wgpu::TextureComponentType::Float,
wgpu::TextureFormat::Undefined, size}});
};

EXPECT_EQ(MakeLayout(0).Get(), MakeLayout(0).Get());
EXPECT_NE(MakeLayout(0).Get(), MakeLayout(4).Get());
}

// The check between the bindgroup binding sizes and the required pipeline sizes at draw time
class MinBufferSizeDrawTimeValidationTests : public MinBufferSizeTestsBase {};

// Fail if binding sizes are too small at draw time
TEST_F(MinBufferSizeDrawTimeValidationTests, ZeroMinSizeAndTooSmallBinding) {
std::vector<BindingDescriptor> bindings = {{0, 0, "float a; float b", 8}, {0, 1, "float c", 4}};

std::string computeShader = CreateComputeShaderWithBindings("std140", bindings);
std::string vertexShader = CreateVertexShaderWithBindings("std140", {});
std::string fragShader = CreateFragmentShaderWithBindings("std140", bindings);

wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {0, 0});

wgpu::ComputePipeline computePipeline = CreateComputePipeline({layout}, computeShader);
wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({layout}, vertexShader, fragShader);

CheckSizeBounds({8, 4}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
wgpu::BindGroup bindGroup = CreateBindGroup(layout, bindings, sizes);
TestDispatch(computePipeline, {bindGroup}, expectation);
TestDraw(renderPipeline, {bindGroup}, expectation);
});
}

// Draw time validation works for non-contiguous bindings
TEST_F(MinBufferSizeDrawTimeValidationTests, UnorderedBindings) {
std::vector<BindingDescriptor> bindings = {{0, 2, "float a; float b", 8},
{0, 0, "float c", 4},
{0, 4, "float d; float e; float f", 12}};

std::string computeShader = CreateComputeShaderWithBindings("std140", bindings);
std::string vertexShader = CreateVertexShaderWithBindings("std140", {});
std::string fragShader = CreateFragmentShaderWithBindings("std140", bindings);

wgpu::BindGroupLayout layout = CreateBindGroupLayout(bindings, {0, 0, 0});

wgpu::ComputePipeline computePipeline = CreateComputePipeline({layout}, computeShader);
wgpu::RenderPipeline renderPipeline = CreateRenderPipeline({layout}, vertexShader, fragShader);

CheckSizeBounds({8, 4, 12}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
wgpu::BindGroup bindGroup = CreateBindGroup(layout, bindings, sizes);
TestDispatch(computePipeline, {bindGroup}, expectation);
TestDraw(renderPipeline, {bindGroup}, expectation);
});
}

// Draw time validation works for multiple bind groups
TEST_F(MinBufferSizeDrawTimeValidationTests, MultipleGroups) {
std::vector<BindingDescriptor> bg0Bindings = {{0, 0, "float a; float b", 8},
{0, 1, "float c", 4}};
std::vector<BindingDescriptor> bg1Bindings = {{1, 0, "float d; float e; float f", 12},
{1, 1, "mat2 g", 32}};
std::vector<BindingDescriptor> bindings = CombineBindings({bg0Bindings, bg1Bindings});

std::string computeShader = CreateComputeShaderWithBindings("std140", bindings);
std::string vertexShader = CreateVertexShaderWithBindings("std140", {});
std::string fragShader = CreateFragmentShaderWithBindings("std140", bindings);

wgpu::BindGroupLayout layout0 = CreateBindGroupLayout(bg0Bindings, {0, 0});
wgpu::BindGroupLayout layout1 = CreateBindGroupLayout(bg1Bindings, {0, 0});

wgpu::ComputePipeline computePipeline =
CreateComputePipeline({layout0, layout1}, computeShader);
wgpu::RenderPipeline renderPipeline =
CreateRenderPipeline({layout0, layout1}, vertexShader, fragShader);

CheckSizeBounds({8, 4, 12, 32}, [&](const std::vector<uint64_t>& sizes, bool expectation) {
wgpu::BindGroup bindGroup0 = CreateBindGroup(layout0, bg0Bindings, {sizes[0], sizes[1]});
wgpu::BindGroup bindGroup1 = CreateBindGroup(layout0, bg0Bindings, {sizes[2], sizes[3]});
TestDispatch(computePipeline, {bindGroup0, bindGroup1}, expectation);
TestDraw(renderPipeline, {bindGroup0, bindGroup1}, expectation);
});
}

// The correctness of minimum buffer size for the defaulted layout for a pipeline
class MinBufferSizeDefaultLayoutTests : public MinBufferSizeTestsBase {
public:
// Checks BGL |layout| has minimum buffer sizes equal to sizes in |bindings|
void CheckLayoutBindingSizeValidation(const wgpu::BindGroupLayout& layout,
const std::vector<BindingDescriptor>& bindings) {
std::vector<uint64_t> correctSizes;
correctSizes.reserve(bindings.size());
for (const BindingDescriptor& b : bindings) {
correctSizes.push_back(b.size);
}

CheckSizeBounds(correctSizes, [&](const std::vector<uint64_t>& sizes, bool expectation) {
if (expectation) {
CreateBindGroup(layout, bindings, sizes);
} else {
ASSERT_DEVICE_ERROR(CreateBindGroup(layout, bindings, sizes));
}
});
}

// Constructs shaders with given layout type and bindings, checking defaulted sizes match sizes
// in |bindings|
void CheckShaderBindingSizeReflection(
const std::string& layoutType,
std::initializer_list<std::vector<BindingDescriptor>> bindings) {
std::vector<BindingDescriptor> combinedBindings = CombineBindings(bindings);
std::string computeShader = CreateComputeShaderWithBindings(layoutType, combinedBindings);
std::string vertexShader = CreateVertexShaderWithBindings(layoutType, {});
std::string fragShader = CreateFragmentShaderWithBindings(layoutType, combinedBindings);

size_t i = 0;
for (const std::vector<BindingDescriptor>& b : bindings) {
wgpu::BindGroupLayout computeLayout = GetBGLFromComputeShader(computeShader, i);
wgpu::BindGroupLayout renderLayout =
GetBGLFromRenderShaders(vertexShader, fragShader, i);

CheckLayoutBindingSizeValidation(computeLayout, b);
CheckLayoutBindingSizeValidation(renderLayout, b);
++i;
}
}
};

// Various bindings in std140 have correct minimum size reflection
TEST_F(MinBufferSizeDefaultLayoutTests, std140Inferred) {
CheckShaderBindingSizeReflection("std140", {{{0, 0, "float a", 4},
{0, 1, "float b[]", 16},
{0, 2, "mat2 c", 32},
{0, 3, "int d; float e[]", 32},
{0, 4, "ThreeFloats f", 12},
{0, 5, "ThreeFloats g[]", 16}}});
}

// Various bindings in std430 have correct minimum size reflection
TEST_F(MinBufferSizeDefaultLayoutTests, std430Inferred) {
CheckShaderBindingSizeReflection("std430", {{{0, 0, "float a", 4},
{0, 1, "float b[]", 4},
{0, 2, "mat2 c", 16},
{0, 3, "int d; float e[]", 8},
{0, 4, "ThreeFloats f", 12},
{0, 5, "ThreeFloats g[]", 12}}});
}

// Sizes are inferred for all binding types with std140 layout
TEST_F(MinBufferSizeDefaultLayoutTests, std140BindingTypes) {
CheckShaderBindingSizeReflection(
"std140", {{{0, 0, "int d; float e[]", 32, wgpu::BindingType::UniformBuffer},
{0, 1, "ThreeFloats f", 12, wgpu::BindingType::StorageBuffer},
{0, 2, "ThreeFloats g[]", 16, wgpu::BindingType::ReadonlyStorageBuffer}}});
}

// Sizes are inferred for all binding types with std430 layout
TEST_F(MinBufferSizeDefaultLayoutTests, std430BindingTypes) {
CheckShaderBindingSizeReflection(
"std430", {{{0, 0, "float a", 4, wgpu::BindingType::StorageBuffer},
{0, 1, "ThreeFloats b[]", 12, wgpu::BindingType::ReadonlyStorageBuffer}}});
}

// Various bindings have correct size across multiple groups
TEST_F(MinBufferSizeDefaultLayoutTests, std140MultipleBindGroups) {
CheckShaderBindingSizeReflection("std140",
{{{0, 0, "float a", 4}, {0, 1, "float b[]", 16}},
{{1, 2, "mat2 c", 32}, {1, 3, "int d; float e[]", 32}},
{{2, 4, "ThreeFloats f", 12}},
{{3, 5, "ThreeFloats g[]", 16}}});
}

// Various bindings have correct size across multiple groups
TEST_F(MinBufferSizeDefaultLayoutTests, std430MultipleBindGroups) {
CheckShaderBindingSizeReflection("std430",
{{{0, 0, "float a", 4}, {0, 1, "float b[]", 4}},
{{1, 2, "mat2 c", 16}, {1, 3, "int d; float e[]", 8}},
{{2, 4, "ThreeFloats f", 12}},
{{3, 5, "ThreeFloats g[]", 12}}});
}

// Minimum size should be the max requirement of both vertex and fragment stages
TEST_F(MinBufferSizeDefaultLayoutTests, RenderPassConsidersBothStages) {
std::string vertexShader = CreateVertexShaderWithBindings(
"std140", {{0, 0, "float a", 4, wgpu::BindingType::UniformBuffer},
{0, 1, "float b[]", 16, wgpu::BindingType::UniformBuffer}});
std::string fragShader = CreateFragmentShaderWithBindings(
"std140", {{0, 0, "float a; float b", 8, wgpu::BindingType::UniformBuffer},
{0, 1, "float c; float d", 8, wgpu::BindingType::UniformBuffer}});

wgpu::BindGroupLayout renderLayout = GetBGLFromRenderShaders(vertexShader, fragShader, 0);

CheckLayoutBindingSizeValidation(renderLayout, {{0, 0, "", 8}, {0, 1, "", 16}});
}
@@ -66,8 +66,8 @@ namespace {
InitializeRenderPipelineDescriptor(&descriptor);
pipeline = device.CreateRenderPipeline(&descriptor);

float data[4];
wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 4 * sizeof(float),
float data[8];
wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 8 * sizeof(float),
wgpu::BufferUsage::Uniform);

constexpr static float kVertices[] = {-1.f, 1.f, 1.f, -1.f, -1.f, 1.f};

@@ -84,13 +84,13 @@ namespace {
utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage);

bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 4 * sizeof(float)}});
bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 8 * sizeof(float)}});
bg1 = utils::MakeBindGroup(
device, bgls[1],
{{0, buffer, 0, 4 * sizeof(float)}, {1, storageBuffer, 0, sizeof(kVertices)}});

bg1Vertex = utils::MakeBindGroup(device, bgls[1],
{{0, buffer, 0, 4 * sizeof(float)},
{{0, buffer, 0, 8 * sizeof(float)},
{1, vertexStorageBuffer, 0, sizeof(kVertices)}});
}
@@ -315,7 +315,8 @@ TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
false,
WGPUTextureViewDimension_2D,
WGPUTextureComponentType_Float,
WGPUTextureFormat_RGBA8Unorm},
WGPUTextureFormat_RGBA8Unorm,
0},
{1,
WGPUShaderStage_Vertex,
WGPUBindingType_SampledTexture,

@@ -323,7 +324,8 @@ TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
false,
WGPUTextureViewDimension_2D,
WGPUTextureComponentType_Float,
WGPUTextureFormat_RGBA8Unorm},
WGPUTextureFormat_RGBA8Unorm,
0},
{2,
static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
WGPUBindingType_UniformBuffer,

@@ -331,7 +333,8 @@ TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
false,
WGPUTextureViewDimension_2D,
WGPUTextureComponentType_Float,
WGPUTextureFormat_RGBA8Unorm},
WGPUTextureFormat_RGBA8Unorm,
0},
};
WGPUBindGroupLayoutDescriptor bglDescriptor = {};
bglDescriptor.entryCount = NUM_BINDINGS;
@@ -732,12 +732,11 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);

bindGroups.push_back(
utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
{{0, transformBuffer, 0, sizeof(transformBuffer)},
{1, sampler},
{2, textureView},
{3, uniformBuffer, 0, sizeof(fillColor)}}));
bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
{{0, transformBuffer, 0, sizeof(transform)},
{1, sampler},
{2, textureView},
{3, uniformBuffer, 0, sizeof(fillColor)}}));
}

std::array<float, 4> redColor = {1, 0, 0, 1};