Remove several compile-time constants in favor of limits
Bug: dawn:685
Change-Id: Ifac25116c741fdab7b6a8093b4230065beca4773
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/65483
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>

This commit is contained in:
parent dcc520dc23
commit 91851e23a8
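The recurring pattern in this change: a hard-coded kMax*/kMin* constant from common/Constants.h is replaced by a lookup on the device's CombinedLimits. A minimal self-contained sketch of the before/after shape (the miniature DeviceBase/CombinedLimits below are illustrative stand-ins, not Dawn's full classes):

#include <cstdint>

// Illustrative miniatures of the types the diff relies on.
struct Limits {
    uint32_t maxComputeWorkgroupsPerDimension = 65535u;  // base WebGPU value
};
struct CombinedLimits {
    Limits v1;
};
class DeviceBase {
  public:
    const CombinedLimits& GetLimits() const { return mLimits; }
  private:
    CombinedLimits mLimits;
};

// Before: if (size > kMaxComputePerDimensionDispatchSize) { ... }
// After: the bound is read off the device at validation time.
bool DispatchSizeIsValid(const DeviceBase* device, uint32_t size) {
    return size <= device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
}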
@@ -37,7 +37,8 @@ float RandomFloat(float min, float max) {
 constexpr size_t kNumTriangles = 10000;
 
-struct alignas(kMinUniformBufferOffsetAlignment) ShaderData {
+// Aligned as minUniformBufferOffsetAlignment
+struct alignas(256) ShaderData {
     float scale;
     float time;
     float offsetX;
@@ -27,14 +27,6 @@ static constexpr uint32_t kTextureBytesPerRowAlignment = 256u;
 static constexpr uint32_t kMaxInterStageShaderComponents = 60u;
 static constexpr uint32_t kMaxInterStageShaderVariables = kMaxInterStageShaderComponents / 4;
 
-// Compute constants
-static constexpr uint32_t kMaxComputeWorkgroupStorageSize = 16352u;
-static constexpr uint32_t kMaxComputeWorkgroupInvocations = 256u;
-static constexpr uint32_t kMaxComputePerDimensionDispatchSize = 65535u;
-static constexpr uint32_t kMaxComputeWorkgroupSizeX = 256;
-static constexpr uint32_t kMaxComputeWorkgroupSizeY = 256;
-static constexpr uint32_t kMaxComputeWorkgroupSizeZ = 64;
-
 // Per stage limits
 static constexpr uint32_t kMaxSampledTexturesPerShaderStage = 16;
 static constexpr uint32_t kMaxSamplersPerShaderStage = 16;
@@ -46,12 +38,6 @@ static constexpr uint32_t kMaxUniformBuffersPerShaderStage = 12;
 static constexpr uint32_t kMaxDynamicUniformBuffersPerPipelineLayout = 8u;
 static constexpr uint32_t kMaxDynamicStorageBuffersPerPipelineLayout = 4u;
 
-// Buffer binding constraints
-static constexpr uint64_t kMaxUniformBufferBindingSize = 16384u;
-static constexpr uint64_t kMaxStorageBufferBindingSize = 134217728u;
-static constexpr uint64_t kMinUniformBufferOffsetAlignment = 256u;
-static constexpr uint64_t kMinStorageBufferOffsetAlignment = 256u;
-
 // Indirect command sizes
 static constexpr uint64_t kDispatchIndirectSize = 3 * sizeof(uint32_t);
 static constexpr uint64_t kDrawIndirectSize = 4 * sizeof(uint32_t);
@@ -61,15 +47,6 @@ static constexpr uint64_t kDrawIndexedIndirectSize = 5 * sizeof(uint32_t);
 static constexpr float kLodMin = 0.0;
 static constexpr float kLodMax = 1000.0;
 
-// Max texture size constants
-static constexpr uint32_t kMaxTextureDimension1D = 8192u;
-static constexpr uint32_t kMaxTextureDimension2D = 8192u;
-static constexpr uint32_t kMaxTextureDimension3D = 2048u;
-static constexpr uint32_t kMaxTextureArrayLayers = 256u;
-static constexpr uint32_t kMaxTexture2DMipLevels = 14u;
-static_assert(1 << (kMaxTexture2DMipLevels - 1) == kMaxTextureDimension2D,
-              "kMaxTexture2DMipLevels and kMaxTextureDimension2D size mismatch");
-
 // Offset alignment for CopyB2B. Strictly speaking this alignment is required only
 // on macOS, but we decide to do it on all platforms.
 static constexpr uint64_t kCopyBufferToBufferOffsetAlignment = 4u;
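For reference, the removed constants map onto runtime limit fields; the pairing below is inferred from the call sites later in this diff, not stated in the commit message:

// Inferred constant -> limit-field mapping (comment block only):
//   kMaxComputeWorkgroupStorageSize      -> limits.v1.maxComputeWorkgroupStorageSize
//   kMaxComputeWorkgroupInvocations      -> limits.v1.maxComputeInvocationsPerWorkgroup
//   kMaxComputePerDimensionDispatchSize  -> limits.v1.maxComputeWorkgroupsPerDimension
//   kMaxComputeWorkgroupSizeX/Y/Z        -> limits.v1.maxComputeWorkgroupSizeX/Y/Z
//   kMax/MinUniform|StorageBufferBindingSize/OffsetAlignment
//                                        -> limits.v1 fields of the same camelCase names
//   kMaxTextureDimension1D/2D/3D, kMaxTextureArrayLayers
//                                        -> limits.v1.maxTextureDimension1D/2D/3D,
//                                           limits.v1.maxTextureArrayLayers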
@@ -50,19 +50,22 @@ namespace dawn_native {
             switch (bindingInfo.buffer.type) {
                 case wgpu::BufferBindingType::Uniform:
                     requiredUsage = wgpu::BufferUsage::Uniform;
-                    maxBindingSize = kMaxUniformBufferBindingSize;
-                    requiredBindingAlignment = kMinUniformBufferOffsetAlignment;
+                    maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
+                    requiredBindingAlignment =
+                        device->GetLimits().v1.minUniformBufferOffsetAlignment;
                     break;
                 case wgpu::BufferBindingType::Storage:
                 case wgpu::BufferBindingType::ReadOnlyStorage:
                     requiredUsage = wgpu::BufferUsage::Storage;
-                    maxBindingSize = kMaxStorageBufferBindingSize;
-                    requiredBindingAlignment = kMinStorageBufferOffsetAlignment;
+                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+                    requiredBindingAlignment =
+                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
                     break;
                 case kInternalStorageBufferBinding:
                     requiredUsage = kInternalStorageBuffer;
-                    maxBindingSize = kMaxStorageBufferBindingSize;
-                    requiredBindingAlignment = kMinStorageBufferOffsetAlignment;
+                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+                    requiredBindingAlignment =
+                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
                     break;
                 case wgpu::BufferBindingType::Undefined:
                     UNREACHABLE();
@@ -28,8 +28,8 @@ namespace dawn_native {
 
     namespace {
 
-        MaybeError ValidatePerDimensionDispatchSizeLimit(uint32_t size) {
-            if (size > kMaxComputePerDimensionDispatchSize) {
+        MaybeError ValidatePerDimensionDispatchSizeLimit(const DeviceBase* device, uint32_t size) {
+            if (size > device->GetLimits().v1.maxComputeWorkgroupsPerDimension) {
                 return DAWN_VALIDATION_ERROR("Dispatch size exceeds defined limits");
             }
 
@@ -85,9 +85,9 @@ namespace dawn_native {
         [&](CommandAllocator* allocator) -> MaybeError {
             if (IsValidationEnabled()) {
                 DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-                DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(x));
-                DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(y));
-                DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(z));
+                DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(GetDevice(), x));
+                DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(GetDevice(), y));
+                DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(GetDevice(), z));
             }
 
             // Record the synchronization scope for Dispatch, which is just the current
@@ -1560,6 +1560,10 @@ namespace dawn_native {
         }
     }
 
+    const CombinedLimits& DeviceBase::GetLimits() const {
+        return mLimits;
+    }
+
     AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
         return mAsyncTaskManager.get();
     }
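With DeviceBase::GetLimits() in place, validation code anywhere in dawn_native can read the device's limits. A hedged usage sketch (assuming mLimits was populated at device creation; maxTextureDimension2D is one of the fields this diff actually reads):

const CombinedLimits& limits = device->GetLimits();
bool fits = descriptor->width <= limits.v1.maxTextureDimension2D;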
@@ -336,6 +336,8 @@ namespace dawn_native {
 
         virtual float GetTimestampPeriodInNS() const = 0;
 
+        const CombinedLimits& GetLimits() const;
+
         AsyncTaskManager* GetAsyncTaskManager() const;
         CallbackTaskManager* GetCallbackTaskManager() const;
         dawn_platform::WorkerTaskPool* GetWorkerTaskPool() const;
@@ -17,6 +17,7 @@
 #include "common/Constants.h"
 #include "common/RefCounted.h"
 #include "dawn_native/IndirectDrawValidationEncoder.h"
+#include "dawn_native/Limits.h"
 #include "dawn_native/RenderBundle.h"
 
 #include <algorithm>
@@ -24,15 +25,10 @@
 
 namespace dawn_native {
 
-    namespace {
-
-        // In the unlikely scenario that indirect offsets used over a single buffer span more than
-        // this length of the buffer, we split the validation work into multiple batches.
-        constexpr uint64_t kMaxBatchOffsetRange = kMaxStorageBufferBindingSize -
-                                                  kMinStorageBufferOffsetAlignment -
-                                                  kDrawIndexedIndirectSize;
-
-    }  // namespace
+    uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
+        return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
+               kDrawIndexedIndirectSize;
+    }
 
     IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
         BufferBase* indirectBuffer)
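With the WebGPU base limits that the deleted constants encoded, the new function returns the same value the old kMaxBatchOffsetRange had. A small arithmetic check under that assumption:

#include <cstdint>

// Base values from the deleted constants: maxStorageBufferBindingSize = 134217728
// (128 MiB), minStorageBufferOffsetAlignment = 256, kDrawIndexedIndirectSize = 20.
constexpr uint64_t kBaseBatchOffsetRange = 134217728u - 256u - 5u * sizeof(uint32_t);
static_assert(kBaseBatchOffsetRange == 134217452u,
              "matches the value the old compile-time constant produced");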
@@ -40,12 +36,14 @@ namespace dawn_native {
     }
 
     void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndexedIndirectDraw(
+        uint32_t maxDrawCallsPerIndirectValidationBatch,
+        uint32_t maxBatchOffsetRange,
         IndexedIndirectDraw draw) {
         const uint64_t newOffset = draw.clientBufferOffset;
         auto it = mBatches.begin();
         while (it != mBatches.end()) {
             IndexedIndirectValidationBatch& batch = *it;
-            if (batch.draws.size() >= kMaxDrawCallsPerIndirectValidationBatch) {
+            if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
                 // This batch is full. If its minOffset is to the right of the new offset, we can
                 // just insert a new batch here.
                 if (newOffset < batch.minOffset) {
@@ -62,16 +60,14 @@ namespace dawn_native {
                 return;
             }
 
-            if (newOffset < batch.minOffset &&
-                batch.maxOffset - newOffset <= kMaxBatchOffsetRange) {
+            if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
                 // We can extend this batch to the left in order to fit the new offset.
                 batch.minOffset = newOffset;
                 batch.draws.push_back(std::move(draw));
                 return;
             }
 
-            if (newOffset > batch.maxOffset &&
-                newOffset - batch.minOffset <= kMaxBatchOffsetRange) {
+            if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
                 // We can extend this batch to the right in order to fit the new offset.
                 batch.maxOffset = newOffset;
                 batch.draws.push_back(std::move(draw));
@@ -95,14 +91,16 @@ namespace dawn_native {
     }
 
     void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
+        uint32_t maxDrawCallsPerIndirectValidationBatch,
+        uint32_t maxBatchOffsetRange,
         const IndexedIndirectValidationBatch& newBatch) {
         auto it = mBatches.begin();
         while (it != mBatches.end()) {
             IndexedIndirectValidationBatch& batch = *it;
             uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
             uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
-            if (max - min <= kMaxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
-                                                         kMaxDrawCallsPerIndirectValidationBatch) {
+            if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
+                                                        maxDrawCallsPerIndirectValidationBatch) {
                 // This batch fits within the limits of an existing batch. Merge it.
                 batch.minOffset = min;
                 batch.maxOffset = max;
@@ -124,7 +122,10 @@ namespace dawn_native {
         return mBatches;
     }
 
-    IndirectDrawMetadata::IndirectDrawMetadata() = default;
+    IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
+        : mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)),
+          mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)) {
+    }
 
     IndirectDrawMetadata::~IndirectDrawMetadata() = default;
 
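Because the batch limits now depend on the device, IndirectDrawMetadata can no longer be default-constructed; callers hand in the device's limits, as the render-encoder hunks later in this diff do. A sketch of the new call shape:

// Limits are sampled once at construction and cached in the two members added
// further down (mMaxDrawCallsPerBatch, mMaxBatchOffsetRange).
IndirectDrawMetadata metadata(device->GetLimits());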
@@ -150,7 +151,7 @@ namespace dawn_native {
         if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
             // We already have batches for the same config. Merge the new ones in.
             for (const IndexedIndirectValidationBatch& batch : entry.second.GetBatches()) {
-                it->second.AddBatch(batch);
+                it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
             }
         } else {
             mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, entry.second);
@@ -187,7 +188,8 @@ namespace dawn_native {
         IndexedIndirectDraw draw;
         draw.clientBufferOffset = indirectOffset;
         draw.bufferLocation = drawCmdIndirectBufferLocation;
-        it->second.AddIndexedIndirectDraw(std::move(draw));
+        it->second.AddIndexedIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange,
+                                          std::move(draw));
     }
 
 }  // namespace dawn_native
@@ -31,6 +31,11 @@
 namespace dawn_native {
 
     class RenderBundleBase;
+    struct CombinedLimits;
+
+    // In the unlikely scenario that indirect offsets used over a single buffer span more than
+    // this length of the buffer, we split the validation work into multiple batches.
+    uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
 
     // Metadata corresponding to the validation requirements of a single render pass. This metadata
     // is accumulated while its corresponding render pass is encoded, and is later used to encode
@@ -58,12 +63,16 @@ namespace dawn_native {
 
             // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
             // assigned (and deferred) buffer ref and relative offset before returning.
-            void AddIndexedIndirectDraw(IndexedIndirectDraw draw);
+            void AddIndexedIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
+                                        uint32_t maxBatchOffsetRange,
+                                        IndexedIndirectDraw draw);
 
             // Adds draw calls from an already-computed batch, e.g. from a previously encoded
             // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
             // it's added to mBatch.
-            void AddBatch(const IndexedIndirectValidationBatch& batch);
+            void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
+                          uint32_t maxBatchOffsetRange,
+                          const IndexedIndirectValidationBatch& batch);
 
             const std::vector<IndexedIndirectValidationBatch>& GetBatches() const;
 
@@ -87,7 +96,7 @@ namespace dawn_native {
         using IndexedIndirectBufferValidationInfoMap =
            std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
 
-        IndirectDrawMetadata();
+        explicit IndirectDrawMetadata(const CombinedLimits& limits);
         ~IndirectDrawMetadata();
 
         IndirectDrawMetadata(IndirectDrawMetadata&&);
@@ -105,6 +114,9 @@ namespace dawn_native {
       private:
         IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
         std::set<RenderBundleBase*> mAddedBundles;
+
+        uint32_t mMaxDrawCallsPerBatch;
+        uint32_t mMaxBatchOffsetRange;
     };
 
 }  // namespace dawn_native
@@ -188,12 +188,15 @@ namespace dawn_native {
 
     }  // namespace
 
-    const uint32_t kBatchDrawCallLimitByDispatchSize =
-        kMaxComputePerDimensionDispatchSize * kWorkgroupSize;
-    const uint32_t kBatchDrawCallLimitByStorageBindingSize =
-        (kMaxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
-    const uint32_t kMaxDrawCallsPerIndirectValidationBatch =
-        std::min(kBatchDrawCallLimitByDispatchSize, kBatchDrawCallLimitByStorageBindingSize);
+    uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
+        const uint64_t batchDrawCallLimitByDispatchSize =
+            static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
+        const uint64_t batchDrawCallLimitByStorageBindingSize =
+            (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
+        return static_cast<uint32_t>(
+            std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
+                      uint64_t(std::numeric_limits<uint32_t>::max())}));
+    }
 
     MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
                                                     CommandEncoder* commandEncoder,
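The brace-initializer form of std::min compares all three candidates at once; folding uint32_t's maximum into the list is what makes the final static_cast safe for arbitrarily large runtime limits. A standalone illustration of the idiom:

#include <algorithm>
#include <cstdint>
#include <limits>

// Returns min(a, b) clamped into uint32_t range without overflow or truncation.
uint32_t ClampedMin(uint64_t a, uint64_t b) {
    return static_cast<uint32_t>(
        std::min({a, b, uint64_t(std::numeric_limits<uint32_t>::max())}));
}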
@@ -232,13 +235,18 @@ namespace dawn_native {
             return {};
         }
 
+        const uint32_t maxStorageBufferBindingSize =
+            device->GetLimits().v1.maxStorageBufferBindingSize;
+        const uint32_t minStorageBufferOffsetAlignment =
+            device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
         for (auto& entry : bufferInfoMap) {
             const IndirectDrawMetadata::IndexedIndirectConfig& config = entry.first;
             BufferBase* clientIndirectBuffer = config.first;
             for (const IndirectDrawMetadata::IndexedIndirectValidationBatch& batch :
                  entry.second.GetBatches()) {
                 const uint64_t minOffsetFromAlignedBoundary =
-                    batch.minOffset % kMinStorageBufferOffsetAlignment;
+                    batch.minOffset % minStorageBufferOffsetAlignment;
                 const uint64_t minOffsetAlignedDown =
                     batch.minOffset - minOffsetFromAlignedBoundary;
 
@@ -253,18 +261,18 @@ namespace dawn_native {
 
                 newBatch.validatedParamsSize = batch.draws.size() * kDrawIndexedIndirectSize;
                 newBatch.validatedParamsOffset =
-                    Align(validatedParamsSize, kMinStorageBufferOffsetAlignment);
+                    Align(validatedParamsSize, minStorageBufferOffsetAlignment);
                 validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
-                if (validatedParamsSize > kMaxStorageBufferBindingSize) {
+                if (validatedParamsSize > maxStorageBufferBindingSize) {
                     return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
                 }
 
                 Pass* currentPass = passes.empty() ? nullptr : &passes.back();
                 if (currentPass && currentPass->clientIndirectBuffer == clientIndirectBuffer) {
                     uint64_t nextBatchDataOffset =
-                        Align(currentPass->batchDataSize, kMinStorageBufferOffsetAlignment);
+                        Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
                     uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
-                    if (newPassBatchDataSize <= kMaxStorageBufferBindingSize) {
+                    if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
                         // We can fit this batch in the current pass.
                         newBatch.dataBufferOffset = nextBatchDataOffset;
                         currentPass->batchDataSize = newPassBatchDataSize;
@@ -21,13 +21,14 @@
 namespace dawn_native {
 
     class CommandEncoder;
+    struct CombinedLimits;
     class DeviceBase;
     class RenderPassResourceUsageTracker;
 
     // The maximum number of draws call we can fit into a single validation batch. This is
     // essentially limited by the number of indirect parameter blocks that can fit into the maximum
-    // allowed storage binding size (about 6.7M).
-    extern const uint32_t kMaxDrawCallsPerIndirectValidationBatch;
+    // allowed storage binding size (with the base limits, it is about 6.7M).
+    uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
 
     MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
                                                     CommandEncoder* commandEncoder,
@@ -139,13 +139,12 @@ namespace dawn_native {
             uint64_t requiredAlignment;
             switch (bindingInfo.buffer.type) {
                 case wgpu::BufferBindingType::Uniform:
-                    requiredAlignment = kMinUniformBufferOffsetAlignment;
+                    requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
                     break;
                 case wgpu::BufferBindingType::Storage:
                 case wgpu::BufferBindingType::ReadOnlyStorage:
                 case kInternalStorageBufferBinding:
-                    requiredAlignment = kMinStorageBufferOffsetAlignment;
-                    requiredAlignment = kMinStorageBufferOffsetAlignment;
+                    requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
                     break;
                 case wgpu::BufferBindingType::Undefined:
                     UNREACHABLE();
@@ -44,7 +44,7 @@ namespace dawn_native {
     }
 
     RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
-        : ApiObjectBase(device, errorTag) {
+        : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {
     }
 
     ObjectType RenderBundleBase::GetType() const {
@@ -34,6 +34,7 @@ namespace dawn_native {
                                          EncodingContext* encodingContext,
                                          Ref<AttachmentState> attachmentState)
         : ProgrammablePassEncoder(device, encodingContext),
+          mIndirectDrawMetadata(device->GetLimits()),
           mAttachmentState(std::move(attachmentState)),
           mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
           mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
@@ -43,6 +44,7 @@ namespace dawn_native {
                                          EncodingContext* encodingContext,
                                          ErrorTag errorTag)
         : ProgrammablePassEncoder(device, encodingContext, errorTag),
+          mIndirectDrawMetadata(device->GetLimits()),
           mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
           mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
     }
@@ -609,10 +609,12 @@ namespace dawn_native {
     }
 
     ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
-        DeviceBase*,
+        const DeviceBase* device,
         const tint::Program* program) {
         ASSERT(program->IsValid());
 
+        const CombinedLimits& limits = device->GetLimits();
+
         EntryPointMetadataTable result;
 
         tint::inspector::Inspector inspector(program);
@@ -645,36 +647,32 @@ namespace dawn_native {
             DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
 
             if (metadata->stage == SingleShaderStage::Compute) {
-                DAWN_INVALID_IF(entryPoint.workgroup_size_x > kMaxComputeWorkgroupSizeX ||
-                                    entryPoint.workgroup_size_y > kMaxComputeWorkgroupSizeY ||
-                                    entryPoint.workgroup_size_z > kMaxComputeWorkgroupSizeZ,
-                                "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
-                                "maximum allowed (%u, %u, %u).",
-                                entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
-                                entryPoint.workgroup_size_z, kMaxComputeWorkgroupSizeX,
-                                kMaxComputeWorkgroupSizeY, kMaxComputeWorkgroupSizeZ);
+                DAWN_INVALID_IF(
+                    entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
+                        entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
+                        entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
+                    "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
+                    "maximum allowed (%u, %u, %u).",
+                    entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
+                    entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
+                    limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
 
-                // Dimensions have already been validated against their individual limits above.
-                // This assertion ensures that the product of such limited dimensions cannot
-                // possibly overflow a uint32_t.
-                static_assert(static_cast<uint64_t>(kMaxComputeWorkgroupSizeX) *
-                                      kMaxComputeWorkgroupSizeY * kMaxComputeWorkgroupSizeZ <=
-                                  std::numeric_limits<uint32_t>::max(),
-                              "Per-dimension workgroup size limits are too high");
-                uint32_t numInvocations = entryPoint.workgroup_size_x *
+                // Cast to uint64_t to avoid overflow in this multiplication.
+                uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
                                           entryPoint.workgroup_size_y *
                                           entryPoint.workgroup_size_z;
-                DAWN_INVALID_IF(numInvocations > kMaxComputeWorkgroupInvocations,
+                DAWN_INVALID_IF(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
                                 "The total number of workgroup invocations (%u) exceeds the "
                                 "maximum allowed (%u).",
-                                numInvocations, kMaxComputeWorkgroupInvocations);
+                                numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
 
                 const size_t workgroupStorageSize =
                     inspector.GetWorkgroupStorageSize(entryPoint.name);
-                DAWN_INVALID_IF(workgroupStorageSize > kMaxComputeWorkgroupStorageSize,
+                DAWN_INVALID_IF(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
                                 "The total use of workgroup storage (%u bytes) is larger than "
                                 "the maximum allowed (%u bytes).",
-                                workgroupStorageSize, kMaxComputeWorkgroupStorageSize);
+                                workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
 
                 metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
                 metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
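The deleted static_assert could guarantee at compile time that the product of the per-dimension maxima fit in 32 bits; with runtime limits that guarantee is gone, hence the widening cast above. A minimal illustration of the hazard and the fix:

#include <cstdint>

bool WorkgroupInvocationsFit(uint32_t x, uint32_t y, uint32_t z, uint32_t maxInvocations) {
    // x * y * z in 32 bits could wrap for large runtime limits; widen first.
    uint64_t numInvocations = static_cast<uint64_t>(x) * y * z;
    return numInvocations <= maxInvocations;
}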
@@ -90,8 +90,8 @@ namespace dawn_native {
             return DAWN_VALIDATION_ERROR("Swapchain size can't be empty");
         }
 
-        if (descriptor->width > kMaxTextureDimension2D ||
-            descriptor->height > kMaxTextureDimension2D) {
+        if (descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
+            descriptor->height > device->GetLimits().v1.maxTextureDimension2D) {
             return DAWN_VALIDATION_ERROR("Swapchain size too big");
         }
     }
@@ -173,19 +173,21 @@ namespace dawn_native {
             return {};
         }
 
-        MaybeError ValidateTextureSize(const TextureDescriptor* descriptor, const Format* format) {
+        MaybeError ValidateTextureSize(const DeviceBase* device,
+                                       const TextureDescriptor* descriptor,
+                                       const Format* format) {
             ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
                    descriptor->size.depthOrArrayLayers != 0);
 
+            const CombinedLimits& limits = device->GetLimits();
             Extent3D maxExtent;
             switch (descriptor->dimension) {
                 case wgpu::TextureDimension::e2D:
-                    maxExtent = {kMaxTextureDimension2D, kMaxTextureDimension2D,
-                                 kMaxTextureArrayLayers};
+                    maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
+                                 limits.v1.maxTextureArrayLayers};
                     break;
                 case wgpu::TextureDimension::e3D:
-                    maxExtent = {kMaxTextureDimension3D, kMaxTextureDimension3D,
-                                 kMaxTextureDimension3D};
+                    maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
+                                 limits.v1.maxTextureDimension3D};
                     break;
                 case wgpu::TextureDimension::e1D:
                 default:
@@ -210,8 +212,6 @@ namespace dawn_native {
                 "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
                 descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
 
-            ASSERT(descriptor->mipLevelCount <= kMaxTexture2DMipLevels);
-
             if (format->isCompressed) {
                 const TexelBlockInfo& blockInfo =
                     format->GetAspectInfo(wgpu::TextureAspect::All).block;
@@ -308,7 +308,7 @@ namespace dawn_native {
             "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
             descriptor->dimension, format->format);
 
-        DAWN_TRY(ValidateTextureSize(descriptor, format));
+        DAWN_TRY(ValidateTextureSize(device, descriptor, format));
 
         // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
         // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
@@ -555,12 +555,7 @@ namespace dawn_native {
     uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
                                               uint32_t arraySlice,
                                               Aspect aspect) const {
-        ASSERT(arraySlice <= kMaxTextureArrayLayers);
-        ASSERT(mipLevel <= kMaxTexture2DMipLevels);
         ASSERT(HasOneBit(aspect));
-        static_assert(
-            kMaxTexture2DMipLevels <= std::numeric_limits<uint32_t>::max() / kMaxTextureArrayLayers,
-            "texture size overflows uint32_t");
         return mipLevel +
                GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
     }
@@ -14,6 +14,7 @@
 
 #include "dawn_native/vulkan/AdapterVk.h"
 
+#include "dawn_native/Limits.h"
 #include "dawn_native/vulkan/BackendVk.h"
 #include "dawn_native/vulkan/DeviceVk.h"
 
@@ -78,6 +79,9 @@ namespace dawn_native { namespace vulkan {
     }
 
     MaybeError Adapter::CheckCoreWebGPUSupport() {
+        Limits baseLimits;
+        GetDefaultLimits(&baseLimits);
+
         // Needed for viewport Y-flip.
         if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
             return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
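GetDefaultLimits is assumed here to fill baseLimits with the WebGPU base-tier values, the same numbers the deleted constants hard-coded, so the checks below reject Vulkan devices that cannot satisfy even the defaults:

Limits baseLimits;
GetDefaultLimits(&baseLimits);
// e.g. with the base tier, baseLimits.maxTextureDimension2D == 8192,
// matching the removed kMaxTextureDimension2D constant.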
@@ -118,106 +122,110 @@ namespace dawn_native { namespace vulkan {
 
         // Check base WebGPU limits are supported.
         const VkPhysicalDeviceLimits& limits = mDeviceInfo.properties.limits;
-        if (limits.maxImageDimension1D < kMaxTextureDimension1D) {
+        if (limits.maxImageDimension1D < baseLimits.maxTextureDimension1D) {
             return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxTextureDimension1D");
         }
-        if (limits.maxImageDimension2D < kMaxTextureDimension2D ||
-            limits.maxImageDimensionCube < kMaxTextureDimension2D ||
-            limits.maxFramebufferWidth < kMaxTextureDimension2D ||
-            limits.maxFramebufferHeight < kMaxTextureDimension2D ||
-            limits.maxViewportDimensions[0] < kMaxTextureDimension2D ||
-            limits.maxViewportDimensions[1] < kMaxTextureDimension2D ||
-            limits.viewportBoundsRange[1] < kMaxTextureDimension2D) {
+        if (limits.maxImageDimension2D < baseLimits.maxTextureDimension2D ||
+            limits.maxImageDimensionCube < baseLimits.maxTextureDimension2D ||
+            limits.maxFramebufferWidth < baseLimits.maxTextureDimension2D ||
+            limits.maxFramebufferHeight < baseLimits.maxTextureDimension2D ||
+            limits.maxViewportDimensions[0] < baseLimits.maxTextureDimension2D ||
+            limits.maxViewportDimensions[1] < baseLimits.maxTextureDimension2D ||
+            limits.viewportBoundsRange[1] < baseLimits.maxTextureDimension2D) {
             return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxTextureDimension2D");
         }
-        if (limits.maxImageDimension3D < kMaxTextureDimension3D) {
+        if (limits.maxImageDimension3D < baseLimits.maxTextureDimension3D) {
             return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxTextureDimension3D");
         }
-        if (limits.maxImageArrayLayers < kMaxTextureArrayLayers) {
+        if (limits.maxImageArrayLayers < baseLimits.maxTextureArrayLayers) {
             return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxTextureArrayLayers");
         }
-        if (limits.maxBoundDescriptorSets < kMaxBindGroups) {
+        if (limits.maxBoundDescriptorSets < baseLimits.maxBindGroups) {
             return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxBindGroups");
         }
         if (limits.maxDescriptorSetUniformBuffersDynamic <
-            kMaxDynamicUniformBuffersPerPipelineLayout) {
+            baseLimits.maxDynamicUniformBuffersPerPipelineLayout) {
             return DAWN_INTERNAL_ERROR(
                 "Insufficient Vulkan limits for maxDynamicUniformBuffersPerPipelineLayout");
         }
         if (limits.maxDescriptorSetStorageBuffersDynamic <
-            kMaxDynamicStorageBuffersPerPipelineLayout) {
+            baseLimits.maxDynamicStorageBuffersPerPipelineLayout) {
             return DAWN_INTERNAL_ERROR(
                 "Insufficient Vulkan limits for maxDynamicStorageBuffersPerPipelineLayout");
         }
-        if (limits.maxPerStageDescriptorSampledImages < kMaxSampledTexturesPerShaderStage) {
+        if (limits.maxPerStageDescriptorSampledImages <
+            baseLimits.maxSampledTexturesPerShaderStage) {
             return DAWN_INTERNAL_ERROR(
                 "Insufficient Vulkan limits for maxSampledTexturesPerShaderStage");
         }
-        if (limits.maxPerStageDescriptorSamplers < kMaxSamplersPerShaderStage) {
+        if (limits.maxPerStageDescriptorSamplers < baseLimits.maxSamplersPerShaderStage) {
             return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxSamplersPerShaderStage");
         }
-        if (limits.maxPerStageDescriptorStorageBuffers < kMaxStorageBuffersPerShaderStage) {
+        if (limits.maxPerStageDescriptorStorageBuffers <
+            baseLimits.maxStorageBuffersPerShaderStage) {
             return DAWN_INTERNAL_ERROR(
                 "Insufficient Vulkan limits for maxStorageBuffersPerShaderStage");
         }
-        if (limits.maxPerStageDescriptorStorageImages < kMaxStorageTexturesPerShaderStage) {
+        if (limits.maxPerStageDescriptorStorageImages <
+            baseLimits.maxStorageTexturesPerShaderStage) {
             return DAWN_INTERNAL_ERROR(
                 "Insufficient Vulkan limits for maxStorageTexturesPerShaderStage");
         }
-        if (limits.maxPerStageDescriptorUniformBuffers < kMaxUniformBuffersPerShaderStage) {
+        if (limits.maxPerStageDescriptorUniformBuffers <
+            baseLimits.maxUniformBuffersPerShaderStage) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for maxUniformBuffersPerShaderStage");
         }
-        if (limits.maxUniformBufferRange < kMaxUniformBufferBindingSize) {
+        if (limits.maxUniformBufferRange < baseLimits.maxUniformBufferBindingSize) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for maxUniformBufferBindingSize");
         }
-        if (limits.maxStorageBufferRange < kMaxStorageBufferBindingSize) {
+        if (limits.maxStorageBufferRange < baseLimits.maxStorageBufferBindingSize) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for maxStorageBufferBindingSize");
         }
-        if (limits.minUniformBufferOffsetAlignment > kMinUniformBufferOffsetAlignment) {
+        if (limits.minUniformBufferOffsetAlignment > baseLimits.minUniformBufferOffsetAlignment) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for minUniformBufferOffsetAlignment");
         }
-        if (limits.minStorageBufferOffsetAlignment > kMinStorageBufferOffsetAlignment) {
+        if (limits.minStorageBufferOffsetAlignment > baseLimits.minStorageBufferOffsetAlignment) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for minStorageBufferOffsetAlignment");
         }
-        if (limits.maxVertexInputBindings < kMaxVertexBuffers) {
+        if (limits.maxVertexInputBindings < baseLimits.maxVertexBuffers) {
            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBuffers");
         }
-        if (limits.maxVertexInputAttributes < kMaxVertexAttributes) {
+        if (limits.maxVertexInputAttributes < baseLimits.maxVertexAttributes) {
            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexAttributes");
         }
-        if (limits.maxVertexInputBindingStride < kMaxVertexBufferArrayStride ||
-            limits.maxVertexInputAttributeOffset < kMaxVertexBufferArrayStride - 1) {
+        if (limits.maxVertexInputBindingStride < baseLimits.maxVertexBufferArrayStride ||
+            limits.maxVertexInputAttributeOffset < baseLimits.maxVertexBufferArrayStride - 1) {
            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
         }
-        if (limits.maxVertexOutputComponents < kMaxInterStageShaderComponents ||
-            limits.maxFragmentInputComponents < kMaxInterStageShaderComponents) {
+        if (limits.maxVertexOutputComponents < baseLimits.maxInterStageShaderComponents ||
+            limits.maxFragmentInputComponents < baseLimits.maxInterStageShaderComponents) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for maxInterStageShaderComponents");
         }
-        if (limits.maxComputeSharedMemorySize < kMaxComputeWorkgroupStorageSize) {
+        if (limits.maxComputeSharedMemorySize < baseLimits.maxComputeWorkgroupStorageSize) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for maxComputeWorkgroupStorageSize");
         }
-        if (limits.maxComputeWorkGroupInvocations < kMaxComputeWorkgroupInvocations) {
+        if (limits.maxComputeWorkGroupInvocations < baseLimits.maxComputeInvocationsPerWorkgroup) {
            return DAWN_INTERNAL_ERROR(
-                "Insufficient Vulkan limits for maxComputeWorkgroupInvocations");
+                "Insufficient Vulkan limits for maxComputeInvocationsPerWorkgroup");
         }
-        if (limits.maxComputeWorkGroupSize[0] < kMaxComputeWorkgroupSizeX ||
-            limits.maxComputeWorkGroupSize[1] < kMaxComputeWorkgroupSizeY ||
-            limits.maxComputeWorkGroupSize[2] < kMaxComputeWorkgroupSizeZ) {
+        if (limits.maxComputeWorkGroupSize[0] < baseLimits.maxComputeWorkgroupSizeX ||
+            limits.maxComputeWorkGroupSize[1] < baseLimits.maxComputeWorkgroupSizeY ||
+            limits.maxComputeWorkGroupSize[2] < baseLimits.maxComputeWorkgroupSizeZ) {
            return DAWN_INTERNAL_ERROR(
                "Insufficient Vulkan limits for maxComputeWorkgroupSize");
         }
-        if (limits.maxComputeWorkGroupCount[0] < kMaxComputePerDimensionDispatchSize ||
-            limits.maxComputeWorkGroupCount[1] < kMaxComputePerDimensionDispatchSize ||
-            limits.maxComputeWorkGroupCount[2] < kMaxComputePerDimensionDispatchSize) {
+        if (limits.maxComputeWorkGroupCount[0] < baseLimits.maxComputeWorkgroupsPerDimension ||
+            limits.maxComputeWorkGroupCount[1] < baseLimits.maxComputeWorkgroupsPerDimension ||
+            limits.maxComputeWorkGroupCount[2] < baseLimits.maxComputeWorkgroupsPerDimension) {
            return DAWN_INTERNAL_ERROR(
-                "Insufficient Vulkan limits for maxComputePerDimensionDispatchSize");
+                "Insufficient Vulkan limits for maxComputeWorkgroupsPerDimension");
         }
         if (limits.maxColorAttachments < kMaxColorAttachments) {
            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
@@ -239,9 +247,9 @@ namespace dawn_native { namespace vulkan {
         uint32_t vendorId = mDeviceInfo.properties.vendorID;
         if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) &&
             !gpu_info::IsNvidia(vendorId)) {
-            if (limits.maxFragmentCombinedOutputResources < kMaxColorAttachments +
-                                                                kMaxStorageTexturesPerShaderStage +
-                                                                kMaxStorageBuffersPerShaderStage) {
+            if (limits.maxFragmentCombinedOutputResources <
+                kMaxColorAttachments + baseLimits.maxStorageTexturesPerShaderStage +
+                    baseLimits.maxStorageBuffersPerShaderStage) {
                 return DAWN_INTERNAL_ERROR(
                     "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
             }
@@ -862,6 +862,13 @@ const wgpu::AdapterProperties& DawnTestBase::GetAdapterProperties() const {
     return mParam.adapterProperties;
 }
 
+wgpu::SupportedLimits DawnTestBase::GetSupportedLimits() {
+    WGPUSupportedLimits supportedLimits;
+    supportedLimits.nextInChain = nullptr;
+    dawn_native::GetProcs().deviceGetLimits(backendDevice, &supportedLimits);
+    return *reinterpret_cast<wgpu::SupportedLimits*>(&supportedLimits);
+}
+
 bool DawnTestBase::SupportsFeatures(const std::vector<const char*>& features) {
     ASSERT(mBackendAdapter);
     std::set<std::string> supportedFeaturesSet;
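A hedged usage sketch of the new test helper, mirroring the call sites in the test hunks that follow:

wgpu::SupportedLimits supported = GetSupportedLimits();
uint32_t alignment = supported.limits.minUniformBufferOffsetAlignment;
// Tests now size and offset their buffers from `alignment` instead of the
// removed kMinUniformBufferOffsetAlignment constant.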
@@ -485,6 +485,11 @@ class DawnTestBase {
 
     const wgpu::AdapterProperties& GetAdapterProperties() const;
 
+    // TODO(crbug.com/dawn/689): Use limits returned from the wire
+    // This is implemented here because tests need to always query
+    // the |backendDevice| since limits are not implemented in the wire.
+    wgpu::SupportedLimits GetSupportedLimits();
+
   private:
     utils::ScopedAutoreleasePool mObjCAutoreleasePool;
     AdapterTestParam mParam;
@@ -23,6 +23,11 @@ constexpr static uint32_t kRTSize = 8;
 
 class BindGroupTests : public DawnTest {
   protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        mMinUniformBufferOffsetAlignment =
+            GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
+    }
     wgpu::CommandBuffer CreateSimpleComputeCommandBuffer(const wgpu::ComputePipeline& pipeline,
                                                          const wgpu::BindGroup& bindGroup) {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -116,6 +121,8 @@ class BindGroupTests : public DawnTest {
 
         return device.CreateRenderPipeline(&pipelineDescriptor);
     }
+
+    uint32_t mMinUniformBufferOffsetAlignment;
 };
 
 // Test a bindgroup reused in two command buffers in the same call to queue.Submit().
@@ -649,7 +656,7 @@ TEST_P(BindGroupTests, SetDynamicBindGroupBeforePipeline) {
     std::array<float, 4> color0 = {1, 0, 0, 0.501};
     std::array<float, 4> color1 = {0, 1, 0, 0.501};
 
-    size_t color1Offset = Align(sizeof(color0), kMinUniformBufferOffsetAlignment);
+    size_t color1Offset = Align(sizeof(color0), mMinUniformBufferOffsetAlignment);
 
     std::vector<uint8_t> data(color1Offset + sizeof(color1));
     memcpy(data.data(), color0.data(), sizeof(color0));
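Align here is Dawn's rounding helper; a minimal equivalent for a power-of-two alignment (illustrative, not the project's actual implementation):

#include <cstdint>

constexpr uint64_t Align(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}
// e.g. Align(sizeof(float[4]), 256) == 256, so color1 starts at the next
// dynamically-offsettable boundary.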
@@ -719,7 +726,7 @@ TEST_P(BindGroupTests, BindGroupsPersistAfterPipelineChange) {
     std::array<float, 4> color0 = {1, 0, 0, 0.5};
     std::array<float, 4> color1 = {0, 1, 0, 0.5};
 
-    size_t color1Offset = Align(sizeof(color0), kMinUniformBufferOffsetAlignment);
+    size_t color1Offset = Align(sizeof(color0), mMinUniformBufferOffsetAlignment);
 
     std::vector<uint8_t> data(color1Offset + sizeof(color1));
     memcpy(data.data(), color0.data(), sizeof(color0));
@@ -806,9 +813,9 @@ TEST_P(BindGroupTests, DrawThenChangePipelineAndBindGroup) {
     std::array<float, 4> color2 = {0, 0, 0, 0.501};
     std::array<float, 4> color3 = {0, 0, 1, 0};
 
-    size_t color1Offset = Align(sizeof(color0), kMinUniformBufferOffsetAlignment);
-    size_t color2Offset = Align(color1Offset + sizeof(color1), kMinUniformBufferOffsetAlignment);
-    size_t color3Offset = Align(color2Offset + sizeof(color2), kMinUniformBufferOffsetAlignment);
+    size_t color1Offset = Align(sizeof(color0), mMinUniformBufferOffsetAlignment);
+    size_t color2Offset = Align(color1Offset + sizeof(color1), mMinUniformBufferOffsetAlignment);
+    size_t color3Offset = Align(color2Offset + sizeof(color2), mMinUniformBufferOffsetAlignment);
 
     std::vector<uint8_t> data(color3Offset + sizeof(color3), 0);
     memcpy(data.data(), color0.data(), sizeof(color0));
@@ -906,9 +913,9 @@ TEST_P(BindGroupTests, DrawThenChangePipelineTwiceAndBindGroup) {
     std::array<float, 4> color3 = {0, 0, 0, 1};
 
     size_t color0Offset = 0;
-    size_t color1Offset = Align(color0Offset + sizeof(color0), kMinUniformBufferOffsetAlignment);
-    size_t color2Offset = Align(color1Offset + sizeof(color1), kMinUniformBufferOffsetAlignment);
-    size_t color3Offset = Align(color2Offset + sizeof(color2), kMinUniformBufferOffsetAlignment);
+    size_t color1Offset = Align(color0Offset + sizeof(color0), mMinUniformBufferOffsetAlignment);
+    size_t color2Offset = Align(color1Offset + sizeof(color1), mMinUniformBufferOffsetAlignment);
+    size_t color3Offset = Align(color2Offset + sizeof(color2), mMinUniformBufferOffsetAlignment);
 
     std::vector<uint8_t> data(color3Offset + sizeof(color3), 0);
     memcpy(data.data(), color0.data(), sizeof(color0));
@@ -985,14 +992,14 @@ TEST_P(BindGroupTests, DynamicOffsetOrder) {
     // We will put the following values and the respective offsets into a buffer.
     // The test will ensure that the correct dynamic offset is applied to each buffer by reading the
     // value from an offset binding.
-    std::array<uint32_t, 3> offsets = {3 * kMinUniformBufferOffsetAlignment,
-                                       1 * kMinUniformBufferOffsetAlignment,
-                                       2 * kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 3> offsets = {3 * mMinUniformBufferOffsetAlignment,
+                                       1 * mMinUniformBufferOffsetAlignment,
+                                       2 * mMinUniformBufferOffsetAlignment};
     std::array<uint32_t, 3> values = {21, 67, 32};
 
     // Create three buffers large enough to by offset by the largest offset.
     wgpu::BufferDescriptor bufferDescriptor;
-    bufferDescriptor.size = 3 * kMinUniformBufferOffsetAlignment + sizeof(uint32_t);
+    bufferDescriptor.size = 3 * mMinUniformBufferOffsetAlignment + sizeof(uint32_t);
     bufferDescriptor.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
 
     wgpu::Buffer buffer0 = device.CreateBuffer(&bufferDescriptor);
@@ -1075,19 +1082,19 @@ TEST_P(BindGroupTests, DynamicAndNonDynamicBindingsDoNotConflictAfterRemapping)
     uint32_t dynamicBufferBindingNumber = dynamicBufferFirst ? 0 : 1;
     uint32_t bufferBindingNumber = dynamicBufferFirst ? 1 : 0;
 
-    std::array<uint32_t, 1> offsets{kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 1> offsets{mMinUniformBufferOffsetAlignment};
     std::array<uint32_t, 2> values = {21, 67};
 
     // Create three buffers large enough to by offset by the largest offset.
     wgpu::BufferDescriptor bufferDescriptor;
-    bufferDescriptor.size = 2 * kMinUniformBufferOffsetAlignment + sizeof(uint32_t);
+    bufferDescriptor.size = 2 * mMinUniformBufferOffsetAlignment + sizeof(uint32_t);
     bufferDescriptor.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
 
     wgpu::Buffer dynamicBuffer = device.CreateBuffer(&bufferDescriptor);
     wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
 
     // Populate the values
-    queue.WriteBuffer(dynamicBuffer, kMinUniformBufferOffsetAlignment,
+    queue.WriteBuffer(dynamicBuffer, mMinUniformBufferOffsetAlignment,
                       &values[dynamicBufferBindingNumber], sizeof(uint32_t));
     queue.WriteBuffer(buffer, 0, &values[bufferBindingNumber], sizeof(uint32_t));
 
@@ -18,8 +18,6 @@
 #include "utils/WGPUHelpers.h"
 
 constexpr uint32_t kRTSize = 400;
-constexpr uint32_t kBufferElementsCount = kMinUniformBufferOffsetAlignment / sizeof(uint32_t) + 2;
-constexpr uint32_t kBufferSize = kBufferElementsCount * sizeof(uint32_t);
 constexpr uint32_t kBindingSize = 8;
 
 class DynamicBufferOffsetTests : public DawnTest {
@@ -27,24 +25,29 @@ class DynamicBufferOffsetTests : public DawnTest {
     void SetUp() override {
         DawnTest::SetUp();
 
+        mMinUniformBufferOffsetAlignment =
+            GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
+
         // Mix up dynamic and non dynamic resources in one bind group and using not continuous
         // binding number to cover more cases.
-        std::array<uint32_t, kBufferElementsCount> uniformData = {0};
+        std::vector<uint32_t> uniformData(mMinUniformBufferOffsetAlignment / sizeof(uint32_t) + 2);
         uniformData[0] = 1;
         uniformData[1] = 2;
 
-        mUniformBuffers[0] = utils::CreateBufferFromData(device, uniformData.data(), kBufferSize,
+        mUniformBuffers[0] = utils::CreateBufferFromData(device, uniformData.data(),
+                                                         sizeof(uint32_t) * uniformData.size(),
                                                          wgpu::BufferUsage::Uniform);
 
         uniformData[uniformData.size() - 2] = 5;
         uniformData[uniformData.size() - 1] = 6;
 
         // Dynamic uniform buffer
-        mUniformBuffers[1] = utils::CreateBufferFromData(device, uniformData.data(), kBufferSize,
+        mUniformBuffers[1] = utils::CreateBufferFromData(device, uniformData.data(),
+                                                         sizeof(uint32_t) * uniformData.size(),
                                                          wgpu::BufferUsage::Uniform);
 
         wgpu::BufferDescriptor storageBufferDescriptor;
-        storageBufferDescriptor.size = kBufferSize;
+        storageBufferDescriptor.size = sizeof(uint32_t) * uniformData.size();
         storageBufferDescriptor.usage =
             wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
 
@@ -72,7 +75,8 @@ class DynamicBufferOffsetTests : public DawnTest {
                                                   {4, mStorageBuffers[1], 0, kBindingSize}});
 
         // Extra uniform buffer for inheriting test
-        mUniformBuffers[2] = utils::CreateBufferFromData(device, uniformData.data(), kBufferSize,
+        mUniformBuffers[2] = utils::CreateBufferFromData(device, uniformData.data(),
+                                                         sizeof(uint32_t) * uniformData.size(),
                                                          wgpu::BufferUsage::Uniform);
 
         // Bind group layout for inheriting test
@@ -86,6 +90,7 @@ class DynamicBufferOffsetTests : public DawnTest {
     }
     // Create objects to use as resources inside test bind groups.
 
+    uint32_t mMinUniformBufferOffsetAlignment;
     wgpu::BindGroup mBindGroups[2];
     wgpu::BindGroupLayout mBindGroupLayouts[2];
     wgpu::Buffer mUniformBuffers[3];
@@ -227,8 +232,8 @@ TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsRenderPipeline) {
     utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
     wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
-    std::array<uint32_t, 2> offsets = {kMinUniformBufferOffsetAlignment,
-                                       kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
     wgpu::RenderPassEncoder renderPassEncoder =
         commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
     renderPassEncoder.SetPipeline(pipeline);
@@ -241,7 +246,7 @@ TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsRenderPipeline) {
     std::vector<uint32_t> expectedData = {6, 8};
     EXPECT_PIXEL_RGBA8_EQ(RGBA8(5, 6, 255, 255), renderPass.color, 0, 0);
     EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
-                               kMinUniformBufferOffsetAlignment, expectedData.size());
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
 }
 
 // Dynamic offsets are all zero and no effect to result.
@@ -267,8 +272,8 @@ TEST_P(DynamicBufferOffsetTests, BasicComputePipeline) {
 TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsComputePipeline) {
     wgpu::ComputePipeline pipeline = CreateComputePipeline();
 
-    std::array<uint32_t, 2> offsets = {kMinUniformBufferOffsetAlignment,
-                                       kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
 
     wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
     wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
@@ -281,7 +286,7 @@ TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsComputePipeline) {
 
     std::vector<uint32_t> expectedData = {6, 8};
     EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
-                               kMinUniformBufferOffsetAlignment, expectedData.size());
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
 }
 
 // Test inherit dynamic offsets on render pipeline
@@ -293,8 +298,8 @@ TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsRenderPipeline) {
     utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
     wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
-    std::array<uint32_t, 2> offsets = {kMinUniformBufferOffsetAlignment,
-                                       kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
     wgpu::RenderPassEncoder renderPassEncoder =
         commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
     renderPassEncoder.SetPipeline(pipeline);
@@ -310,7 +315,7 @@ TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsRenderPipeline) {
     std::vector<uint32_t> expectedData = {12, 16};
     EXPECT_PIXEL_RGBA8_EQ(RGBA8(5, 6, 255, 255), renderPass.color, 0, 0);
     EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
-                               kMinUniformBufferOffsetAlignment, expectedData.size());
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
 }
 
 // Test inherit dynamic offsets on compute pipeline
@@ -322,8 +327,8 @@ TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsComputePipeline) {
     wgpu::ComputePipeline pipeline = CreateComputePipeline();
     wgpu::ComputePipeline testPipeline = CreateComputePipeline(true);
 
-    std::array<uint32_t, 2> offsets = {kMinUniformBufferOffsetAlignment,
-                                       kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
 
     wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
     wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
@@ -339,7 +344,7 @@ TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsComputePipeline) {
 
     std::vector<uint32_t> expectedData = {12, 16};
     EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
-                               kMinUniformBufferOffsetAlignment, expectedData.size());
+                               mMinUniformBufferOffsetAlignment, expectedData.size());
 }
 
 // Setting multiple dynamic offsets for the same bindgroup in one render pass.
@@ -350,8 +355,8 @@ TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesRenderPipeline
     utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
     wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
-    std::array<uint32_t, 2> offsets = {kMinUniformBufferOffsetAlignment,
-                                       kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
     std::array<uint32_t, 2> testOffsets = {0, 0};
 
     wgpu::RenderPassEncoder renderPassEncoder =
@@ -374,8 +379,8 @@ TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesRenderPipeline
 TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesComputePipeline) {
     wgpu::ComputePipeline pipeline = CreateComputePipeline();
 
-    std::array<uint32_t, 2> offsets = {kMinUniformBufferOffsetAlignment,
-                                       kMinUniformBufferOffsetAlignment};
+    std::array<uint32_t, 2> offsets = {mMinUniformBufferOffsetAlignment,
+                                       mMinUniformBufferOffsetAlignment};
     std::array<uint32_t, 2> testOffsets = {0, 0};
 
     wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
@@ -115,6 +115,7 @@ TEST_P(SwapChainValidationTests, CreationSuccess) {
 
 // Checks that the creation size must be a valid 2D texture size.
 TEST_P(SwapChainValidationTests, InvalidCreationSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
     // A width of 0 is invalid.
     {
         wgpu::SwapChainDescriptor desc = goodDescriptor;
@@ -128,23 +129,23 @@ TEST_P(SwapChainValidationTests, InvalidCreationSize) {
         ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
     }
 
-    // A width of kMaxTextureDimension2D is valid but kMaxTextureDimension2D + 1 isn't.
+    // A width of maxTextureDimension2D is valid but maxTextureDimension2D + 1 isn't.
     {
         wgpu::SwapChainDescriptor desc = goodDescriptor;
-        desc.width = kMaxTextureDimension2D;
+        desc.width = supportedLimits.maxTextureDimension2D;
         device.CreateSwapChain(surface, &desc);
 
-        desc.width = kMaxTextureDimension2D + 1;
+        desc.width = supportedLimits.maxTextureDimension2D + 1;
         ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
     }
 
-    // A height of kMaxTextureDimension2D is valid but kMaxTextureDimension2D + 1 isn't.
+    // A height of maxTextureDimension2D is valid but maxTextureDimension2D + 1 isn't.
     {
         wgpu::SwapChainDescriptor desc = goodDescriptor;
-        desc.height = kMaxTextureDimension2D;
+        desc.height = supportedLimits.maxTextureDimension2D;
         device.CreateSwapChain(surface, &desc);
 
-        desc.height = kMaxTextureDimension2D + 1;
+        desc.height = supportedLimits.maxTextureDimension2D + 1;
         ASSERT_DEVICE_ERROR(device.CreateSwapChain(surface, &desc));
     }
 }
@@ -270,7 +270,8 @@ void DrawCallPerf::SetUp() {
     DawnPerfTestWithParams::SetUp();
 
     // Compute aligned uniform / vertex data sizes.
-    mAlignedUniformSize = Align(kUniformSize, kMinUniformBufferOffsetAlignment);
+    mAlignedUniformSize =
+        Align(kUniformSize, GetSupportedLimits().limits.minUniformBufferOffsetAlignment);
     mAlignedVertexDataSize = Align(sizeof(kVertexData), 4);
 
     // Initialize uniform buffer data.
@@ -708,8 +708,10 @@ TEST_F(BindGroupValidationTest, BufferBindingOOB) {
 
 // Tests constraints to be sure the uniform buffer binding isn't too large
 TEST_F(BindGroupValidationTest, MaxUniformBufferBindingSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
     wgpu::BufferDescriptor descriptor;
-    descriptor.size = 2 * kMaxUniformBufferBindingSize;
+    descriptor.size = 2 * supportedLimits.maxUniformBufferBindingSize;
     descriptor.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage;
     wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
 
@@ -717,7 +719,8 @@ TEST_F(BindGroupValidationTest, MaxUniformBufferBindingSize) {
         device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}});
 
     // Success case, this is exactly the limit
-    utils::MakeBindGroup(device, uniformLayout, {{0, buffer, 0, kMaxUniformBufferBindingSize}});
+    utils::MakeBindGroup(device, uniformLayout,
+                         {{0, buffer, 0, supportedLimits.maxUniformBufferBindingSize}});
 
     wgpu::BindGroupLayout doubleUniformLayout = utils::MakeBindGroupLayout(
         device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},

@ -725,12 +728,13 @@ TEST_F(BindGroupValidationTest, MaxUniformBufferBindingSize) {

    // Success case, individual bindings don't exceed the limit
    utils::MakeBindGroup(device, doubleUniformLayout,
                         {{0, buffer, 0, kMaxUniformBufferBindingSize},
                          {1, buffer, kMaxUniformBufferBindingSize, kMaxUniformBufferBindingSize}});
                         {{0, buffer, 0, supportedLimits.maxUniformBufferBindingSize},
                          {1, buffer, supportedLimits.maxUniformBufferBindingSize,
                           supportedLimits.maxUniformBufferBindingSize}});

    // Error case, this is above the limit
    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, uniformLayout,
                                             {{0, buffer, 0, kMaxUniformBufferBindingSize + 1}}));
    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(
        device, uniformLayout, {{0, buffer, 0, supportedLimits.maxUniformBufferBindingSize + 1}}));

    // Making sure the constraint doesn't apply to storage buffers
    wgpu::BindGroupLayout readonlyStorageLayout = utils::MakeBindGroupLayout(

@ -740,14 +744,17 @@ TEST_F(BindGroupValidationTest, MaxUniformBufferBindingSize) {

    // Success case, storage buffer can still be created.
    utils::MakeBindGroup(device, readonlyStorageLayout,
                         {{0, buffer, 0, 2 * kMaxUniformBufferBindingSize}});
    utils::MakeBindGroup(device, storageLayout, {{0, buffer, 0, 2 * kMaxUniformBufferBindingSize}});
                         {{0, buffer, 0, 2 * supportedLimits.maxUniformBufferBindingSize}});
    utils::MakeBindGroup(device, storageLayout,
                         {{0, buffer, 0, 2 * supportedLimits.maxUniformBufferBindingSize}});
}

// Tests constraints to be sure the storage buffer binding isn't too large
TEST_F(BindGroupValidationTest, MaxStorageBufferBindingSize) {
    wgpu::Limits supportedLimits = GetSupportedLimits().limits;

    wgpu::BufferDescriptor descriptor;
    descriptor.size = 2 * kMaxStorageBufferBindingSize;
    descriptor.size = 2 * supportedLimits.maxStorageBufferBindingSize;
    descriptor.usage = wgpu::BufferUsage::Storage;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

|
@ -755,10 +762,12 @@ TEST_F(BindGroupValidationTest, MaxStorageBufferBindingSize) {
|
|||
device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
|
||||
|
||||
// Success case, this is exactly the limit
|
||||
utils::MakeBindGroup(device, uniformLayout, {{0, buffer, 0, kMaxStorageBufferBindingSize}});
|
||||
utils::MakeBindGroup(device, uniformLayout,
|
||||
{{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize}});
|
||||
|
||||
// Success case, this is one less than the limit (check it is not an alignment constraint)
|
||||
utils::MakeBindGroup(device, uniformLayout, {{0, buffer, 0, kMaxStorageBufferBindingSize - 1}});
|
||||
utils::MakeBindGroup(device, uniformLayout,
|
||||
{{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize - 1}});
|
||||
|
||||
wgpu::BindGroupLayout doubleUniformLayout = utils::MakeBindGroupLayout(
|
||||
device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
|
||||
|

@ -766,12 +775,13 @@ TEST_F(BindGroupValidationTest, MaxStorageBufferBindingSize) {

    // Success case, individual bindings don't exceed the limit
    utils::MakeBindGroup(device, doubleUniformLayout,
                         {{0, buffer, 0, kMaxStorageBufferBindingSize},
                          {1, buffer, kMaxStorageBufferBindingSize, kMaxStorageBufferBindingSize}});
                         {{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize},
                          {1, buffer, supportedLimits.maxStorageBufferBindingSize,
                           supportedLimits.maxStorageBufferBindingSize}});

    // Error case, this is above the limit
    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, uniformLayout,
                                             {{0, buffer, 0, kMaxStorageBufferBindingSize + 1}}));
    ASSERT_DEVICE_ERROR(utils::MakeBindGroup(
        device, uniformLayout, {{0, buffer, 0, supportedLimits.maxStorageBufferBindingSize + 1}}));
}

// Test what happens when the layout is an error.

@ -1306,7 +1316,6 @@ TEST_F(BindGroupLayoutValidationTest, MultisampledTextureSampleType) {
    });
}

constexpr uint64_t kBufferSize = 3 * kMinUniformBufferOffsetAlignment + 8;
constexpr uint32_t kBindingSize = 9;

class SetBindGroupValidationTest : public ValidationTest {

@ -1323,6 +1332,9 @@ class SetBindGroupValidationTest : public ValidationTest {
                     wgpu::BufferBindingType::Storage, true},
                    {3, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                     wgpu::BufferBindingType::ReadOnlyStorage, true}});
        mMinUniformBufferOffsetAlignment =
            GetSupportedLimits().limits.minUniformBufferOffsetAlignment;
        mBufferSize = 3 * mMinUniformBufferOffsetAlignment + 8;
    }

    wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
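
A worked example of the buffer sizing, assuming the queried alignment comes back as 256 bytes (the value the old compile-time constant hard-coded): mBufferSize = 3 * 256 + 8 = 776. A dynamic offset of 512 with the 9-byte kBindingSize ends at byte 521 and validates, while an offset of 768 ends at byte 777 > 776 and is rejected, which is what the OffsetOutOfBound tests below exercise.
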
@ -1431,14 +1443,18 @@ class SetBindGroupValidationTest : public ValidationTest {
            commandEncoder.Finish();
        }
    }

  protected:
    uint32_t mMinUniformBufferOffsetAlignment;
    uint64_t mBufferSize;
};

// This is the test case that should work.
TEST_F(SetBindGroupValidationTest, Basic) {
    // Set up the bind group.
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1461,9 +1477,9 @@ TEST_F(SetBindGroupValidationTest, MissingBindGroup) {
// Setting bind group after a draw / dispatch should re-verify the layout is compatible
TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) {
    // Set up the bind group
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1510,9 +1526,9 @@ TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) {
// Test cases that test dynamic offsets count mismatch with bind group layout.
TEST_F(SetBindGroupValidationTest, DynamicOffsetsMismatch) {
    // Set up bind group.
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1534,9 +1550,9 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetsMismatch) {
// Test cases that test dynamic offsets not aligned
TEST_F(SetBindGroupValidationTest, DynamicOffsetsNotAligned) {
    // Set up bind group.
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1554,9 +1570,9 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetsNotAligned) {
// Test cases that test dynamic uniform buffer out of bound situation.
TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicUniformBuffer) {
    // Set up bind group.
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1574,9 +1590,9 @@ TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicUniformBuffer) {
// Test cases that test dynamic storage buffer out of bound situation.
TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicStorageBuffer) {
    // Set up bind group.
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1594,9 +1610,9 @@ TEST_F(SetBindGroupValidationTest, OffsetOutOfBoundDynamicStorageBuffer) {
// Test cases that test dynamic uniform buffer out of bound situation because of binding size.
TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicUniformBuffer) {
    // Set up bind group, but binding size is larger than the buffer size.
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1614,9 +1630,9 @@ TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicUniformBuffer) {

// Test cases that test dynamic storage buffer out of bound situation because of binding size.
TEST_F(SetBindGroupValidationTest, BindingSizeOutOfBoundDynamicStorageBuffer) {
    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer readonlyStorageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, mBindGroupLayout,
                                                     {{0, uniformBuffer, 0, kBindingSize},
                                                      {1, uniformBuffer, 0, kBindingSize},

@ -1650,11 +1666,11 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) {
    // end of the buffer. Any mismatch applying too-large of an offset to a smaller buffer will hit
    // the out-of-bounds condition during validation.
    wgpu::Buffer buffer3x =
        CreateBuffer(3 * kMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage);
        CreateBuffer(3 * mMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage);
    wgpu::Buffer buffer2x =
        CreateBuffer(2 * kMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage);
        CreateBuffer(2 * mMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Storage);
    wgpu::Buffer buffer1x =
        CreateBuffer(1 * kMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Uniform);
        CreateBuffer(1 * mMinUniformBufferOffsetAlignment + 4, wgpu::BufferUsage::Uniform);
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl,
                                                     {
                                                         {0, buffer3x, 0, 4},

@ -1678,7 +1694,7 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) {
        // Offset the first binding to touch the end of the buffer. Should succeed.
        // Will fail if the offset is applied to the second or third bindings since their buffers
        // are too small.
        offsets = {/* binding 0 */ 3 * kMinUniformBufferOffsetAlignment,
        offsets = {/* binding 0 */ 3 * mMinUniformBufferOffsetAlignment,
                   /* binding 2 */ 0,
                   /* binding 3 */ 0};
        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();

@ -1690,7 +1706,7 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) {
    {
        // Offset the second binding to touch the end of the buffer. Should succeed.
        offsets = {/* binding 0 */ 0,
                   /* binding 2 */ 1 * kMinUniformBufferOffsetAlignment,
                   /* binding 2 */ 1 * mMinUniformBufferOffsetAlignment,
                   /* binding 3 */ 0};
        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();

@ -1704,7 +1720,7 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) {
        // is too small.
        offsets = {/* binding 0 */ 0,
                   /* binding 2 */ 0,
                   /* binding 3 */ 2 * kMinUniformBufferOffsetAlignment};
                   /* binding 3 */ 2 * mMinUniformBufferOffsetAlignment};
        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());

@ -1713,9 +1729,9 @@ TEST_F(SetBindGroupValidationTest, DynamicOffsetOrder) {
    }
    {
        // Offset each binding to touch the end of their buffer. Should succeed.
        offsets = {/* binding 0 */ 3 * kMinUniformBufferOffsetAlignment,
                   /* binding 2 */ 1 * kMinUniformBufferOffsetAlignment,
                   /* binding 3 */ 2 * kMinUniformBufferOffsetAlignment};
        offsets = {/* binding 0 */ 3 * mMinUniformBufferOffsetAlignment,
                   /* binding 2 */ 1 * mMinUniformBufferOffsetAlignment,
                   /* binding 3 */ 2 * mMinUniformBufferOffsetAlignment};
        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
        computePassEncoder.SetBindGroup(0, bindGroup, offsets.size(), offsets.data());
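
The sizing scheme above makes each case self-checking: bufferNx is N * alignment + 4 bytes and each binding is 4 bytes, so a dynamic offset of N * alignment places the binding exactly on the last 4 bytes of its own buffer while overflowing any smaller one. With a 256-byte alignment, assumed here for illustration, buffer3x is 772 bytes and the binding-0 offset of 768 leaves exactly the 4 bytes needed.
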
@ -1745,6 +1761,8 @@ class SetBindGroupPersistenceValidationTest : public ValidationTest {
            [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> {
                return vec4<f32>();
            })");

        mBufferSize = 3 * GetSupportedLimits().limits.minUniformBufferOffsetAlignment + 8;
    }

    wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {

@ -1824,6 +1842,9 @@ class SetBindGroupPersistenceValidationTest : public ValidationTest {
        return std::make_tuple(bindGroupLayouts, pipeline);
    }

  protected:
    uint32_t mBufferSize;

  private:
    wgpu::ShaderModule mVsModule;
};

@ -1843,8 +1864,8 @@ TEST_F(SetBindGroupPersistenceValidationTest, BindGroupBeforePipeline) {
        }},
    }});

    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);

    wgpu::BindGroup bindGroup0 = utils::MakeBindGroup(
        device, bindGroupLayouts[0],

@ -1897,8 +1918,8 @@ TEST_F(SetBindGroupPersistenceValidationTest, NotVulkanInheritance) {
        }},
    }});

    wgpu::Buffer uniformBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
    wgpu::Buffer uniformBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    wgpu::Buffer storageBuffer = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);

    wgpu::BindGroup bindGroupA0 = utils::MakeBindGroup(
        device, bindGroupLayoutsA[0],

@ -2152,6 +2173,11 @@ TEST_F(BindGroupLayoutCompatibilityTest, ExternalTextureBindGroupLayoutCompatibi

class BindingsValidationTest : public BindGroupLayoutCompatibilityTest {
  public:
    void SetUp() override {
        BindGroupLayoutCompatibilityTest::SetUp();
        mBufferSize = 3 * GetSupportedLimits().limits.minUniformBufferOffsetAlignment + 8;
    }

    void TestRenderPassBindings(const wgpu::BindGroup* bg,
                                uint32_t count,
                                wgpu::RenderPipeline pipeline,

@ -2191,6 +2217,7 @@ class BindingsValidationTest : public BindGroupLayoutCompatibilityTest {
        }
    }

    uint32_t mBufferSize;
    static constexpr uint32_t kBindingNum = 3;
};

@ -2263,7 +2290,7 @@ TEST_F(BindingsValidationTest, BindGroupsWithMoreBindingsThanPipelineLayout) {
        bgl[i] = utils::MakeBindGroupLayout(
            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Storage}});
        buffer[i] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
        buffer[i] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
        bg[i] = utils::MakeBindGroup(device, bgl[i], {{0, buffer[i]}});
    }

@ -2284,7 +2311,7 @@ TEST_F(BindingsValidationTest, BindGroupsWithMoreBindingsThanPipelineLayout) {
                 wgpu::BufferBindingType::ReadOnlyStorage},
                {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                 wgpu::BufferBindingType::Uniform}});
    buffer[1] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
    buffer[1] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
    bg[1] = utils::MakeBindGroup(device, bgl[1], {{0, buffer[1]}, {1, buffer[1]}});

    TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false);

@ -2304,7 +2331,7 @@ TEST_F(BindingsValidationTest, BindGroupsWithLessBindingsThanPipelineLayout) {
        bgl[i] = utils::MakeBindGroupLayout(
            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Storage}});
        buffer[i] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Storage);
        buffer[i] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Storage);
        bg[i] = utils::MakeBindGroup(device, bgl[i], {{0, buffer[i]}});
    }

@ -2329,7 +2356,7 @@ TEST_F(BindingsValidationTest, BindGroupsWithLessBindingsThanPipelineLayout) {
    bgl[2] = utils::MakeBindGroupLayout(
        device, {{1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                  wgpu::BufferBindingType::Uniform}});
    buffer[2] = CreateBuffer(kBufferSize, wgpu::BufferUsage::Uniform);
    buffer[2] = CreateBuffer(mBufferSize, wgpu::BufferUsage::Uniform);
    bg[2] = utils::MakeBindGroup(device, bgl[2], {{1, buffer[2]}});

    TestRenderPassBindings(bg.data(), kBindingNum, renderPipeline, false);

@ -57,27 +57,30 @@ TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_SmallestValid) {

// Check that the largest allowed dispatch is OK.
TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_LargestValid) {
    constexpr uint32_t kMax = kMaxComputePerDimensionDispatchSize;
    TestDispatch(kMax, kMax, kMax);
    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
    TestDispatch(max, max, max);
}

// Check that exceeding the maximum on the X dimension results in validation failure.
TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidX) {
    ASSERT_DEVICE_ERROR(TestDispatch(kMaxComputePerDimensionDispatchSize + 1, 1, 1));
    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
    ASSERT_DEVICE_ERROR(TestDispatch(max + 1, 1, 1));
}

// Check that exceeding the maximum on the Y dimension results in validation failure.
TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidY) {
    ASSERT_DEVICE_ERROR(TestDispatch(1, kMaxComputePerDimensionDispatchSize + 1, 1));
    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
    ASSERT_DEVICE_ERROR(TestDispatch(1, max + 1, 1));
}

// Check that exceeding the maximum on the Z dimension results in validation failure.
TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidZ) {
    ASSERT_DEVICE_ERROR(TestDispatch(1, 1, kMaxComputePerDimensionDispatchSize + 1));
    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
    ASSERT_DEVICE_ERROR(TestDispatch(1, 1, max + 1));
}

// Check that exceeding the maximum on all dimensions results in validation failure.
TEST_F(ComputeValidationTest, PerDimensionDispatchSizeLimits_InvalidAll) {
    constexpr uint32_t kMax = kMaxComputePerDimensionDispatchSize;
    ASSERT_DEVICE_ERROR(TestDispatch(kMax + 1, kMax + 1, kMax + 1));
    const uint32_t max = GetSupportedLimits().limits.maxComputeWorkgroupsPerDimension;
    ASSERT_DEVICE_ERROR(TestDispatch(max + 1, max + 1, max + 1));
}
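
TestDispatch itself is outside this diff; a hypothetical sketch of what such a helper plausibly does (the pipeline member and the exact encoder calls are assumptions, using the Dispatch/EndPass spellings of this era of the API):

    // Hypothetical helper: record a single dispatch so that the per-dimension
    // workgroup-count validation runs when the encoder is finished.
    void TestDispatch(uint32_t x, uint32_t y, uint32_t z) {
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
        pass.SetPipeline(pipeline);  // assumed compute pipeline member
        pass.Dispatch(x, y, z);
        pass.EndPass();
        encoder.Finish();
    }
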
@ -444,29 +444,37 @@ TEST_F(ShaderModuleValidationTest, ComputeWorkgroupSizeLimits) {
        utils::CreateShaderModule(device, ss.str().c_str());
    };

    MakeShaderWithWorkgroupSize(1, 1, 1);
    MakeShaderWithWorkgroupSize(kMaxComputeWorkgroupSizeX, 1, 1);
    MakeShaderWithWorkgroupSize(1, kMaxComputeWorkgroupSizeY, 1);
    MakeShaderWithWorkgroupSize(1, 1, kMaxComputeWorkgroupSizeZ);
    wgpu::Limits supportedLimits = GetSupportedLimits().limits;

    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupSize(kMaxComputeWorkgroupSizeX + 1, 1, 1));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupSize(1, kMaxComputeWorkgroupSizeY + 1, 1));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupSize(1, 1, kMaxComputeWorkgroupSizeZ + 1));
    MakeShaderWithWorkgroupSize(1, 1, 1);
    MakeShaderWithWorkgroupSize(supportedLimits.maxComputeWorkgroupSizeX, 1, 1);
    MakeShaderWithWorkgroupSize(1, supportedLimits.maxComputeWorkgroupSizeY, 1);
    MakeShaderWithWorkgroupSize(1, 1, supportedLimits.maxComputeWorkgroupSizeZ);

    ASSERT_DEVICE_ERROR(
        MakeShaderWithWorkgroupSize(supportedLimits.maxComputeWorkgroupSizeX + 1, 1, 1));
    ASSERT_DEVICE_ERROR(
        MakeShaderWithWorkgroupSize(1, supportedLimits.maxComputeWorkgroupSizeY + 1, 1));
    ASSERT_DEVICE_ERROR(
        MakeShaderWithWorkgroupSize(1, 1, supportedLimits.maxComputeWorkgroupSizeZ + 1));

    // No individual dimension exceeds its limit, but the combined size should definitely exceed the
    // total invocation limit.
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupSize(
        kMaxComputeWorkgroupSizeX, kMaxComputeWorkgroupSizeY, kMaxComputeWorkgroupSizeZ));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupSize(supportedLimits.maxComputeWorkgroupSizeX,
                                                    supportedLimits.maxComputeWorkgroupSizeY,
                                                    supportedLimits.maxComputeWorkgroupSizeZ));
}

// Tests that we validate workgroup storage size limits.
TEST_F(ShaderModuleValidationTest, ComputeWorkgroupStorageSizeLimits) {
    DAWN_SKIP_TEST_IF(!HasToggleEnabled("use_tint_generator"));

    wgpu::Limits supportedLimits = GetSupportedLimits().limits;

    constexpr uint32_t kVec4Size = 16;
    constexpr uint32_t kMaxVec4Count = kMaxComputeWorkgroupStorageSize / kVec4Size;
    const uint32_t maxVec4Count = supportedLimits.maxComputeWorkgroupStorageSize / kVec4Size;
    constexpr uint32_t kMat4Size = 64;
    constexpr uint32_t kMaxMat4Count = kMaxComputeWorkgroupStorageSize / kMat4Size;
    const uint32_t maxMat4Count = supportedLimits.maxComputeWorkgroupStorageSize / kMat4Size;

    auto MakeShaderWithWorkgroupStorage = [this](uint32_t vec4_count, uint32_t mat4_count) {
        std::ostringstream ss;

@ -484,14 +492,14 @@ TEST_F(ShaderModuleValidationTest, ComputeWorkgroupStorageSizeLimits) {
    };

    MakeShaderWithWorkgroupStorage(1, 1);
    MakeShaderWithWorkgroupStorage(kMaxVec4Count, 0);
    MakeShaderWithWorkgroupStorage(0, kMaxMat4Count);
    MakeShaderWithWorkgroupStorage(kMaxVec4Count - 4, 1);
    MakeShaderWithWorkgroupStorage(4, kMaxMat4Count - 1);
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(kMaxVec4Count + 1, 0));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(kMaxVec4Count - 3, 1));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(0, kMaxMat4Count + 1));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(4, kMaxMat4Count));
    MakeShaderWithWorkgroupStorage(maxVec4Count, 0);
    MakeShaderWithWorkgroupStorage(0, maxMat4Count);
    MakeShaderWithWorkgroupStorage(maxVec4Count - 4, 1);
    MakeShaderWithWorkgroupStorage(4, maxMat4Count - 1);
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(maxVec4Count + 1, 0));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(maxVec4Count - 3, 1));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(0, maxMat4Count + 1));
    ASSERT_DEVICE_ERROR(MakeShaderWithWorkgroupStorage(4, maxMat4Count));
}
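
To make the boundary cases concrete, take 16352 bytes, the value the removed kMaxComputeWorkgroupStorageSize constant hard-coded (the queried limit may differ per adapter): maxVec4Count = 16352 / 16 = 1022 and maxMat4Count = 16352 / 64 = 255. Then (maxVec4Count - 4) vec4s plus one mat4 occupy 1018 * 16 + 64 = 16352 bytes, exactly the limit, and compile, while (maxVec4Count - 3) vec4s plus one mat4 occupy 16368 bytes and are rejected; likewise 4 vec4s plus maxMat4Count mat4s occupy 64 + 16320 = 16384 bytes, just over the limit.
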

// Test that numeric ID must be unique

@ -15,6 +15,7 @@
#include "tests/unittests/validation/ValidationTest.h"

#include "common/Constants.h"
#include "common/Math.h"
#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/TextureUtils.h"
#include "utils/WGPUHelpers.h"

@ -264,12 +265,24 @@ namespace {
        device.CreateTexture(&descriptor);
    }

    // Mip level exceeding kMaxTexture2DMipLevels not allowed
    // Mip level equal to the maximum for a 2D texture is allowed
    {
        uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.width = 1 >> kMaxTexture2DMipLevels;
        descriptor.size.height = 1 >> kMaxTexture2DMipLevels;
        descriptor.mipLevelCount = kMaxTexture2DMipLevels + 1u;
        descriptor.size.width = maxTextureDimension2D;
        descriptor.size.height = maxTextureDimension2D;
        descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 1u;

        device.CreateTexture(&descriptor);
    }

    // Mip level exceeding the maximum for a 2D texture not allowed
    {
        uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.width = maxTextureDimension2D;
        descriptor.size.height = maxTextureDimension2D;
        descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 2u;

        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
    }
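
The mip count bound follows from halving each dimension per level: a square 2D texture admits Log2(maxTextureDimension2D) + 1 levels in a full chain. Taking 8192, the value the removed kMaxTextureDimension2D constant hard-coded, as an illustration: Log2(8192) = 13, so 14 levels (8192 down to 1) are accepted above and a 15th is rejected.
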
@ -278,26 +291,27 @@ namespace {
// Test the validation of array layer count
TEST_F(TextureValidationTest, ArrayLayerCount) {
    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
    wgpu::Limits supportedLimits = GetSupportedLimits().limits;

    // Array layer count exceeding kMaxTextureArrayLayers is not allowed for 2D texture
    // Array layer count exceeding maxTextureArrayLayers is not allowed for 2D texture
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;

        descriptor.size.depthOrArrayLayers = kMaxTextureArrayLayers + 1u;
        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers + 1u;
        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
    }

    // Array layer count less than kMaxTextureArrayLayers is allowed
    // Array layer count less than maxTextureArrayLayers is allowed
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.depthOrArrayLayers = kMaxTextureArrayLayers >> 1;
        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers >> 1;
        device.CreateTexture(&descriptor);
    }

    // Array layer count equal to kMaxTextureArrayLayers is allowed
    // Array layer count equal to maxTextureArrayLayers is allowed
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.depthOrArrayLayers = kMaxTextureArrayLayers;
        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers;
        device.CreateTexture(&descriptor);
    }
}

@ -305,15 +319,16 @@ namespace {
// Test the validation of 2D texture size
TEST_F(TextureValidationTest, 2DTextureSize) {
    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
    wgpu::Limits supportedLimits = GetSupportedLimits().limits;

    // Out-of-bound texture dimension is not allowed
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.width = kMaxTextureDimension2D + 1u;
        descriptor.size.width = supportedLimits.maxTextureDimension2D + 1u;
        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));

        descriptor.size.width = 1;
        descriptor.size.height = kMaxTextureDimension2D + 1u;
        descriptor.size.height = supportedLimits.maxTextureDimension2D + 1u;
        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
    }

@ -334,16 +349,16 @@ namespace {
    // Texture size less than max dimension is allowed
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.width = kMaxTextureDimension2D >> 1;
        descriptor.size.height = kMaxTextureDimension2D >> 1;
        descriptor.size.width = supportedLimits.maxTextureDimension2D >> 1;
        descriptor.size.height = supportedLimits.maxTextureDimension2D >> 1;
        device.CreateTexture(&descriptor);
    }

    // Texture size equal to max dimension is allowed
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.size.width = kMaxTextureDimension2D;
        descriptor.size.height = kMaxTextureDimension2D;
        descriptor.size.width = supportedLimits.maxTextureDimension2D;
        descriptor.size.height = supportedLimits.maxTextureDimension2D;
        descriptor.dimension = wgpu::TextureDimension::e2D;
        device.CreateTexture(&descriptor);
    }

@ -352,19 +367,20 @@ namespace {
// Test the validation of 3D texture size
TEST_F(TextureValidationTest, 3DTextureSize) {
    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
    wgpu::Limits supportedLimits = GetSupportedLimits().limits;

    // Out-of-bound texture dimension is not allowed
    {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.dimension = wgpu::TextureDimension::e3D;

        descriptor.size = {kMaxTextureDimension3D + 1u, 1, 1};
        descriptor.size = {supportedLimits.maxTextureDimension3D + 1u, 1, 1};
        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));

        descriptor.size = {1, kMaxTextureDimension3D + 1u, 1};
        descriptor.size = {1, supportedLimits.maxTextureDimension3D + 1u, 1};
        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));

        descriptor.size = {1, 1, kMaxTextureDimension3D + 1u};
        descriptor.size = {1, 1, supportedLimits.maxTextureDimension3D + 1u};
        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
    }

@ -388,8 +404,9 @@ namespace {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.dimension = wgpu::TextureDimension::e3D;

        descriptor.size = {kMaxTextureDimension3D >> 1, kMaxTextureDimension3D >> 1,
                           kMaxTextureDimension3D >> 1};
        descriptor.size = {supportedLimits.maxTextureDimension3D >> 1,
                           supportedLimits.maxTextureDimension3D >> 1,
                           supportedLimits.maxTextureDimension3D >> 1};
        device.CreateTexture(&descriptor);
    }

@ -398,8 +415,9 @@ namespace {
        wgpu::TextureDescriptor descriptor = defaultDescriptor;
        descriptor.dimension = wgpu::TextureDimension::e3D;

        descriptor.size = {kMaxTextureDimension3D, kMaxTextureDimension3D,
                           kMaxTextureDimension3D};
        descriptor.size = {supportedLimits.maxTextureDimension3D,
                           supportedLimits.maxTextureDimension3D,
                           supportedLimits.maxTextureDimension3D};
        device.CreateTexture(&descriptor);
    }
}

@ -179,6 +179,13 @@ bool ValidationTest::HasToggleEnabled(const char* toggle) const {
           }) != toggles.end();
}

wgpu::SupportedLimits ValidationTest::GetSupportedLimits() {
    WGPUSupportedLimits supportedLimits;
    supportedLimits.nextInChain = nullptr;
    dawn_native::GetProcs().deviceGetLimits(backendDevice, &supportedLimits);
    return *reinterpret_cast<wgpu::SupportedLimits*>(&supportedLimits);
}
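
The reinterpret_cast is sound only if wgpu::SupportedLimits is layout-compatible with the C struct it wraps; a sketch of the invariant being relied on (wrappers generated in webgpu_cpp.h typically assert exactly this):

    static_assert(sizeof(wgpu::SupportedLimits) == sizeof(WGPUSupportedLimits),
                  "the C++ wrapper must mirror the C struct exactly");
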

WGPUDevice ValidationTest::CreateTestDevice() {
    // Disabled disallowing unsafe APIs so we can test them.
    dawn_native::DeviceDescriptor deviceDescriptor;

@ -96,6 +96,11 @@ class ValidationTest : public testing::Test {

    bool HasToggleEnabled(const char* toggle) const;

    // TODO(crbug.com/dawn/689): Use limits returned from the wire
    // This is implemented here because tests need to always query
    // the |backendDevice| since limits are not implemented in the wire.
    wgpu::SupportedLimits GetSupportedLimits();

  protected:
    virtual WGPUDevice CreateTestDevice();