Bubble up errors from EnsureSubresourceContentInitialized.

Bug: dawn:1336
Change-Id: I1fd189bd6e3689df6f10351e8ba19fee569bda23
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/122023
Kokoro: Kokoro <noreply+kokoro@google.com>
Commit-Queue: Loko Kung <lokokung@google.com>
Reviewed-by: Austin Eng <enga@chromium.org>
Loko Kung 2023-03-01 21:53:31 +00:00 committed by Dawn LUCI CQ
parent 84532462f6
commit 02e456c9fb
22 changed files with 242 additions and 149 deletions

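Every file below applies the same transformation: helpers that returned void and swallowed failures through ConsumedError now return MaybeError, and their callers propagate failures with DAWN_TRY. A minimal sketch of the before/after shape, distilled from the diffs below (the function names are the real Dawn ones; the bodies are abbreviated):

    // Before: a failed lazy clear is consumed on the device; the caller never learns of it.
    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                                      const SubresourceRange& range) {
        GetDevice()->ConsumedError(
            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
    }

    // After: the error is returned, so every caller must DAWN_TRY it (or handle it) in turn.
    MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                                            const SubresourceRange& range) {
        DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
        return {};
    }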
View File

@ -55,18 +55,18 @@ MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
// combination of readonly usages.
for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
- MaybeError error = {};
- textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
+ DAWN_TRY(textureUsage.Iterate(
+ [&](const SubresourceRange&, const wgpu::TextureUsage& usage) -> MaybeError {
bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
bool singleUse = wgpu::HasZeroOrOneBits(usage);
- if (!readOnly && !singleUse && !error.IsError()) {
- error = DAWN_VALIDATION_ERROR(
+ if (!readOnly && !singleUse) {
+ return DAWN_VALIDATION_ERROR(
"%s usage (%s) includes writable usage and another usage in the same "
"synchronization scope.",
scope.textures[i], usage);
}
- });
- DAWN_TRY(std::move(error));
+ return {};
+ }));
}
return {};
}

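Note the explicit -> MaybeError trailing return type on the converted lambda above (and on every callback in this patch): the body mixes return DAWN_VALIDATION_ERROR(...) with the brace-initialized success path return {};, and a braced-init-list has no type, so the lambda's return type cannot be deduced and must be spelled out.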
View File

@ -18,11 +18,13 @@
#include <array>
#include <limits>
#include <memory>
+ #include <type_traits>
#include <vector>
#include "dawn/common/Assert.h"
#include "dawn/common/TypeTraits.h"
#include "dawn/native/EnumMaskIterator.h"
#include "dawn/native/Error.h"
#include "dawn/native/Subresource.h"
namespace dawn::native {
@ -120,17 +122,27 @@ class SubresourceStorage {
// same for multiple subresources.
const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
- // Given an iterateFunc that's a function or function-like objet that can be called with
- // arguments of type (const SubresourceRange& range, const T& data) and returns void,
- // calls it with aggregate ranges if possible, such that each subresource is part of
+ // Given an iterateFunc that's a function or function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, const T& data) and returns either void or
+ // MaybeError, calls it with aggregate ranges if possible, such that each subresource is part of
// exactly one of the ranges iterateFunc is called with (and obviously data is the value
- // stored for that subresource). For example:
+ // stored for that subresource). Note that for MaybeError version, Iterate will return on the
+ // first error. Example usages:
//
+ // // Returning void version:
// subresources.Iterate([&](const SubresourceRange& range, const T& data) {
// // Do something with range and data.
// });
- template <typename F>
- void Iterate(F&& iterateFunc) const;
+ //
+ // // Return MaybeError version:
+ // DAWN_TRY(subresources.Iterate(
+ // [&](const SubresourceRange& range, const T& data) -> MaybeError {
+ // // Do something with range and data.
+ // // Return a MaybeError.
+ // })
+ // );
+ template <typename F, typename R = std::invoke_result_t<F, const SubresourceRange&, const T&>>
+ R Iterate(F&& iterateFunc) const;
// Given an updateFunc that's a function or function-like objet that can be called with
// arguments of type (const SubresourceRange& range, T* data) and returns void,
@ -239,6 +251,11 @@ SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
template <typename T>
template <typename F>
void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+ ASSERT(range.baseArrayLayer < mArrayLayerCount &&
+ range.baseArrayLayer + range.layerCount <= mArrayLayerCount);
+ ASSERT(range.baseMipLevel < mMipLevelCount &&
+ range.baseMipLevel + range.levelCount <= mMipLevelCount);
bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
bool fullAspects =
range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
@ -351,8 +368,12 @@ void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeF
}
template <typename T>
- template <typename F>
- void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+ template <typename F, typename R>
+ R SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+ static_assert(std::is_same_v<R, MaybeError> || std::is_same_v<R, void>,
+ "R must be either void or MaybeError");
+ constexpr bool mayError = std::is_same_v<R, MaybeError>;
for (Aspect aspect : IterateEnumMask(mAspects)) {
uint32_t aspectIndex = GetAspectIndex(aspect);
@ -360,7 +381,11 @@ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
if (mAspectCompressed[aspectIndex]) {
SubresourceRange range =
SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
- iterateFunc(range, DataInline(aspectIndex));
+ if constexpr (mayError) {
+ DAWN_TRY(iterateFunc(range, DataInline(aspectIndex)));
+ } else {
+ iterateFunc(range, DataInline(aspectIndex));
+ }
continue;
}
@ -368,17 +393,28 @@ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
// Fast path, call iterateFunc on the whole array layer at once.
if (LayerCompressed(aspectIndex, layer)) {
SubresourceRange range = GetFullLayerRange(aspect, layer);
- iterateFunc(range, Data(aspectIndex, layer));
+ if constexpr (mayError) {
+ DAWN_TRY(iterateFunc(range, Data(aspectIndex, layer)));
+ } else {
+ iterateFunc(range, Data(aspectIndex, layer));
+ }
continue;
}
// Slow path, call iterateFunc for each mip level.
for (uint32_t level = 0; level < mMipLevelCount; level++) {
SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
- iterateFunc(range, Data(aspectIndex, layer, level));
+ if constexpr (mayError) {
+ DAWN_TRY(iterateFunc(range, Data(aspectIndex, layer, level)));
+ } else {
+ iterateFunc(range, Data(aspectIndex, layer, level));
+ }
}
}
}
+ if constexpr (mayError) {
+ return {};
+ }
}
template <typename T>

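The deduced-return-type dispatch above is the core mechanism of the patch: R is computed from the callback with std::invoke_result_t, a static_assert restricts it to void or MaybeError, and if constexpr compiles only the matching propagation path, so void callers pay nothing for the new error plumbing. A self-contained sketch of the same technique, using bool as a stand-in for MaybeError so it compiles outside Dawn (ForEach, Range, and visit are hypothetical names, not Dawn's API):

    #include <type_traits>

    template <typename T>
    struct Range {
        T* begin;
        T* end;
    };

    // R is deduced from what the visitor returns; if constexpr selects the
    // propagating or the non-propagating loop body at compile time.
    template <typename F, typename T, typename R = std::invoke_result_t<F, T&>>
    R ForEach(Range<T> range, F&& visit) {
        static_assert(std::is_same_v<R, void> || std::is_same_v<R, bool>,
                      "visitor must return void or bool");
        for (T* it = range.begin; it != range.end; ++it) {
            if constexpr (std::is_same_v<R, bool>) {
                if (!visit(*it)) {
                    return false;  // stop at the first failure, like DAWN_TRY
                }
            } else {
                visit(*it);
            }
        }
        if constexpr (std::is_same_v<R, bool>) {
            return true;
        }
    }

    // Usage: a bool-returning visitor propagates failure; a void one cannot fail.
    //   int data[3] = {1, 2, 3};
    //   bool ok = ForEach(Range<int>{data, data + 3}, [](int& v) { return v != 2; });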
View File

@ -322,11 +322,12 @@ void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
0);
}
- // Records the necessary barriers for a synchronization scope using the resource usage
- // data pre-computed in the frontend. Also performs lazy initialization if required.
- // Returns whether any UAV are used in the synchronization scope.
- bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
- const SyncScopeResourceUsage& usages) {
+ // Records the necessary barriers for a synchronization scope using the resource usage data
+ // pre-computed in the frontend. Also performs lazy initialization if required. Returns whether any
+ // UAV are used in the synchronization scope if `passHasUAV` is passed and no errors are hit.
+ MaybeError TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+ const SyncScopeResourceUsage& usages,
+ bool* passHasUAV = nullptr) {
std::vector<D3D12_RESOURCE_BARRIER> barriers;
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@ -336,9 +337,8 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
for (size_t i = 0; i < usages.buffers.size(); ++i) {
Buffer* buffer = ToBackend(usages.buffers[i]);
- // TODO(crbug.com/dawn/852): clear storage buffers with
- // ClearUnorderedAccessView*().
- buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+ // TODO(crbug.com/dawn/852): clear storage buffers with ClearUnorderedAccessView*().
+ DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
D3D12_RESOURCE_BARRIER barrier;
if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
@ -356,13 +356,14 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
// Clear subresources that are not render attachments. Render attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
// subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ DAWN_TRY(usages.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
}
textureUsages |= usage;
- });
+ return {};
+ }));
ToBackend(usages.textures[i])
->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
@ -373,8 +374,11 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
commandList->ResourceBarrier(barriers.size(), barriers.data());
}
- return (bufferUsages & wgpu::BufferUsage::Storage ||
- textureUsages & wgpu::TextureUsage::StorageBinding);
+ if (passHasUAV) {
+ *passHasUAV = bufferUsages & wgpu::BufferUsage::Storage ||
+ textureUsages & wgpu::TextureUsage::StorageBinding;
+ }
+ return {};
}
} // anonymous namespace
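A note on the signature choice: the return slot is now occupied by MaybeError, so the UAV flag moves to an optional out-parameter. Compute-pass callers that don't need it simply omit the argument, as in DAWN_TRY(TransitionAndClearForSyncScope(commandContext, usages)), while the render-pass caller below passes &passHasUAV. ResultOrError<bool> would have been an alternative, at the cost of forcing every caller to unpack a value most of them ignore.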
@ -753,8 +757,10 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
BeginRenderPassCmd* beginRenderPassCmd =
mCommands.NextCommand<BeginRenderPassCmd>();
- const bool passHasUAV = TransitionAndClearForSyncScope(
- commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ bool passHasUAV;
+ DAWN_TRY(TransitionAndClearForSyncScope(
+ commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber],
+ &passHasUAV));
bindingTracker.SetInComputePass(false);
LazyClearRenderPassAttachments(beginRenderPassCmd);
@ -808,7 +814,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
copy->destination.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, subresources);
} else {
- texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+ DAWN_TRY(
+ texture->EnsureSubresourceContentInitialized(commandContext, subresources));
}
buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
@ -842,7 +849,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
SubresourceRange subresources =
GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
- texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+ DAWN_TRY(
+ texture->EnsureSubresourceContentInitialized(commandContext, subresources));
texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
subresources);
@ -875,12 +883,13 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
SubresourceRange dstRange =
GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
- source->EnsureSubresourceContentInitialized(commandContext, srcRange);
+ DAWN_TRY(source->EnsureSubresourceContentInitialized(commandContext, srcRange));
if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
copy->destination.mipLevel)) {
destination->SetIsSubresourceContentInitialized(true, dstRange);
} else {
- destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
+ DAWN_TRY(
+ destination->EnsureSubresourceContentInitialized(commandContext, dstRange));
}
if (copy->source.texture.Get() == copy->destination.texture.Get() &&
@ -1145,8 +1154,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandCont
break;
}
- TransitionAndClearForSyncScope(commandContext,
- resourceUsages.dispatchUsages[currentDispatch]);
+ DAWN_TRY(TransitionAndClearForSyncScope(
+ commandContext, resourceUsages.dispatchUsages[currentDispatch]));
DAWN_TRY(bindingTracker->Apply(commandContext));
RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
@ -1158,8 +1167,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandCont
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- TransitionAndClearForSyncScope(commandContext,
- resourceUsages.dispatchUsages[currentDispatch]);
+ DAWN_TRY(TransitionAndClearForSyncScope(
+ commandContext, resourceUsages.dispatchUsages[currentDispatch]));
DAWN_TRY(bindingTracker->Apply(commandContext));
ComPtr<ID3D12CommandSignature> signature =

View File

@ -530,7 +530,7 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, range);
} else {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
}
texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);

View File

@ -1212,17 +1212,17 @@ void Texture::SetLabelImpl() {
SetLabelHelper("Dawn_InternalTexture");
}
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
const SubresourceRange& range) {
if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
+ return {};
}
if (!IsSubresourceContentInitialized(range)) {
// If subresource has not been initialized, clear it to black as it could contain
// dirty bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+ DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
}
+ return {};
}
bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {

View File

@ -77,7 +77,7 @@ class Texture final : public TextureBase {
bool depthReadOnly,
bool stencilReadOnly) const;
- void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
const SubresourceRange& range);
MaybeError SynchronizeImportedTextureBeforeUse();

View File

@ -735,23 +735,25 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
size_t nextRenderPassNumber = 0;
auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
- CommandRecordingContext* commandContext) {
+ CommandRecordingContext* commandContext) -> MaybeError {
for (size_t i = 0; i < scope.textures.size(); ++i) {
Texture* texture = ToBackend(scope.textures[i]);
// Clear subresources that are not render attachments. Render attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
// subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ DAWN_TRY(scope.textureUsages[i].Iterate([&](const SubresourceRange& range,
+ wgpu::TextureUsage usage) -> MaybeError {
if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
}
- });
+ return {};
+ }));
}
for (BufferBase* bufferBase : scope.buffers) {
ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
}
+ return {};
};
Command type;
@ -766,7 +768,7 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
}
for (const SyncScopeResourceUsage& scope :
GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
- LazyClearSyncScope(scope, commandContext);
+ DAWN_TRY(LazyClearSyncScope(scope, commandContext));
}
commandContext->EndBlit();
@ -793,8 +795,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
}
}
}
- LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
- commandContext);
+ DAWN_TRY(LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+ commandContext));
commandContext->EndBlit();
LazyClearRenderPassAttachments(cmd);
@ -858,7 +860,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
Texture* texture = ToBackend(dst.texture.Get());
buffer->EnsureDataInitialized(commandContext);
- EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+ DAWN_TRY(
+ EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize));
buffer->TrackUsage();
texture->SynchronizeTextureBeforeUse(commandContext);
@ -884,8 +887,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
buffer->EnsureDataInitializedAsDestination(commandContext, copy);
texture->SynchronizeTextureBeforeUse(commandContext);
- texture->EnsureSubresourceContentInitialized(
- commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(
+ commandContext, GetSubresourcesAffectedByCopy(src, copySize)));
buffer->TrackUsage();
TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
@ -975,10 +978,10 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
srcTexture->SynchronizeTextureBeforeUse(commandContext);
dstTexture->SynchronizeTextureBeforeUse(commandContext);
- srcTexture->EnsureSubresourceContentInitialized(
- commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
- EnsureDestinationTextureInitialized(commandContext, dstTexture, copy->destination,
- copy->copySize);
+ DAWN_TRY(srcTexture->EnsureSubresourceContentInitialized(
+ commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize)));
+ DAWN_TRY(EnsureDestinationTextureInitialized(commandContext, dstTexture,
+ copy->destination, copy->copySize));
const MTLSize sizeOneSlice =
MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);

View File

@ -395,8 +395,8 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
const Extent3D& copySizePixels) {
Texture* texture = ToBackend(dst.texture.Get());
texture->SynchronizeTextureBeforeUse(GetPendingCommandContext());
- EnsureDestinationTextureInitialized(GetPendingCommandContext(DeviceBase::SubmitMode::Passive),
- texture, dst, copySizePixels);
+ DAWN_TRY(EnsureDestinationTextureInitialized(
+ GetPendingCommandContext(DeviceBase::SubmitMode::Passive), texture, dst, copySizePixels));
RecordCopyBufferToTexture(GetPendingCommandContext(DeviceBase::SubmitMode::Passive),
ToBackend(source)->GetMTLBuffer(), source->GetSize(),

View File

@ -58,7 +58,7 @@ class Texture final : public TextureBase {
bool ShouldKeepInitialized() const;
MTLBlitOption ComputeMTLBlitOption(Aspect aspect) const;
- void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
const SubresourceRange& range);
void SynchronizeTextureBeforeUse(CommandRecordingContext* commandContext);

View File

@ -1076,19 +1076,19 @@ MTLBlitOption Texture::ComputeMTLBlitOption(Aspect aspect) const {
return MTLBlitOptionNone;
}
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
+ return {};
}
if (!IsSubresourceContentInitialized(range)) {
// If subresource has not been initialized, clear it to black as it could
// contain dirty bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+ DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
SetIsSubresourceContentInitialized(true, range);
GetDevice()->IncrementLazyClearCountForTesting();
}
+ return {};
}
// static
@ -1165,9 +1165,8 @@ MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
// TODO(enga): Add a workaround to back combined depth/stencil textures
// with Sampled usage using two separate textures.
// Or, consider always using the workaround for D32S8.
- device->ConsumedError(
- DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
- "combined depth/stencil format."));
+ return DAWN_INTERNAL_ERROR("Cannot create stencil-only texture view of combined "
+ "depth/stencil format.");
}
} else if (GetTexture()->GetFormat().HasDepth() && GetTexture()->GetFormat().HasStencil()) {
// Depth-only views for depth/stencil textures in Metal simply use the original

View File

@ -75,7 +75,7 @@ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
uint32_t rowsPerImage,
Aspect aspect);
- void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ MaybeError EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
Texture* texture,
const TextureCopy& dst,
const Extent3D& size);

View File

@ -344,7 +344,7 @@ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
return copy;
}
- void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ MaybeError EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
Texture* texture,
const TextureCopy& dst,
const Extent3D& size) {
@ -353,8 +353,9 @@ void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, range);
} else {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
}
+ return {};
}
MaybeError EncodeMetalRenderPass(Device* device,

View File

@ -451,24 +451,26 @@ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescrip
MaybeError CommandBuffer::Execute() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->GetGL();
- auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) -> MaybeError {
for (size_t i = 0; i < scope.textures.size(); i++) {
Texture* texture = ToBackend(scope.textures[i]);
// Clear subresources that are not render attachments. Render attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
// subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ DAWN_TRY(scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(range);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(range));
}
- });
+ return {};
+ }));
}
for (BufferBase* bufferBase : scope.buffers) {
ToBackend(bufferBase)->EnsureDataInitialized();
}
+ return {};
};
size_t nextComputePassNumber = 0;
@ -481,7 +483,7 @@ MaybeError CommandBuffer::Execute() {
mCommands.NextCommand<BeginComputePassCmd>();
for (const SyncScopeResourceUsage& scope :
GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
- LazyClearSyncScope(scope);
+ DAWN_TRY(LazyClearSyncScope(scope));
}
DAWN_TRY(ExecuteComputePass());
@ -491,7 +493,8 @@ MaybeError CommandBuffer::Execute() {
case Command::BeginRenderPass: {
auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ DAWN_TRY(
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]));
LazyClearRenderPassAttachments(cmd);
DAWN_TRY(ExecuteRenderPass(cmd));
@ -546,7 +549,7 @@ MaybeError CommandBuffer::Execute() {
dst.mipLevel)) {
dst.texture->SetIsSubresourceContentInitialized(true, range);
} else {
- ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
+ DAWN_TRY(ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range));
}
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
@ -593,7 +596,7 @@ MaybeError CommandBuffer::Execute() {
buffer->EnsureDataInitializedAsDestination(copy);
SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
- texture->EnsureSubresourceContentInitialized(subresources);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(subresources));
// The only way to move data from a texture to a buffer in GL is via
// glReadPixels with a pack buffer. Create a temporary FBO for the copy.
gl.BindTexture(target, texture->GetHandle());
@ -694,11 +697,11 @@ MaybeError CommandBuffer::Execute() {
SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
- srcTexture->EnsureSubresourceContentInitialized(srcRange);
+ DAWN_TRY(srcTexture->EnsureSubresourceContentInitialized(srcRange));
if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
} else {
- dstTexture->EnsureSubresourceContentInitialized(dstRange);
+ DAWN_TRY(dstTexture->EnsureSubresourceContentInitialized(dstRange));
}
CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(), srcTexture->GetGLTarget(),
src.mipLevel, src.origin, dstTexture->GetHandle(),

View File

@ -250,7 +250,7 @@ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
}
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return AcquireRef(new Texture(this, descriptor));
+ return Texture::Create(this, descriptor);
}
ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
TextureBase* texture,
@ -319,10 +319,10 @@ TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor
if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
textureDescriptor->size.height != static_cast<uint32_t>(height) ||
textureDescriptor->size.depthOrArrayLayers != 1) {
- ConsumedError(DAWN_VALIDATION_ERROR(
+ gl.DeleteTextures(1, &tex);
+ HandleError(DAWN_VALIDATION_ERROR(
"EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
width, height, &textureDescriptor->size));
- gl.DeleteTextures(1, &tex);
return nullptr;
}

View File

@ -69,7 +69,7 @@ MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel, destination.mipLevel)) {
destination.texture->SetIsSubresourceContentInitialized(true, range);
} else {
- ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
+ DAWN_TRY(ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range));
}
DoTexSubImage(ToBackend(GetDevice())->GetGL(), textureCopy, data, dataLayout, writeSizePixel);
ToBackend(destination.texture)->Touch();

View File

@ -15,6 +15,7 @@
#include "dawn/native/opengl/TextureGL.h"
#include <limits>
+ #include <utility>
#include "dawn/common/Assert.h"
#include "dawn/common/Constants.h"
@ -170,6 +171,16 @@ void AllocateTexture(const OpenGLFunctions& gl,
// Texture
+ // static
+ ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+ Ref<Texture> texture = AcquireRef(new Texture(device, descriptor));
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ DAWN_TRY(
+ texture->ClearTexture(texture->GetAllSubresources(), TextureBase::ClearValue::NonZero));
+ }
+ return std::move(texture);
+ }
Texture::Texture(Device* device, const TextureDescriptor* descriptor)
: Texture(device, descriptor, 0, TextureState::OwnedInternal) {
const OpenGLFunctions& gl = device->GetGL();
@ -186,11 +197,6 @@ Texture::Texture(Device* device, const TextureDescriptor* descriptor)
// The texture is not complete if it uses mipmapping and not all levels up to
// MAX_LEVEL have been defined.
gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- GetDevice()->ConsumedError(
- ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
- }
}
void Texture::Touch() {
@ -539,13 +545,14 @@ MaybeError Texture::ClearTexture(const SubresourceRange& range,
return {};
}
- void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+ MaybeError Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
+ return {};
}
if (!IsSubresourceContentInitialized(range)) {
- GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+ DAWN_TRY(ClearTexture(range, TextureBase::ClearValue::Zero));
}
+ return {};
}
// TextureView

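The OpenGL texture change above shows the second recurring pattern in this patch: a constructor cannot return a MaybeError, so the fallible initialization (here the NonzeroClearResourcesOnCreationForTesting clear) moves out of the constructor into a static Create factory, and the constructor becomes private so Create is the only entry point (see the header diff below). A minimal sketch of that shape (Widget and Initialize are hypothetical names; ResultOrError, Ref, AcquireRef, and DAWN_TRY are the Dawn primitives used above):

    class Widget : public RefCounted {
      public:
        // The only way to make a Widget; initialization failures reach the caller.
        static ResultOrError<Ref<Widget>> Create(Device* device) {
            Ref<Widget> widget = AcquireRef(new Widget(device));  // infallible construction
            DAWN_TRY(widget->Initialize());                       // fallible setup, may bubble up
            return std::move(widget);
        }

      private:
        explicit Widget(Device* device);  // private: callers must go through Create
        MaybeError Initialize();
    };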
View File

@ -26,7 +26,7 @@ struct GLFormat;
class Texture final : public TextureBase {
public:
- Texture(Device* device, const TextureDescriptor* descriptor);
+ static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
Texture(Device* device, const TextureDescriptor* descriptor, GLuint handle, TextureState state);
GLuint GetHandle() const;
@ -35,9 +35,10 @@ class Texture final : public TextureBase {
uint32_t GetGenID() const;
void Touch();
- void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+ MaybeError EnsureSubresourceContentInitialized(const SubresourceRange& range);
private:
+ Texture(Device* device, const TextureDescriptor* descriptor);
~Texture() override;
void DestroyImpl() override;

View File

@ -154,7 +154,7 @@ class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
// Records the necessary barriers for a synchronization scope using the resource usage
// data pre-computed in the frontend. Also performs lazy initialization if required.
- void TransitionAndClearForSyncScope(Device* device,
+ MaybeError TransitionAndClearForSyncScope(Device* device,
CommandRecordingContext* recordingContext,
const SyncScopeResourceUsage& scope) {
std::vector<VkBufferMemoryBarrier> bufferBarriers;
@ -179,12 +179,13 @@ void TransitionAndClearForSyncScope(Device* device,
// Clear subresources that are not render attachments. Render attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
// subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ DAWN_TRY(scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(recordingContext, range);
+ DAWN_TRY(texture->EnsureSubresourceContentInitialized(recordingContext, range));
}
- });
+ return {};
+ }));
texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i], &imageBarriers,
&srcStages, &dstStages);
}
@ -194,6 +195,7 @@ void TransitionAndClearForSyncScope(Device* device,
nullptr, bufferBarriers.size(), bufferBarriers.data(),
imageBarriers.size(), imageBarriers.data());
}
+ return {};
}
MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
@ -512,8 +514,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
// And resets the used query sets which are rewritten on the render pass.
auto PrepareResourcesForRenderPass = [](Device* device,
CommandRecordingContext* recordingContext,
- const RenderPassResourceUsage& usages) {
- TransitionAndClearForSyncScope(device, recordingContext, usages);
+ const RenderPassResourceUsage& usages) -> MaybeError {
+ DAWN_TRY(TransitionAndClearForSyncScope(device, recordingContext, usages));
// Reset all query set used on current render pass together before beginning render pass
// because the reset command must be called outside render pass
@ -521,6 +523,7 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
usages.querySets[i], usages.queryAvailabilities[i]);
}
+ return {};
};
size_t nextComputePassNumber = 0;
@ -580,8 +583,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
// Since texture has been overwritten, it has been "initialized"
dst.texture->SetIsSubresourceContentInitialized(true, range);
} else {
- ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, range);
+ DAWN_TRY(ToBackend(dst.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, range));
}
ToBackend(src.buffer)
->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
@ -614,8 +617,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
SubresourceRange range =
GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
- ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, range);
+ DAWN_TRY(ToBackend(src.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, range));
ToBackend(src.texture)
->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
@ -642,15 +645,15 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
- ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+ DAWN_TRY(ToBackend(src.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, srcRange));
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
dst.mipLevel)) {
// Since destination texture has been overwritten, it has been "initialized"
dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
} else {
- ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+ DAWN_TRY(ToBackend(dst.texture)
+ ->EnsureSubresourceContentInitialized(recordingContext, dstRange));
}
if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
@ -730,9 +733,9 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- PrepareResourcesForRenderPass(
+ DAWN_TRY(PrepareResourcesForRenderPass(
device, recordingContext,
- GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ GetResourceUsages().renderPasses[nextRenderPassNumber]));
LazyClearRenderPassAttachments(cmd);
DAWN_TRY(RecordRenderPass(recordingContext, cmd));
@ -935,8 +938,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingCo
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- TransitionAndClearForSyncScope(device, recordingContext,
- resourceUsages.dispatchUsages[currentDispatch]);
+ DAWN_TRY(TransitionAndClearForSyncScope(
+ device, recordingContext, resourceUsages.dispatchUsages[currentDispatch]));
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
@ -948,8 +951,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingCo
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
- TransitionAndClearForSyncScope(device, recordingContext,
- resourceUsages.dispatchUsages[currentDispatch]);
+ DAWN_TRY(TransitionAndClearForSyncScope(
+ device, recordingContext, resourceUsages.dispatchUsages[currentDispatch]));
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatchIndirect(commands, indirectBuffer,

View File

@ -816,7 +816,8 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
// Since texture has been overwritten, it has been "initialized"
dst.texture->SetIsSubresourceContentInitialized(true, range);
} else {
- ToBackend(dst.texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+ DAWN_TRY(
+ ToBackend(dst.texture)->EnsureSubresourceContentInitialized(recordingContext, range));
}
// Insert pipeline barrier to ensure correct ordering with previous memory operations on the
// texture.

View File

@ -1342,17 +1342,17 @@ MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
return {};
}
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+ MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
const SubresourceRange& range) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
+ return {};
}
if (!IsSubresourceContentInitialized(range)) {
// If subresource has not been initialized, clear it to black as it could contain dirty
// bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+ DAWN_TRY(ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
}
+ return {};
}
void Texture::UpdateExternalSemaphoreHandle(ExternalSemaphoreHandle handle) {

View File

@ -84,7 +84,7 @@ class Texture final : public TextureBase {
void TransitionEagerlyForExport(CommandRecordingContext* recordingContext);
std::vector<VkSemaphore> AcquireWaitRequirements();
- void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+ MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
const SubresourceRange& range);
VkImageLayout GetCurrentLayoutForSwapChain() const;

View File

@ -12,14 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+ #include <memory>
+ #include <string>
#include <vector>
#include "dawn/common/Log.h"
#include "dawn/native/SubresourceStorage.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace dawn::native {
+ using ::testing::HasSubstr;
// A fake class that replicates the behavior of SubresourceStorage but without any compression
// and is used to compare the results of operations on SubresourceStorage against the "ground
// truth" of FakeStorage.
@ -211,6 +216,31 @@ bool operator==(const SmallData& a, const SmallData& b) {
return a.value == b.value;
}
+ // Tests that the MaybeError version of Iterate returns the first error that it encounters.
+ TEST(SubresourceStorageTest, IterateMaybeError) {
+ // Create a resource with multiple layers of different data so that we can ensure that the
+ // iterate function runs more than once.
+ constexpr uint32_t kLayers = 4;
+ SubresourceStorage<uint32_t> s(Aspect::Color, kLayers, 1);
+ for (uint32_t layer = 0; layer < kLayers; layer++) {
+ s.Update(SubresourceRange::MakeSingle(Aspect::Color, layer, 0),
+ [&](const SubresourceRange&, uint32_t* data) { *data = layer + 1; });
+ }
+ // Make sure that the first error is returned.
+ uint32_t errorLayer = 0;
+ MaybeError maybeError =
+ s.Iterate([&](const SubresourceRange& range, const uint32_t& layer) -> MaybeError {
+ if (!errorLayer) {
+ errorLayer = layer;
+ }
+ return DAWN_VALIDATION_ERROR("Errored at layer: %d", layer);
+ });
+ ASSERT_TRUE(maybeError.IsError());
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ EXPECT_THAT(error->GetFormattedMessage(), HasSubstr(std::to_string(errorLayer)));
+ }
// Test that the default value is correctly set.
TEST(SubresourceStorageTest, DefaultValue) {
// Test setting no default value for a primitive type.