diff --git a/src/dawn/native/CommandValidation.cpp b/src/dawn/native/CommandValidation.cpp
index d328f82cc1..5f002dec9a 100644
--- a/src/dawn/native/CommandValidation.cpp
+++ b/src/dawn/native/CommandValidation.cpp
@@ -55,18 +55,18 @@ MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
     // combination of readonly usages.
     for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
         const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
-        MaybeError error = {};
-        textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
-            bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
-            bool singleUse = wgpu::HasZeroOrOneBits(usage);
-            if (!readOnly && !singleUse && !error.IsError()) {
-                error = DAWN_VALIDATION_ERROR(
-                    "%s usage (%s) includes writable usage and another usage in the same "
-                    "synchronization scope.",
-                    scope.textures[i], usage);
-            }
-        });
-        DAWN_TRY(std::move(error));
+        DAWN_TRY(textureUsage.Iterate(
+            [&](const SubresourceRange&, const wgpu::TextureUsage& usage) -> MaybeError {
+                bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
+                bool singleUse = wgpu::HasZeroOrOneBits(usage);
+                if (!readOnly && !singleUse) {
+                    return DAWN_VALIDATION_ERROR(
+                        "%s usage (%s) includes writable usage and another usage in the same "
+                        "synchronization scope.",
+                        scope.textures[i], usage);
+                }
+                return {};
+            }));
     }
     return {};
 }
diff --git a/src/dawn/native/SubresourceStorage.h b/src/dawn/native/SubresourceStorage.h
index cc3d10dacf..34e5dcf5a6 100644
--- a/src/dawn/native/SubresourceStorage.h
+++ b/src/dawn/native/SubresourceStorage.h
@@ -18,11 +18,13 @@
 #include <array>
 #include <limits>
 #include <memory>
+#include <type_traits>
 #include <vector>
 
 #include "dawn/common/Assert.h"
 #include "dawn/common/TypeTraits.h"
 #include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Error.h"
 #include "dawn/native/Subresource.h"
 
 namespace dawn::native {
@@ -120,17 +122,27 @@ class SubresourceStorage {
     // same for multiple subresources.
     const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
 
-    // Given an iterateFunc that's a function or function-like objet that can be called with
-    // arguments of type (const SubresourceRange& range, const T& data) and returns void,
-    // calls it with aggregate ranges if possible, such that each subresource is part of
+    // Given an iterateFunc that's a function or function-like object that can be called with
+    // arguments of type (const SubresourceRange& range, const T& data) and returns either void or
+    // MaybeError, calls it with aggregate ranges if possible, such that each subresource is part of
     // exactly one of the ranges iterateFunc is called with (and obviously data is the value
-    // stored for that subresource). For example:
+    // stored for that subresource). Note that for the MaybeError version, Iterate will return on
+    // the first error. Example usages:
     //
+    //   // Returning void version:
     //   subresources.Iterate([&](const SubresourceRange& range, const T& data) {
     //      // Do something with range and data.
     //   });
-    template <typename F>
-    void Iterate(F&& iterateFunc) const;
+    //
+    //   // Returning MaybeError version:
+    //   DAWN_TRY(subresources.Iterate(
+    //       [&](const SubresourceRange& range, const T& data) -> MaybeError {
+    //           // Do something with range and data.
+    //           // Return a MaybeError.
+    //       })
+    //   );
+    template <typename F, typename R = std::invoke_result_t<F, const SubresourceRange&, const T&>>
+    R Iterate(F&& iterateFunc) const;
 
     // Given an updateFunc that's a function or function-like objet that can be called with
     // arguments of type (const SubresourceRange& range, T* data) and returns void,
@@ -239,6 +251,11 @@ SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
 template <typename T>
 template <typename F>
 void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+    ASSERT(range.baseArrayLayer < mArrayLayerCount &&
+           range.baseArrayLayer + range.layerCount <= mArrayLayerCount);
+    ASSERT(range.baseMipLevel < mMipLevelCount &&
+           range.baseMipLevel + range.levelCount <= mMipLevelCount);
+
     bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
     bool fullAspects =
         range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
@@ -351,8 +368,12 @@ void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeF
 }
 
 template <typename T>
-template <typename F>
-void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+template <typename F, typename R>
+R SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+    static_assert(std::is_same_v<R, void> || std::is_same_v<R, MaybeError>,
+                  "R must be either void or MaybeError");
+    constexpr bool mayError = std::is_same_v<R, MaybeError>;
+
     for (Aspect aspect : IterateEnumMask(mAspects)) {
         uint32_t aspectIndex = GetAspectIndex(aspect);
 
@@ -360,7 +381,11 @@ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
         if (mAspectCompressed[aspectIndex]) {
            SubresourceRange range =
                 SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
-            iterateFunc(range, DataInline(aspectIndex));
+            if constexpr (mayError) {
+                DAWN_TRY(iterateFunc(range, DataInline(aspectIndex)));
+            } else {
+                iterateFunc(range, DataInline(aspectIndex));
+            }
             continue;
         }
 
@@ -368,17 +393,28 @@ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
             // Fast path, call iterateFunc on the whole array layer at once.
             if (LayerCompressed(aspectIndex, layer)) {
                 SubresourceRange range = GetFullLayerRange(aspect, layer);
-                iterateFunc(range, Data(aspectIndex, layer));
+                if constexpr (mayError) {
+                    DAWN_TRY(iterateFunc(range, Data(aspectIndex, layer)));
+                } else {
+                    iterateFunc(range, Data(aspectIndex, layer));
+                }
                 continue;
             }
 
             // Slow path, call iterateFunc for each mip level.
             for (uint32_t level = 0; level < mMipLevelCount; level++) {
                 SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                iterateFunc(range, Data(aspectIndex, layer, level));
+                if constexpr (mayError) {
+                    DAWN_TRY(iterateFunc(range, Data(aspectIndex, layer, level)));
+                } else {
+                    iterateFunc(range, Data(aspectIndex, layer, level));
+                }
             }
         }
     }
+    if constexpr (mayError) {
+        return {};
+    }
 }
 
 template <typename T>
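For readers skimming the header change above: the trick that lets one Iterate serve both callback shapes is deducing R from the callable and branching with if constexpr. The following standalone sketch shows the same pattern with simplified stand-in types (MaybeFailure and ForEach are invented for illustration and are not Dawn's MaybeError/DAWN_TRY machinery):

    #include <cstdio>
    #include <optional>
    #include <string>
    #include <type_traits>
    #include <vector>

    // Simplified stand-in for an error-or-success return type.
    using MaybeFailure = std::optional<std::string>;  // nullopt == success

    template <typename F, typename R = std::invoke_result_t<F, int>>
    R ForEach(const std::vector<int>& values, F&& func) {
        static_assert(std::is_same_v<R, void> || std::is_same_v<R, MaybeFailure>,
                      "R must be either void or MaybeFailure");
        constexpr bool mayError = std::is_same_v<R, MaybeFailure>;

        for (int v : values) {
            if constexpr (mayError) {
                // Stop on the first error and propagate it to the caller.
                if (MaybeFailure error = func(v)) {
                    return error;
                }
            } else {
                func(v);
            }
        }
        if constexpr (mayError) {
            return std::nullopt;  // Success.
        }
    }

    int main() {
        std::vector<int> values = {1, 2, 3, 4};

        // Void version: R deduces to void and no error plumbing is generated.
        ForEach(values, [](int v) { std::printf("%d\n", v); });

        // Error-returning version: iteration stops at the first failure.
        MaybeFailure result = ForEach(values, [](int v) -> MaybeFailure {
            if (v == 3) {
                return "value 3 is not allowed";
            }
            return std::nullopt;
        });
        if (result) {
            std::printf("error: %s\n", result->c_str());
        }
    }

The void instantiation discards the error-handling branches entirely, so existing void-returning callers keep compiling unchanged, which is what the untouched Iterate call sites in the rest of this patch rely on.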
diff --git a/src/dawn/native/d3d12/CommandBufferD3D12.cpp b/src/dawn/native/d3d12/CommandBufferD3D12.cpp
index 1ddead198a..53aa85617e 100644
--- a/src/dawn/native/d3d12/CommandBufferD3D12.cpp
+++ b/src/dawn/native/d3d12/CommandBufferD3D12.cpp
@@ -322,11 +322,12 @@ void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
                                     0);
 }
 
-// Records the necessary barriers for a synchronization scope using the resource usage
-// data pre-computed in the frontend. Also performs lazy initialization if required.
-// Returns whether any UAV are used in the synchronization scope.
-bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
-                                    const SyncScopeResourceUsage& usages) {
+// Records the necessary barriers for a synchronization scope using the resource usage data
+// pre-computed in the frontend. Also performs lazy initialization if required. Returns whether any
+// UAVs are used in the synchronization scope if `passHasUAV` is passed and no errors are hit.
+MaybeError TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+                                          const SyncScopeResourceUsage& usages,
+                                          bool* passHasUAV = nullptr) {
     std::vector<D3D12_RESOURCE_BARRIER> barriers;
 
     ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@@ -336,9 +337,8 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
 
     for (size_t i = 0; i < usages.buffers.size(); ++i) {
         Buffer* buffer = ToBackend(usages.buffers[i]);
-        // TODO(crbug.com/dawn/852): clear storage buffers with
-        // ClearUnorderedAccessView*().
-        buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+        // TODO(crbug.com/dawn/852): clear storage buffers with ClearUnorderedAccessView*().
+        DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
 
         D3D12_RESOURCE_BARRIER barrier;
         if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
@@ -356,13 +356,14 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
         // Clear subresources that are not render attachments. Render attachments will be
         // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
         // subresource has not been initialized before the render pass.
-        usages.textureUsages[i].Iterate(
-            [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+        DAWN_TRY(usages.textureUsages[i].Iterate(
+            [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
                 if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                    texture->EnsureSubresourceContentInitialized(commandContext, range);
+                    DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
                 }
                 textureUsages |= usage;
-            });
+                return {};
+            }));
 
         ToBackend(usages.textures[i])
             ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
@@ -373,8 +374,11 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
         commandList->ResourceBarrier(barriers.size(), barriers.data());
     }
 
-    return (bufferUsages & wgpu::BufferUsage::Storage ||
-            textureUsages & wgpu::TextureUsage::StorageBinding);
+    if (passHasUAV) {
+        *passHasUAV = bufferUsages & wgpu::BufferUsage::Storage ||
+                      textureUsages & wgpu::TextureUsage::StorageBinding;
+    }
+    return {};
 }
 
 }  // anonymous namespace
@@ -753,8 +757,10 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                 BeginRenderPassCmd* beginRenderPassCmd =
                     mCommands.NextCommand<BeginRenderPassCmd>();
 
-                const bool passHasUAV = TransitionAndClearForSyncScope(
-                    commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                bool passHasUAV;
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber],
+                    &passHasUAV));
                 bindingTracker.SetInComputePass(false);
 
                 LazyClearRenderPassAttachments(beginRenderPassCmd);
@@ -808,7 +814,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                                                       copy->destination.mipLevel)) {
                     texture->SetIsSubresourceContentInitialized(true, subresources);
                 } else {
-                    texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+                    DAWN_TRY(
+                        texture->EnsureSubresourceContentInitialized(commandContext, subresources));
                 }
 
                 buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
@@ -842,7 +849,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                 SubresourceRange subresources =
                     GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
 
-                texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+                DAWN_TRY(
+                    texture->EnsureSubresourceContentInitialized(commandContext, subresources));
 
                 texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
                                                     subresources);
@@ -875,12 +883,13 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                 SubresourceRange dstRange =
                     GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
 
-                source->EnsureSubresourceContentInitialized(commandContext, srcRange);
+                DAWN_TRY(source->EnsureSubresourceContentInitialized(commandContext, srcRange));
                 if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
                                                   copy->destination.mipLevel)) {
                     destination->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
+                    DAWN_TRY(
+                        destination->EnsureSubresourceContentInitialized(commandContext, dstRange));
                 }
 
                 if (copy->source.texture.Get() == copy->destination.texture.Get() &&
@@ -1145,8 +1154,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandCont
                     break;
                 }
 
-                TransitionAndClearForSyncScope(commandContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    commandContext, resourceUsages.dispatchUsages[currentDispatch]));
                 DAWN_TRY(bindingTracker->Apply(commandContext));
 
                 RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
@@ -1158,8 +1167,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandCont
             case Command::DispatchIndirect: {
                 DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
 
-                TransitionAndClearForSyncScope(commandContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    commandContext, resourceUsages.dispatchUsages[currentDispatch]));
                 DAWN_TRY(bindingTracker->Apply(commandContext));
 
                 ComPtr<ID3D12CommandSignature> signature =
diff --git a/src/dawn/native/d3d12/DeviceD3D12.cpp b/src/dawn/native/d3d12/DeviceD3D12.cpp
index 62a0d9e679..43fa4cf6eb 100644
--- a/src/dawn/native/d3d12/DeviceD3D12.cpp
+++ b/src/dawn/native/d3d12/DeviceD3D12.cpp
@@ -530,7 +530,7 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
     if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst.mipLevel)) {
         texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        texture->EnsureSubresourceContentInitialized(commandContext, range);
+        DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
     }
 
     texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
diff --git a/src/dawn/native/d3d12/TextureD3D12.cpp b/src/dawn/native/d3d12/TextureD3D12.cpp
index 7b493911ab..2d08a14bfa 100644
--- a/src/dawn/native/d3d12/TextureD3D12.cpp
+++ b/src/dawn/native/d3d12/TextureD3D12.cpp
@@ -1212,17 +1212,17 @@ void Texture::SetLabelImpl() {
     SetLabelHelper("Dawn_InternalTexture");
 }
 
-void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                                  const SubresourceRange& range) {
+MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                        const SubresourceRange& range) {
     if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
         // If subresource has not been initialized, clear it to black as it could contain
         // dirty bits from recycled memory
-        GetDevice()->ConsumedError(
-            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
     }
+    return {};
 }
 
 bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
diff --git a/src/dawn/native/d3d12/TextureD3D12.h b/src/dawn/native/d3d12/TextureD3D12.h
index 866fcdf5f0..f12121e030 100644
--- a/src/dawn/native/d3d12/TextureD3D12.h
+++ b/src/dawn/native/d3d12/TextureD3D12.h
@@ -77,8 +77,8 @@ class Texture final : public TextureBase {
                                     bool depthReadOnly,
                                     bool stencilReadOnly) const;
 
-    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                             const SubresourceRange& range);
+    MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                   const SubresourceRange& range);
 
     MaybeError SynchronizeImportedTextureBeforeUse();
     MaybeError SynchronizeImportedTextureAfterUse();
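The D3D12 changes above follow one mechanical recipe: EnsureSubresourceContentInitialized and friends now return MaybeError, callers wrap them in DAWN_TRY, and TransitionAndClearForSyncScope reports its "pass uses UAVs" bit through an optional out-parameter because the return slot now carries the error. A standalone sketch of that out-parameter-plus-error-return shape, with an invented MaybeFailure alias standing in for MaybeError:

    #include <cstdio>
    #include <optional>
    #include <string>

    using MaybeFailure = std::optional<std::string>;  // nullopt == success

    // Before: `bool TransitionAndClear(...)` returned the UAV flag directly and had
    // no way to report failures. After: the error takes the return slot and the flag
    // is written through an optional pointer, so call sites that do not care about
    // it (e.g. compute dispatches) simply omit the argument.
    MaybeFailure TransitionAndClear(bool usesStorage, bool* passHasUAV = nullptr) {
        // ... record barriers and lazily clear resources; any failure returns early ...
        if (passHasUAV != nullptr) {
            *passHasUAV = usesStorage;
        }
        return std::nullopt;
    }

    int main() {
        // Render-pass-style call site: wants the flag and must handle errors.
        bool passHasUAV = false;
        if (MaybeFailure err = TransitionAndClear(/*usesStorage=*/true, &passHasUAV)) {
            std::printf("error: %s\n", err->c_str());
            return 1;
        }
        std::printf("passHasUAV = %d\n", passHasUAV);

        // Dispatch-style call site: only cares about success or failure.
        if (MaybeFailure err = TransitionAndClear(/*usesStorage=*/false)) {
            std::printf("error: %s\n", err->c_str());
            return 1;
        }
    }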
diff --git a/src/dawn/native/metal/CommandBufferMTL.mm b/src/dawn/native/metal/CommandBufferMTL.mm
index a0a83ee572..2812563e26 100644
--- a/src/dawn/native/metal/CommandBufferMTL.mm
+++ b/src/dawn/native/metal/CommandBufferMTL.mm
@@ -735,23 +735,25 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
     size_t nextRenderPassNumber = 0;
 
     auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
-                                 CommandRecordingContext* commandContext) {
+                                 CommandRecordingContext* commandContext) -> MaybeError {
         for (size_t i = 0; i < scope.textures.size(); ++i) {
             Texture* texture = ToBackend(scope.textures[i]);
 
             // Clear subresources that are not render attachments. Render attachments will be
             // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
             // subresource has not been initialized before the render pass.
-            scope.textureUsages[i].Iterate(
-                [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
-                    if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                        texture->EnsureSubresourceContentInitialized(commandContext, range);
-                    }
-                });
+            DAWN_TRY(scope.textureUsages[i].Iterate([&](const SubresourceRange& range,
+                                                        wgpu::TextureUsage usage) -> MaybeError {
+                if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                    DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
+                }
+                return {};
+            }));
         }
         for (BufferBase* bufferBase : scope.buffers) {
             ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
         }
+        return {};
     };
 
     Command type;
@@ -766,7 +768,7 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                 }
                 for (const SyncScopeResourceUsage& scope :
                      GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
-                    LazyClearSyncScope(scope, commandContext);
+                    DAWN_TRY(LazyClearSyncScope(scope, commandContext));
                 }
                 commandContext->EndBlit();
 
@@ -793,8 +795,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                         }
                     }
                 }
-                LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
-                                   commandContext);
+                DAWN_TRY(LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+                                            commandContext));
                 commandContext->EndBlit();
 
                 LazyClearRenderPassAttachments(cmd);
@@ -858,7 +860,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                 Texture* texture = ToBackend(dst.texture.Get());
 
                 buffer->EnsureDataInitialized(commandContext);
-                EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+                DAWN_TRY(
+                    EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize));
 
                 buffer->TrackUsage();
                 texture->SynchronizeTextureBeforeUse(commandContext);
@@ -884,8 +887,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                 buffer->EnsureDataInitializedAsDestination(commandContext, copy);
 
                 texture->SynchronizeTextureBeforeUse(commandContext);
-                texture->EnsureSubresourceContentInitialized(
-                    commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+                DAWN_TRY(texture->EnsureSubresourceContentInitialized(
+                    commandContext, GetSubresourcesAffectedByCopy(src, copySize)));
                 buffer->TrackUsage();
 
                 TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
@@ -975,10 +978,10 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
 
                 srcTexture->SynchronizeTextureBeforeUse(commandContext);
                 dstTexture->SynchronizeTextureBeforeUse(commandContext);
-                srcTexture->EnsureSubresourceContentInitialized(
-                    commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
-                EnsureDestinationTextureInitialized(commandContext, dstTexture, copy->destination,
-                                                    copy->copySize);
+                DAWN_TRY(srcTexture->EnsureSubresourceContentInitialized(
+                    commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize)));
+                DAWN_TRY(EnsureDestinationTextureInitialized(commandContext, dstTexture,
+                                                             copy->destination, copy->copySize));
 
                 const MTLSize sizeOneSlice =
                     MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
diff --git a/src/dawn/native/metal/DeviceMTL.mm b/src/dawn/native/metal/DeviceMTL.mm
index f2cf974495..840e3c8543 100644
--- a/src/dawn/native/metal/DeviceMTL.mm
+++ b/src/dawn/native/metal/DeviceMTL.mm
@@ -395,8 +395,8 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
                                                 const Extent3D& copySizePixels) {
     Texture* texture = ToBackend(dst.texture.Get());
     texture->SynchronizeTextureBeforeUse(GetPendingCommandContext());
-    EnsureDestinationTextureInitialized(GetPendingCommandContext(DeviceBase::SubmitMode::Passive),
-                                        texture, dst, copySizePixels);
+    DAWN_TRY(EnsureDestinationTextureInitialized(
+        GetPendingCommandContext(DeviceBase::SubmitMode::Passive), texture, dst, copySizePixels));
 
     RecordCopyBufferToTexture(GetPendingCommandContext(DeviceBase::SubmitMode::Passive),
                               ToBackend(source)->GetMTLBuffer(), source->GetSize(),
diff --git a/src/dawn/native/metal/TextureMTL.h b/src/dawn/native/metal/TextureMTL.h
index 4141c2e961..79ad11b79d 100644
--- a/src/dawn/native/metal/TextureMTL.h
+++ b/src/dawn/native/metal/TextureMTL.h
@@ -58,8 +58,8 @@ class Texture final : public TextureBase {
     bool ShouldKeepInitialized() const;
 
     MTLBlitOption ComputeMTLBlitOption(Aspect aspect) const;
-    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                             const SubresourceRange& range);
+    MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                   const SubresourceRange& range);
 
     void SynchronizeTextureBeforeUse(CommandRecordingContext* commandContext);
     void IOSurfaceEndAccess(ExternalImageIOSurfaceEndAccessDescriptor* descriptor);
diff --git a/src/dawn/native/metal/TextureMTL.mm b/src/dawn/native/metal/TextureMTL.mm
index a8d457bbd5..af1074f493 100644
--- a/src/dawn/native/metal/TextureMTL.mm
+++ b/src/dawn/native/metal/TextureMTL.mm
@@ -1076,19 +1076,19 @@ MTLBlitOption Texture::ComputeMTLBlitOption(Aspect aspect) const {
     return MTLBlitOptionNone;
 }
 
-void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                                  const SubresourceRange& range) {
+MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                        const SubresourceRange& range) {
     if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
         // If subresource has not been initialized, clear it to black as it could
         // contain dirty bits from recycled memory
-        GetDevice()->ConsumedError(
-            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
         SetIsSubresourceContentInitialized(true, range);
         GetDevice()->IncrementLazyClearCountForTesting();
     }
+    return {};
 }
 
 // static
@@ -1165,9 +1165,8 @@ MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
             // TODO(enga): Add a workaround to back combined depth/stencil textures
             // with Sampled usage using two separate textures.
            // Or, consider always using the workaround for D32S8.
-            device->ConsumedError(
-                DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
-                                       "combined depth/stencil format."));
+            return DAWN_INTERNAL_ERROR("Cannot create stencil-only texture view of combined "
+                                       "depth/stencil format.");
         }
     } else if (GetTexture()->GetFormat().HasDepth() && GetTexture()->GetFormat().HasStencil()) {
         // Depth-only views for depth/stencil textures in Metal simply use the original
diff --git a/src/dawn/native/metal/UtilsMetal.h b/src/dawn/native/metal/UtilsMetal.h
index bb218e91e9..caf8d7bc0f 100644
--- a/src/dawn/native/metal/UtilsMetal.h
+++ b/src/dawn/native/metal/UtilsMetal.h
@@ -75,10 +75,10 @@ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
                                                      uint32_t rowsPerImage,
                                                      Aspect aspect);
 
-void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
-                                         Texture* texture,
-                                         const TextureCopy& dst,
-                                         const Extent3D& size);
+MaybeError EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+                                               Texture* texture,
+                                               const TextureCopy& dst,
+                                               const Extent3D& size);
 
 // Allow use MTLStoreActionStoreAndMultismapleResolve because the logic in the backend is
 // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
diff --git a/src/dawn/native/metal/UtilsMetal.mm b/src/dawn/native/metal/UtilsMetal.mm
index a80672b9ec..0eae076ade 100644
--- a/src/dawn/native/metal/UtilsMetal.mm
+++ b/src/dawn/native/metal/UtilsMetal.mm
@@ -344,17 +344,18 @@ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
     return copy;
 }
 
-void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
-                                         Texture* texture,
-                                         const TextureCopy& dst,
-                                         const Extent3D& size) {
+MaybeError EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+                                               Texture* texture,
+                                               const TextureCopy& dst,
+                                               const Extent3D& size) {
     ASSERT(texture == dst.texture.Get());
     SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
     if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
         texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        texture->EnsureSubresourceContentInitialized(commandContext, range);
+        DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
     }
+    return {};
 }
 
 MaybeError EncodeMetalRenderPass(Device* device,
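Every EnsureSubresourceContentInitialized overload touched above implements the same lazy clear-on-first-use idea; what changes in this patch is only that a failed clear now propagates instead of being consumed locally. A rough, backend-agnostic sketch of that idea (all types here are invented for illustration, not Dawn's):

    #include <cstdio>
    #include <optional>
    #include <set>
    #include <string>

    using MaybeFailure = std::optional<std::string>;  // nullopt == success

    // Tracks which subresources (here just mip levels) have defined contents and
    // clears a subresource lazily the first time it is about to be read.
    class LazyInitTracker {
      public:
        MaybeFailure EnsureInitialized(uint32_t mipLevel) {
            if (mInitialized.count(mipLevel) != 0) {
                return std::nullopt;  // Already has defined contents.
            }
            // The clear itself can fail (lost device, allocation failure, ...), and
            // that failure is now propagated instead of being consumed and logged.
            if (MaybeFailure error = ClearToZero(mipLevel)) {
                return error;
            }
            mInitialized.insert(mipLevel);
            return std::nullopt;
        }

        // Copies that fully overwrite a subresource can skip the clear entirely.
        void MarkInitialized(uint32_t mipLevel) { mInitialized.insert(mipLevel); }

      private:
        MaybeFailure ClearToZero(uint32_t mipLevel) {
            std::printf("lazily clearing mip %u\n", mipLevel);
            return std::nullopt;
        }

        std::set<uint32_t> mInitialized;
    };

    int main() {
        LazyInitTracker tracker;
        tracker.MarkInitialized(0);  // Fully overwritten by a copy, no clear needed.
        if (MaybeFailure err = tracker.EnsureInitialized(1)) {  // Triggers a lazy clear.
            std::printf("error: %s\n", err->c_str());
        }
    }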
diff --git a/src/dawn/native/opengl/CommandBufferGL.cpp b/src/dawn/native/opengl/CommandBufferGL.cpp
index 99d219bea0..9eadba6ee0 100644
--- a/src/dawn/native/opengl/CommandBufferGL.cpp
+++ b/src/dawn/native/opengl/CommandBufferGL.cpp
@@ -451,24 +451,26 @@ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescrip
 MaybeError CommandBuffer::Execute() {
     const OpenGLFunctions& gl = ToBackend(GetDevice())->GetGL();
 
-    auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+    auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) -> MaybeError {
        for (size_t i = 0; i < scope.textures.size(); i++) {
             Texture* texture = ToBackend(scope.textures[i]);
 
             // Clear subresources that are not render attachments. Render attachments will be
             // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
             // subresource has not been initialized before the render pass.
-            scope.textureUsages[i].Iterate(
-                [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+            DAWN_TRY(scope.textureUsages[i].Iterate(
+                [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
                     if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                        texture->EnsureSubresourceContentInitialized(range);
+                        DAWN_TRY(texture->EnsureSubresourceContentInitialized(range));
                     }
-                });
+                    return {};
+                }));
         }
 
         for (BufferBase* bufferBase : scope.buffers) {
             ToBackend(bufferBase)->EnsureDataInitialized();
         }
+        return {};
     };
 
     size_t nextComputePassNumber = 0;
@@ -481,7 +483,7 @@ MaybeError CommandBuffer::Execute() {
                 mCommands.NextCommand<BeginComputePassCmd>();
                 for (const SyncScopeResourceUsage& scope :
                      GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
-                    LazyClearSyncScope(scope);
+                    DAWN_TRY(LazyClearSyncScope(scope));
                 }
                 DAWN_TRY(ExecuteComputePass());
 
@@ -491,7 +493,8 @@ MaybeError CommandBuffer::Execute() {
             case Command::BeginRenderPass: {
                 auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
 
-                LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                DAWN_TRY(
+                    LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]));
                 LazyClearRenderPassAttachments(cmd);
                 DAWN_TRY(ExecuteRenderPass(cmd));
@@ -546,7 +549,7 @@ MaybeError CommandBuffer::Execute() {
                                                       dst.mipLevel)) {
                     dst.texture->SetIsSubresourceContentInitialized(true, range);
                 } else {
-                    ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
+                    DAWN_TRY(ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range));
                 }
 
                 gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
@@ -593,7 +596,7 @@ MaybeError CommandBuffer::Execute() {
                 buffer->EnsureDataInitializedAsDestination(copy);
 
                 SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
-                texture->EnsureSubresourceContentInitialized(subresources);
+                DAWN_TRY(texture->EnsureSubresourceContentInitialized(subresources));
                 // The only way to move data from a texture to a buffer in GL is via
                 // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
                 gl.BindTexture(target, texture->GetHandle());
@@ -694,11 +697,11 @@ MaybeError CommandBuffer::Execute() {
                 SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
                 SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
 
-                srcTexture->EnsureSubresourceContentInitialized(srcRange);
+                DAWN_TRY(srcTexture->EnsureSubresourceContentInitialized(srcRange));
                 if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
                     dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    dstTexture->EnsureSubresourceContentInitialized(dstRange);
+                    DAWN_TRY(dstTexture->EnsureSubresourceContentInitialized(dstRange));
                 }
                 CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(), srcTexture->GetGLTarget(),
                                  src.mipLevel, src.origin, dstTexture->GetHandle(),
diff --git a/src/dawn/native/opengl/DeviceGL.cpp b/src/dawn/native/opengl/DeviceGL.cpp
index 40e948bc5a..2e4f8a9116 100644
--- a/src/dawn/native/opengl/DeviceGL.cpp
+++ b/src/dawn/native/opengl/DeviceGL.cpp
@@ -250,7 +250,7 @@ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
     return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
 }
 ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-    return AcquireRef(new Texture(this, descriptor));
+    return Texture::Create(this, descriptor);
 }
 ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
     TextureBase* texture,
@@ -319,10 +319,10 @@ TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor
     if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
         textureDescriptor->size.height != static_cast<uint32_t>(height) ||
         textureDescriptor->size.depthOrArrayLayers != 1) {
-        ConsumedError(DAWN_VALIDATION_ERROR(
+        gl.DeleteTextures(1, &tex);
+        HandleError(DAWN_VALIDATION_ERROR(
             "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
             width, height, &textureDescriptor->size));
-        gl.DeleteTextures(1, &tex);
         return nullptr;
     }
diff --git a/src/dawn/native/opengl/QueueGL.cpp b/src/dawn/native/opengl/QueueGL.cpp
index f056c162bd..5042a8b720 100644
--- a/src/dawn/native/opengl/QueueGL.cpp
+++ b/src/dawn/native/opengl/QueueGL.cpp
@@ -69,7 +69,7 @@ MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
     if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel, destination.mipLevel)) {
         destination.texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
+        DAWN_TRY(ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range));
     }
     DoTexSubImage(ToBackend(GetDevice())->GetGL(), textureCopy, data, dataLayout, writeSizePixel);
     ToBackend(destination.texture)->Touch();
diff --git a/src/dawn/native/opengl/TextureGL.cpp b/src/dawn/native/opengl/TextureGL.cpp
index 906e398ab2..4774a5a7a2 100644
--- a/src/dawn/native/opengl/TextureGL.cpp
+++ b/src/dawn/native/opengl/TextureGL.cpp
@@ -15,6 +15,7 @@
 #include "dawn/native/opengl/TextureGL.h"
 
 #include <limits>
+#include <utility>
 
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
@@ -170,6 +171,16 @@ void AllocateTexture(const OpenGLFunctions& gl,
 
 // Texture
 
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor));
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        DAWN_TRY(
+            texture->ClearTexture(texture->GetAllSubresources(), TextureBase::ClearValue::NonZero));
+    }
+    return std::move(texture);
+}
+
 Texture::Texture(Device* device, const TextureDescriptor* descriptor)
     : Texture(device, descriptor, 0, TextureState::OwnedInternal) {
     const OpenGLFunctions& gl = device->GetGL();
@@ -186,11 +197,6 @@ Texture::Texture(Device* device, const TextureDescriptor* descriptor)
     // The texture is not complete if it uses mipmapping and not all levels up to
     // MAX_LEVEL have been defined.
     gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
-
-    if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-        GetDevice()->ConsumedError(
-            ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
-    }
 }
 
 void Texture::Touch() {
@@ -539,13 +545,14 @@ MaybeError Texture::ClearTexture(const SubresourceRange& range,
     return {};
 }
 
-void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+MaybeError Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
     if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
-        GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(range, TextureBase::ClearValue::Zero));
     }
+    return {};
 }
 
 // TextureView
diff --git a/src/dawn/native/opengl/TextureGL.h b/src/dawn/native/opengl/TextureGL.h
index c9bf63f87b..d9bacc80a5 100644
--- a/src/dawn/native/opengl/TextureGL.h
+++ b/src/dawn/native/opengl/TextureGL.h
@@ -26,7 +26,7 @@ struct GLFormat;
 
 class Texture final : public TextureBase {
   public:
-    Texture(Device* device, const TextureDescriptor* descriptor);
+    static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
     Texture(Device* device, const TextureDescriptor* descriptor, GLuint handle, TextureState state);
 
     GLuint GetHandle() const;
@@ -35,9 +35,10 @@ class Texture final : public TextureBase {
     uint32_t GetGenID() const;
     void Touch();
 
-    void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+    MaybeError EnsureSubresourceContentInitialized(const SubresourceRange& range);
 
   private:
+    Texture(Device* device, const TextureDescriptor* descriptor);
     ~Texture() override;
 
     void DestroyImpl() override;
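The OpenGL texture change is the classic fallible-factory move: the constructor can no longer report a failed NonzeroClearResourcesOnCreationForTesting clear, so construction goes behind a static Create() that returns a result and the plain constructor becomes private. A minimal standalone sketch of the pattern (the ResultOr alias and GLTexture class are illustrative only, not Dawn's types):

    #include <memory>
    #include <optional>
    #include <string>
    #include <utility>
    #include <variant>

    // Simplified stand-in for ResultOrError<Ref<T>>: either an error message or a value.
    template <typename T>
    using ResultOr = std::variant<std::string, T>;

    class GLTexture {
      public:
        // Construction that can fail goes through a static factory so the error has
        // somewhere to go; the constructor itself stays infallible.
        static ResultOr<std::unique_ptr<GLTexture>> Create(bool clearOnCreation) {
            auto texture = std::unique_ptr<GLTexture>(new GLTexture());
            if (clearOnCreation) {
                if (std::optional<std::string> error = texture->ClearToNonZero()) {
                    return *error;  // Propagate instead of logging and continuing.
                }
            }
            return std::move(texture);
        }

      private:
        GLTexture() = default;  // Only reachable through Create().

        std::optional<std::string> ClearToNonZero() {
            // Pretend the clear may fail; return nullopt on success.
            return std::nullopt;
        }
    };

    int main() {
        auto result = GLTexture::Create(/*clearOnCreation=*/true);
        if (std::holds_alternative<std::string>(result)) {
            return 1;  // Handle the creation error.
        }
        std::unique_ptr<GLTexture> texture =
            std::move(std::get<std::unique_ptr<GLTexture>>(result));
    }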
diff --git a/src/dawn/native/vulkan/CommandBufferVk.cpp b/src/dawn/native/vulkan/CommandBufferVk.cpp
index 54e197d5c9..6ddfba0050 100644
--- a/src/dawn/native/vulkan/CommandBufferVk.cpp
+++ b/src/dawn/native/vulkan/CommandBufferVk.cpp
@@ -154,9 +154,9 @@ class DescriptorSetTracker : public BindGroupTrackerBase {
 
 // Records the necessary barriers for a synchronization scope using the resource usage
 // data pre-computed in the frontend. Also performs lazy initialization if required.
-void TransitionAndClearForSyncScope(Device* device,
-                                    CommandRecordingContext* recordingContext,
-                                    const SyncScopeResourceUsage& scope) {
+MaybeError TransitionAndClearForSyncScope(Device* device,
+                                          CommandRecordingContext* recordingContext,
+                                          const SyncScopeResourceUsage& scope) {
     std::vector<VkBufferMemoryBarrier> bufferBarriers;
     std::vector<VkImageMemoryBarrier> imageBarriers;
     VkPipelineStageFlags srcStages = 0;
@@ -179,12 +179,13 @@ void TransitionAndClearForSyncScope(Device* device,
         // Clear subresources that are not render attachments. Render attachments will be
         // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
         // subresource has not been initialized before the render pass.
-        scope.textureUsages[i].Iterate(
-            [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+        DAWN_TRY(scope.textureUsages[i].Iterate(
+            [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
                 if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                    texture->EnsureSubresourceContentInitialized(recordingContext, range);
+                    DAWN_TRY(texture->EnsureSubresourceContentInitialized(recordingContext, range));
                 }
-            });
+                return {};
+            }));
         texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i], &imageBarriers,
                                         &srcStages, &dstStages);
     }
@@ -194,6 +195,7 @@ void TransitionAndClearForSyncScope(Device* device,
                                   nullptr, bufferBarriers.size(), bufferBarriers.data(),
                                   imageBarriers.size(), imageBarriers.data());
     }
+    return {};
 }
 
 MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
@@ -512,8 +514,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
     // And resets the used query sets which are rewritten on the render pass.
     auto PrepareResourcesForRenderPass = [](Device* device,
                                             CommandRecordingContext* recordingContext,
-                                            const RenderPassResourceUsage& usages) {
-        TransitionAndClearForSyncScope(device, recordingContext, usages);
+                                            const RenderPassResourceUsage& usages) -> MaybeError {
+        DAWN_TRY(TransitionAndClearForSyncScope(device, recordingContext, usages));
 
         // Reset all query set used on current render pass together before beginning render pass
         // because the reset command must be called outside render pass
@@ -521,6 +523,7 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
             ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
                                            usages.querySets[i], usages.queryAvailabilities[i]);
         }
+        return {};
     };
 
     size_t nextComputePassNumber = 0;
@@ -580,8 +583,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
                     // Since texture has been overwritten, it has been "initialized"
                     dst.texture->SetIsSubresourceContentInitialized(true, range);
                 } else {
-                    ToBackend(dst.texture)
-                        ->EnsureSubresourceContentInitialized(recordingContext, range);
+                    DAWN_TRY(ToBackend(dst.texture)
+                                 ->EnsureSubresourceContentInitialized(recordingContext, range));
                 }
                 ToBackend(src.buffer)
                     ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
@@ -614,8 +617,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
                 SubresourceRange range =
                     GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
 
-                ToBackend(src.texture)
-                    ->EnsureSubresourceContentInitialized(recordingContext, range);
+                DAWN_TRY(ToBackend(src.texture)
+                             ->EnsureSubresourceContentInitialized(recordingContext, range));
 
                 ToBackend(src.texture)
                     ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
@@ -642,15 +645,15 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
                 SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
                 SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
 
-                ToBackend(src.texture)
-                    ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+                DAWN_TRY(ToBackend(src.texture)
+                             ->EnsureSubresourceContentInitialized(recordingContext, srcRange));
                 if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
                                                   dst.mipLevel)) {
                     // Since destination texture has been overwritten, it has been "initialized"
                     dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    ToBackend(dst.texture)
-                        ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+                    DAWN_TRY(ToBackend(dst.texture)
+                                 ->EnsureSubresourceContentInitialized(recordingContext, dstRange));
                 }
 
                 if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
@@ -730,9 +733,9 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
             case Command::BeginRenderPass: {
                 BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
 
-                PrepareResourcesForRenderPass(
+                DAWN_TRY(PrepareResourcesForRenderPass(
                     device, recordingContext,
-                    GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                    GetResourceUsages().renderPasses[nextRenderPassNumber]));
 
                 LazyClearRenderPassAttachments(cmd);
                 DAWN_TRY(RecordRenderPass(recordingContext, cmd));
@@ -935,8 +938,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingCo
             case Command::Dispatch: {
                 DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
 
-                TransitionAndClearForSyncScope(device, recordingContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    device, recordingContext, resourceUsages.dispatchUsages[currentDispatch]));
                 descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
 
                 device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
@@ -948,8 +951,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingCo
                 DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
                 VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
 
-                TransitionAndClearForSyncScope(device, recordingContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    device, recordingContext, resourceUsages.dispatchUsages[currentDispatch]));
                 descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
 
                 device->fn.CmdDispatchIndirect(commands, indirectBuffer,
diff --git a/src/dawn/native/vulkan/DeviceVk.cpp b/src/dawn/native/vulkan/DeviceVk.cpp
index f4203801aa..370b3b593a 100644
--- a/src/dawn/native/vulkan/DeviceVk.cpp
+++ b/src/dawn/native/vulkan/DeviceVk.cpp
@@ -816,7 +816,8 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
         // Since texture has been overwritten, it has been "initialized"
         dst.texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        ToBackend(dst.texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+        DAWN_TRY(
+            ToBackend(dst.texture)->EnsureSubresourceContentInitialized(recordingContext, range));
     }
     // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
     // texture.
diff --git a/src/dawn/native/vulkan/TextureVk.cpp b/src/dawn/native/vulkan/TextureVk.cpp
index 68e858c8a8..b15b9bef2d 100644
--- a/src/dawn/native/vulkan/TextureVk.cpp
+++ b/src/dawn/native/vulkan/TextureVk.cpp
@@ -1342,17 +1342,17 @@ MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
     return {};
 }
 
-void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
-                                                  const SubresourceRange& range) {
+MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                        const SubresourceRange& range) {
     if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
    }
     if (!IsSubresourceContentInitialized(range)) {
         // If subresource has not been initialized, clear it to black as it could contain dirty
         // bits from recycled memory
-        GetDevice()->ConsumedError(
-            ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
     }
+    return {};
 }
 
 void Texture::UpdateExternalSemaphoreHandle(ExternalSemaphoreHandle handle) {
diff --git a/src/dawn/native/vulkan/TextureVk.h b/src/dawn/native/vulkan/TextureVk.h
index 48017bf953..0d9a923e2d 100644
--- a/src/dawn/native/vulkan/TextureVk.h
+++ b/src/dawn/native/vulkan/TextureVk.h
@@ -84,8 +84,8 @@ class Texture final : public TextureBase {
     void TransitionEagerlyForExport(CommandRecordingContext* recordingContext);
     std::vector<VkSemaphore> AcquireWaitRequirements();
 
-    void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
-                                             const SubresourceRange& range);
+    MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                   const SubresourceRange& range);
 
     VkImageLayout GetCurrentLayoutForSwapChain() const;
 
diff --git a/src/dawn/tests/unittests/SubresourceStorageTests.cpp b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
index fb2759aefe..2f5fadd974 100644
--- a/src/dawn/tests/unittests/SubresourceStorageTests.cpp
+++ b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
@@ -12,14 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <memory>
+#include <string>
 #include <vector>
 
 #include "dawn/common/Log.h"
 #include "dawn/native/SubresourceStorage.h"
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 namespace dawn::native {
 
+using ::testing::HasSubstr;
+
 // A fake class that replicates the behavior of SubresourceStorage but without any compression
 // and is used to compare the results of operations on SubresourceStorage against the "ground
 // truth" of FakeStorage.
@@ -211,6 +216,31 @@ bool operator==(const SmallData& a, const SmallData& b) {
     return a.value == b.value;
 }
 
+// Tests that the MaybeError version of Iterate returns the first error that it encounters.
+TEST(SubresourceStorageTest, IterateMaybeError) {
+    // Create a resource with multiple layers of different data so that we can ensure that the
+    // iterate function runs more than once.
+    constexpr uint32_t kLayers = 4;
+    SubresourceStorage<uint32_t> s(Aspect::Color, kLayers, 1);
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        s.Update(SubresourceRange::MakeSingle(Aspect::Color, layer, 0),
+                 [&](const SubresourceRange&, uint32_t* data) { *data = layer + 1; });
+    }
+
+    // Make sure that the first error is returned.
+    uint32_t errorLayer = 0;
+    MaybeError maybeError =
+        s.Iterate([&](const SubresourceRange& range, const uint32_t& layer) -> MaybeError {
+            if (!errorLayer) {
+                errorLayer = layer;
+            }
+            return DAWN_VALIDATION_ERROR("Errored at layer: %d", layer);
+        });
+    ASSERT_TRUE(maybeError.IsError());
+    std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+    EXPECT_THAT(error->GetFormattedMessage(), HasSubstr(std::to_string(errorLayer)));
+}
+
 // Test that the default value is correctly set.
 TEST(SubresourceStorageTest, DefaultValue) {
     // Test setting no default value for a primitive type.
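The new test pins down the behaviour callers of the MaybeError-returning Iterate rely on: iteration stops at the first error, and that error is the one surfaced. A self-contained gtest/gmock sketch of the same property, using a plain std::vector and an invented MaybeFailure type instead of Dawn's SubresourceStorage and MaybeError (link against gtest/gmock with a test main):

    #include <optional>
    #include <string>
    #include <vector>

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    using ::testing::HasSubstr;

    using MaybeFailure = std::optional<std::string>;  // nullopt == success

    // Minimal error-propagating iteration: stops and returns on the first error.
    template <typename F>
    MaybeFailure ForEach(const std::vector<int>& values, F&& func) {
        for (int v : values) {
            if (MaybeFailure error = func(v)) {
                return error;
            }
        }
        return std::nullopt;
    }

    TEST(ForEachTest, ReturnsFirstError) {
        std::vector<int> values = {1, 2, 3, 4};
        int visited = 0;

        MaybeFailure result = ForEach(values, [&](int v) -> MaybeFailure {
            visited++;
            if (v >= 2) {
                return "failed at value " + std::to_string(v);
            }
            return std::nullopt;
        });

        ASSERT_TRUE(result.has_value());
        // The error comes from the first failing element, and iteration stopped there.
        EXPECT_THAT(*result, HasSubstr("2"));
        EXPECT_EQ(visited, 2);
    }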