Bubble up errors from EnsureSubresourceContentInitialized.
Bug: dawn:1336
Change-Id: I1fd189bd6e3689df6f10351e8ba19fee569bda23
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/122023
Kokoro: Kokoro <noreply+kokoro@google.com>
Commit-Queue: Loko Kung <lokokung@google.com>
Reviewed-by: Austin Eng <enga@chromium.org>

parent 84532462f6
commit 02e456c9fb
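The core mechanical change: SubresourceStorage::Iterate now deduces its return type from the callback, so a callback returning MaybeError makes Iterate itself return MaybeError and stop at the first error, and callers can wrap the whole iteration in DAWN_TRY instead of swallowing failures through ConsumedError. A minimal sketch of the resulting call pattern, using only names that appear in the diff below (DoSomethingFallible is a hypothetical helper):

    // The callback returns MaybeError, so Iterate returns MaybeError and the
    // first failure bubbles up to the caller through DAWN_TRY.
    DAWN_TRY(subresources.Iterate(
        [&](const SubresourceRange& range, const T& data) -> MaybeError {
            DAWN_TRY(DoSomethingFallible(range, data));  // hypothetical fallible work
            return {};
        }));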
@@ -55,18 +55,18 @@ MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
     // combination of readonly usages.
     for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
         const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
-        MaybeError error = {};
-        textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
+        DAWN_TRY(textureUsage.Iterate(
+            [&](const SubresourceRange&, const wgpu::TextureUsage& usage) -> MaybeError {
                 bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
                 bool singleUse = wgpu::HasZeroOrOneBits(usage);
-                if (!readOnly && !singleUse && !error.IsError()) {
-                    error = DAWN_VALIDATION_ERROR(
+                if (!readOnly && !singleUse) {
+                    return DAWN_VALIDATION_ERROR(
                         "%s usage (%s) includes writable usage and another usage in the same "
                         "synchronization scope.",
                         scope.textures[i], usage);
                 }
-        });
-        DAWN_TRY(std::move(error));
+                return {};
+            }));
     }
     return {};
 }
@@ -18,11 +18,13 @@
 #include <array>
 #include <limits>
 #include <memory>
+#include <type_traits>
 #include <vector>
 
 #include "dawn/common/Assert.h"
 #include "dawn/common/TypeTraits.h"
 #include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Error.h"
 #include "dawn/native/Subresource.h"
 
 namespace dawn::native {
@@ -120,17 +122,27 @@ class SubresourceStorage {
     // same for multiple subresources.
     const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
 
-    // Given an iterateFunc that's a function or function-like objet that can be called with
-    // arguments of type (const SubresourceRange& range, const T& data) and returns void,
-    // calls it with aggregate ranges if possible, such that each subresource is part of
+    // Given an iterateFunc that's a function or function-like object that can be called with
+    // arguments of type (const SubresourceRange& range, const T& data) and returns either void or
+    // MaybeError, calls it with aggregate ranges if possible, such that each subresource is part of
     // exactly one of the ranges iterateFunc is called with (and obviously data is the value
-    // stored for that subresource). For example:
+    // stored for that subresource). Note that for MaybeError version, Iterate will return on the
+    // first error. Example usages:
     //
+    //   // Returning void version:
     //   subresources.Iterate([&](const SubresourceRange& range, const T& data) {
     //       // Do something with range and data.
     //   });
-    template <typename F>
-    void Iterate(F&& iterateFunc) const;
+    //
+    //   // Return MaybeError version:
+    //   DAWN_TRY(subresources.Iterate(
+    //       [&](const SubresourceRange& range, const T& data) -> MaybeError {
+    //           // Do something with range and data.
+    //           // Return a MaybeError.
+    //       })
+    //   );
+    template <typename F, typename R = std::invoke_result_t<F, const SubresourceRange&, const T&>>
+    R Iterate(F&& iterateFunc) const;
 
     // Given an updateFunc that's a function or function-like objet that can be called with
     // arguments of type (const SubresourceRange& range, T* data) and returns void,
@@ -239,6 +251,11 @@ SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
 template <typename T>
 template <typename F>
 void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+    ASSERT(range.baseArrayLayer < mArrayLayerCount &&
+           range.baseArrayLayer + range.layerCount <= mArrayLayerCount);
+    ASSERT(range.baseMipLevel < mMipLevelCount &&
+           range.baseMipLevel + range.levelCount <= mMipLevelCount);
+
     bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
     bool fullAspects =
         range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
@@ -351,8 +368,12 @@ void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeF
 }
 
 template <typename T>
-template <typename F>
-void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+template <typename F, typename R>
+R SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+    static_assert(std::is_same_v<R, MaybeError> || std::is_same_v<R, void>,
+                  "R must be either void or MaybeError");
+    constexpr bool mayError = std::is_same_v<R, MaybeError>;
+
     for (Aspect aspect : IterateEnumMask(mAspects)) {
         uint32_t aspectIndex = GetAspectIndex(aspect);
 
@@ -360,7 +381,11 @@ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
         if (mAspectCompressed[aspectIndex]) {
             SubresourceRange range =
                 SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
-            iterateFunc(range, DataInline(aspectIndex));
+            if constexpr (mayError) {
+                DAWN_TRY(iterateFunc(range, DataInline(aspectIndex)));
+            } else {
+                iterateFunc(range, DataInline(aspectIndex));
+            }
             continue;
         }
 
@@ -368,17 +393,28 @@ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
             // Fast path, call iterateFunc on the whole array layer at once.
             if (LayerCompressed(aspectIndex, layer)) {
                 SubresourceRange range = GetFullLayerRange(aspect, layer);
-                iterateFunc(range, Data(aspectIndex, layer));
+                if constexpr (mayError) {
+                    DAWN_TRY(iterateFunc(range, Data(aspectIndex, layer)));
+                } else {
+                    iterateFunc(range, Data(aspectIndex, layer));
+                }
                 continue;
             }
 
             // Slow path, call iterateFunc for each mip level.
             for (uint32_t level = 0; level < mMipLevelCount; level++) {
                 SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                iterateFunc(range, Data(aspectIndex, layer, level));
+                if constexpr (mayError) {
+                    DAWN_TRY(iterateFunc(range, Data(aspectIndex, layer, level)));
+                } else {
+                    iterateFunc(range, Data(aspectIndex, layer, level));
+                }
             }
         }
     }
+    if constexpr (mayError) {
+        return {};
+    }
 }
 
 template <typename T>
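For reference, the dispatch above hinges on std::invoke_result_t deducing the callback's return type and if constexpr selecting the matching body at compile time. A self-contained sketch of that technique, with bool standing in for MaybeError so it compiles outside Dawn (illustrative only, not Dawn code):

    #include <type_traits>

    // R is deduced from what the callback returns; the error-propagating branch
    // only exists when the callback can actually report failure.
    template <typename F, typename R = std::invoke_result_t<F, int>>
    R ForEach(F&& callback) {
        static_assert(std::is_same_v<R, void> || std::is_same_v<R, bool>,
                      "R must be either void or bool");
        for (int i = 0; i < 3; i++) {
            if constexpr (std::is_same_v<R, bool>) {
                if (!callback(i)) {
                    return false;  // stop on the first failure, mirroring DAWN_TRY
                }
            } else {
                callback(i);
            }
        }
        if constexpr (std::is_same_v<R, bool>) {
            return true;
        }
    }

ForEach([](int) {}) deduces R as void, while ForEach([](int i) { return i < 2; }) deduces R as bool and stops early.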
@@ -322,11 +322,12 @@ void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
                                       0);
 }
 
-// Records the necessary barriers for a synchronization scope using the resource usage
-// data pre-computed in the frontend. Also performs lazy initialization if required.
-// Returns whether any UAV are used in the synchronization scope.
-bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
-                                    const SyncScopeResourceUsage& usages) {
+// Records the necessary barriers for a synchronization scope using the resource usage data
+// pre-computed in the frontend. Also performs lazy initialization if required. Returns whether any
+// UAV are used in the synchronization scope if `passHasUAV` is passed and no errors are hit.
+MaybeError TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+                                          const SyncScopeResourceUsage& usages,
+                                          bool* passHasUAV = nullptr) {
     std::vector<D3D12_RESOURCE_BARRIER> barriers;
 
     ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@@ -336,9 +337,8 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
     for (size_t i = 0; i < usages.buffers.size(); ++i) {
         Buffer* buffer = ToBackend(usages.buffers[i]);
 
-        // TODO(crbug.com/dawn/852): clear storage buffers with
-        // ClearUnorderedAccessView*().
-        buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+        // TODO(crbug.com/dawn/852): clear storage buffers with ClearUnorderedAccessView*().
+        DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
 
         D3D12_RESOURCE_BARRIER barrier;
         if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
@@ -356,13 +356,14 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
         // Clear subresources that are not render attachments. Render attachments will be
         // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
         // subresource has not been initialized before the render pass.
-        usages.textureUsages[i].Iterate(
-            [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+        DAWN_TRY(usages.textureUsages[i].Iterate(
+            [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
                 if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                    texture->EnsureSubresourceContentInitialized(commandContext, range);
+                    DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
                 }
                 textureUsages |= usage;
-            });
+                return {};
+            }));
 
         ToBackend(usages.textures[i])
             ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
@@ -373,8 +374,11 @@ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
         commandList->ResourceBarrier(barriers.size(), barriers.data());
     }
 
-    return (bufferUsages & wgpu::BufferUsage::Storage ||
-            textureUsages & wgpu::TextureUsage::StorageBinding);
+    if (passHasUAV) {
+        *passHasUAV = bufferUsages & wgpu::BufferUsage::Storage ||
+                      textureUsages & wgpu::TextureUsage::StorageBinding;
+    }
+    return {};
 }
 
 } // anonymous namespace
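Because the D3D12 TransitionAndClearForSyncScope now uses its return value for error propagation, the "does this scope use any UAV" answer moves to the optional out-parameter. The two call shapes, summarized from the hunks below (dispatchUsages and renderPassUsages stand in for the real usage objects):

    // Compute dispatch scopes do not need the UAV flag, so the out-parameter is omitted.
    DAWN_TRY(TransitionAndClearForSyncScope(commandContext, dispatchUsages));

    // Render passes still ask whether any UAV is used in the scope.
    bool passHasUAV;
    DAWN_TRY(TransitionAndClearForSyncScope(commandContext, renderPassUsages, &passHasUAV));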
@@ -753,8 +757,10 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                 BeginRenderPassCmd* beginRenderPassCmd =
                     mCommands.NextCommand<BeginRenderPassCmd>();
 
-                const bool passHasUAV = TransitionAndClearForSyncScope(
-                    commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                bool passHasUAV;
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber],
+                    &passHasUAV));
                 bindingTracker.SetInComputePass(false);
 
                 LazyClearRenderPassAttachments(beginRenderPassCmd);
@@ -808,7 +814,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                                                       copy->destination.mipLevel)) {
                     texture->SetIsSubresourceContentInitialized(true, subresources);
                 } else {
-                    texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+                    DAWN_TRY(
+                        texture->EnsureSubresourceContentInitialized(commandContext, subresources));
                 }
 
                 buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
@@ -842,7 +849,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                 SubresourceRange subresources =
                     GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
 
-                texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+                DAWN_TRY(
+                    texture->EnsureSubresourceContentInitialized(commandContext, subresources));
 
                 texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
                                                     subresources);
@@ -875,12 +883,13 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext
                 SubresourceRange dstRange =
                     GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
 
-                source->EnsureSubresourceContentInitialized(commandContext, srcRange);
+                DAWN_TRY(source->EnsureSubresourceContentInitialized(commandContext, srcRange));
                 if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
                                                   copy->destination.mipLevel)) {
                     destination->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
+                    DAWN_TRY(
+                        destination->EnsureSubresourceContentInitialized(commandContext, dstRange));
                 }
 
                 if (copy->source.texture.Get() == copy->destination.texture.Get() &&
@@ -1145,8 +1154,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandCont
                     break;
                 }
 
-                TransitionAndClearForSyncScope(commandContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    commandContext, resourceUsages.dispatchUsages[currentDispatch]));
                 DAWN_TRY(bindingTracker->Apply(commandContext));
 
                 RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
@@ -1158,8 +1167,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandCont
             case Command::DispatchIndirect: {
                 DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
 
-                TransitionAndClearForSyncScope(commandContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    commandContext, resourceUsages.dispatchUsages[currentDispatch]));
                 DAWN_TRY(bindingTracker->Apply(commandContext));
 
                 ComPtr<ID3D12CommandSignature> signature =
@@ -530,7 +530,7 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
     if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst.mipLevel)) {
         texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        texture->EnsureSubresourceContentInitialized(commandContext, range);
+        DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
     }
 
     texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
@@ -1212,17 +1212,17 @@ void Texture::SetLabelImpl() {
     SetLabelHelper("Dawn_InternalTexture");
 }
 
-void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                                          const SubresourceRange& range) {
     if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
         // If subresource has not been initialized, clear it to black as it could contain
        // dirty bits from recycled memory
-        GetDevice()->ConsumedError(
-            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
     }
+    return {};
 }
 
 bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
@@ -77,8 +77,8 @@ class Texture final : public TextureBase {
                                 bool depthReadOnly,
                                 bool stencilReadOnly) const;
 
-    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+    MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                                    const SubresourceRange& range);
 
     MaybeError SynchronizeImportedTextureBeforeUse();
     MaybeError SynchronizeImportedTextureAfterUse();
@@ -735,23 +735,25 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
     size_t nextRenderPassNumber = 0;
 
     auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
-                                 CommandRecordingContext* commandContext) {
+                                 CommandRecordingContext* commandContext) -> MaybeError {
         for (size_t i = 0; i < scope.textures.size(); ++i) {
             Texture* texture = ToBackend(scope.textures[i]);
 
             // Clear subresources that are not render attachments. Render attachments will be
             // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
             // subresource has not been initialized before the render pass.
-            scope.textureUsages[i].Iterate(
-                [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+            DAWN_TRY(scope.textureUsages[i].Iterate([&](const SubresourceRange& range,
+                                                        wgpu::TextureUsage usage) -> MaybeError {
                 if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                    texture->EnsureSubresourceContentInitialized(commandContext, range);
+                    DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
                 }
-                });
+                return {};
+            }));
         }
         for (BufferBase* bufferBase : scope.buffers) {
             ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
         }
+        return {};
     };
 
     Command type;
@@ -766,7 +768,7 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                 }
                 for (const SyncScopeResourceUsage& scope :
                      GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
-                    LazyClearSyncScope(scope, commandContext);
+                    DAWN_TRY(LazyClearSyncScope(scope, commandContext));
                 }
                 commandContext->EndBlit();
 
@@ -793,8 +795,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                         }
                     }
                 }
-                LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
-                                   commandContext);
+                DAWN_TRY(LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+                                            commandContext));
                 commandContext->EndBlit();
 
                 LazyClearRenderPassAttachments(cmd);
@@ -858,7 +860,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                 Texture* texture = ToBackend(dst.texture.Get());
 
                 buffer->EnsureDataInitialized(commandContext);
-                EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+                DAWN_TRY(
+                    EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize));
 
                 buffer->TrackUsage();
                 texture->SynchronizeTextureBeforeUse(commandContext);
@@ -884,8 +887,8 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
                 buffer->EnsureDataInitializedAsDestination(commandContext, copy);
 
                 texture->SynchronizeTextureBeforeUse(commandContext);
-                texture->EnsureSubresourceContentInitialized(
-                    commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+                DAWN_TRY(texture->EnsureSubresourceContentInitialized(
+                    commandContext, GetSubresourcesAffectedByCopy(src, copySize)));
                 buffer->TrackUsage();
 
                 TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
@@ -975,10 +978,10 @@ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext)
 
                 srcTexture->SynchronizeTextureBeforeUse(commandContext);
                 dstTexture->SynchronizeTextureBeforeUse(commandContext);
-                srcTexture->EnsureSubresourceContentInitialized(
-                    commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
-                EnsureDestinationTextureInitialized(commandContext, dstTexture, copy->destination,
-                                                    copy->copySize);
+                DAWN_TRY(srcTexture->EnsureSubresourceContentInitialized(
+                    commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize)));
+                DAWN_TRY(EnsureDestinationTextureInitialized(commandContext, dstTexture,
+                                                             copy->destination, copy->copySize));
 
                 const MTLSize sizeOneSlice =
                     MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
@@ -395,8 +395,8 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
                                                 const Extent3D& copySizePixels) {
     Texture* texture = ToBackend(dst.texture.Get());
     texture->SynchronizeTextureBeforeUse(GetPendingCommandContext());
-    EnsureDestinationTextureInitialized(GetPendingCommandContext(DeviceBase::SubmitMode::Passive),
-                                        texture, dst, copySizePixels);
+    DAWN_TRY(EnsureDestinationTextureInitialized(
+        GetPendingCommandContext(DeviceBase::SubmitMode::Passive), texture, dst, copySizePixels));
 
     RecordCopyBufferToTexture(GetPendingCommandContext(DeviceBase::SubmitMode::Passive),
                               ToBackend(source)->GetMTLBuffer(), source->GetSize(),
@@ -58,8 +58,8 @@ class Texture final : public TextureBase {
     bool ShouldKeepInitialized() const;
 
     MTLBlitOption ComputeMTLBlitOption(Aspect aspect) const;
-    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+    MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                                    const SubresourceRange& range);
 
     void SynchronizeTextureBeforeUse(CommandRecordingContext* commandContext);
     void IOSurfaceEndAccess(ExternalImageIOSurfaceEndAccessDescriptor* descriptor);
@@ -1076,19 +1076,19 @@ MTLBlitOption Texture::ComputeMTLBlitOption(Aspect aspect) const {
     return MTLBlitOptionNone;
 }
 
-void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                                          const SubresourceRange& range) {
     if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
         // If subresource has not been initialized, clear it to black as it could
         // contain dirty bits from recycled memory
-        GetDevice()->ConsumedError(
-            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
         SetIsSubresourceContentInitialized(true, range);
         GetDevice()->IncrementLazyClearCountForTesting();
     }
+    return {};
 }
 
 // static
@@ -1165,9 +1165,8 @@ MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
             // TODO(enga): Add a workaround to back combined depth/stencil textures
             // with Sampled usage using two separate textures.
             // Or, consider always using the workaround for D32S8.
-            device->ConsumedError(
-                DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
-                                       "combined depth/stencil format."));
+            return DAWN_INTERNAL_ERROR("Cannot create stencil-only texture view of combined "
+                                       "depth/stencil format.");
         }
     } else if (GetTexture()->GetFormat().HasDepth() && GetTexture()->GetFormat().HasStencil()) {
         // Depth-only views for depth/stencil textures in Metal simply use the original
@@ -75,10 +75,10 @@ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
                                                      uint32_t rowsPerImage,
                                                      Aspect aspect);
 
-void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+MaybeError EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
                                                Texture* texture,
                                                const TextureCopy& dst,
                                                const Extent3D& size);
 
 // Allow use MTLStoreActionStoreAndMultismapleResolve because the logic in the backend is
 // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
@@ -344,17 +344,18 @@ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
     return copy;
 }
 
-void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+MaybeError EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
                                                Texture* texture,
                                                const TextureCopy& dst,
                                                const Extent3D& size) {
     ASSERT(texture == dst.texture.Get());
     SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
     if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
         texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        texture->EnsureSubresourceContentInitialized(commandContext, range);
+        DAWN_TRY(texture->EnsureSubresourceContentInitialized(commandContext, range));
     }
+    return {};
 }
 
 MaybeError EncodeMetalRenderPass(Device* device,
@@ -451,24 +451,26 @@ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescrip
 MaybeError CommandBuffer::Execute() {
     const OpenGLFunctions& gl = ToBackend(GetDevice())->GetGL();
 
-    auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+    auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) -> MaybeError {
         for (size_t i = 0; i < scope.textures.size(); i++) {
             Texture* texture = ToBackend(scope.textures[i]);
 
             // Clear subresources that are not render attachments. Render attachments will be
             // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
             // subresource has not been initialized before the render pass.
-            scope.textureUsages[i].Iterate(
-                [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+            DAWN_TRY(scope.textureUsages[i].Iterate(
+                [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
                     if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                        texture->EnsureSubresourceContentInitialized(range);
+                        DAWN_TRY(texture->EnsureSubresourceContentInitialized(range));
                     }
-                });
+                    return {};
+                }));
         }
 
         for (BufferBase* bufferBase : scope.buffers) {
             ToBackend(bufferBase)->EnsureDataInitialized();
         }
+        return {};
     };
 
     size_t nextComputePassNumber = 0;
@@ -481,7 +483,7 @@ MaybeError CommandBuffer::Execute() {
                 mCommands.NextCommand<BeginComputePassCmd>();
                 for (const SyncScopeResourceUsage& scope :
                      GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
-                    LazyClearSyncScope(scope);
+                    DAWN_TRY(LazyClearSyncScope(scope));
                 }
                 DAWN_TRY(ExecuteComputePass());
 
@@ -491,7 +493,8 @@ MaybeError CommandBuffer::Execute() {
 
             case Command::BeginRenderPass: {
                 auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
-                LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                DAWN_TRY(
+                    LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]));
                 LazyClearRenderPassAttachments(cmd);
                 DAWN_TRY(ExecuteRenderPass(cmd));
 
@@ -546,7 +549,7 @@ MaybeError CommandBuffer::Execute() {
                                                       dst.mipLevel)) {
                     dst.texture->SetIsSubresourceContentInitialized(true, range);
                 } else {
-                    ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
+                    DAWN_TRY(ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range));
                 }
 
                 gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
@@ -593,7 +596,7 @@ MaybeError CommandBuffer::Execute() {
                 buffer->EnsureDataInitializedAsDestination(copy);
 
                 SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
-                texture->EnsureSubresourceContentInitialized(subresources);
+                DAWN_TRY(texture->EnsureSubresourceContentInitialized(subresources));
                 // The only way to move data from a texture to a buffer in GL is via
                 // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
                 gl.BindTexture(target, texture->GetHandle());
@@ -694,11 +697,11 @@ MaybeError CommandBuffer::Execute() {
                 SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
                 SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
 
-                srcTexture->EnsureSubresourceContentInitialized(srcRange);
+                DAWN_TRY(srcTexture->EnsureSubresourceContentInitialized(srcRange));
                 if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
                     dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    dstTexture->EnsureSubresourceContentInitialized(dstRange);
+                    DAWN_TRY(dstTexture->EnsureSubresourceContentInitialized(dstRange));
                 }
                 CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(), srcTexture->GetGLTarget(),
                                  src.mipLevel, src.origin, dstTexture->GetHandle(),
@@ -250,7 +250,7 @@ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
     return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
 }
 ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-    return AcquireRef(new Texture(this, descriptor));
+    return Texture::Create(this, descriptor);
 }
 ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
     TextureBase* texture,
@@ -319,10 +319,10 @@ TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor
     if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
         textureDescriptor->size.height != static_cast<uint32_t>(height) ||
         textureDescriptor->size.depthOrArrayLayers != 1) {
-        ConsumedError(DAWN_VALIDATION_ERROR(
+        gl.DeleteTextures(1, &tex);
+        HandleError(DAWN_VALIDATION_ERROR(
             "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
             width, height, &textureDescriptor->size));
-        gl.DeleteTextures(1, &tex);
         return nullptr;
     }
 
@@ -69,7 +69,7 @@ MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
     if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel, destination.mipLevel)) {
         destination.texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
+        DAWN_TRY(ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range));
     }
     DoTexSubImage(ToBackend(GetDevice())->GetGL(), textureCopy, data, dataLayout, writeSizePixel);
     ToBackend(destination.texture)->Touch();
@@ -15,6 +15,7 @@
 #include "dawn/native/opengl/TextureGL.h"
 
 #include <limits>
+#include <utility>
 
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
@@ -170,6 +171,16 @@ void AllocateTexture(const OpenGLFunctions& gl,
 
 // Texture
 
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor));
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        DAWN_TRY(
+            texture->ClearTexture(texture->GetAllSubresources(), TextureBase::ClearValue::NonZero));
+    }
+    return std::move(texture);
+}
+
 Texture::Texture(Device* device, const TextureDescriptor* descriptor)
     : Texture(device, descriptor, 0, TextureState::OwnedInternal) {
     const OpenGLFunctions& gl = device->GetGL();
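The nonzero-clear-for-testing moves out of the GL Texture constructor into the static Create() factory because a constructor cannot return MaybeError; construction stays infallible and the fallible clear runs afterwards. A generic sketch of this two-phase-initialization pattern (InitializeFallibleState is a hypothetical stand-in for the clear):

    // static
    ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
        Ref<Texture> texture = AcquireRef(new Texture(device, descriptor));  // cannot fail
        DAWN_TRY(texture->InitializeFallibleState());  // hypothetical fallible step
        return std::move(texture);
    }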
@@ -186,11 +197,6 @@ Texture::Texture(Device* device, const TextureDescriptor* descriptor)
     // The texture is not complete if it uses mipmapping and not all levels up to
     // MAX_LEVEL have been defined.
     gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
-
-    if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-        GetDevice()->ConsumedError(
-            ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
-    }
 }
 
 void Texture::Touch() {
@@ -539,13 +545,14 @@ MaybeError Texture::ClearTexture(const SubresourceRange& range,
     return {};
 }
 
-void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+MaybeError Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
     if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
-        GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(range, TextureBase::ClearValue::Zero));
     }
+    return {};
 }
 
 // TextureView
@@ -26,7 +26,7 @@ struct GLFormat;
 
 class Texture final : public TextureBase {
   public:
-    Texture(Device* device, const TextureDescriptor* descriptor);
+    static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
     Texture(Device* device, const TextureDescriptor* descriptor, GLuint handle, TextureState state);
 
     GLuint GetHandle() const;
@@ -35,9 +35,10 @@ class Texture final : public TextureBase {
     uint32_t GetGenID() const;
     void Touch();
 
-    void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+    MaybeError EnsureSubresourceContentInitialized(const SubresourceRange& range);
 
   private:
+    Texture(Device* device, const TextureDescriptor* descriptor);
     ~Texture() override;
 
     void DestroyImpl() override;
@@ -154,9 +154,9 @@ class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
 
 // Records the necessary barriers for a synchronization scope using the resource usage
 // data pre-computed in the frontend. Also performs lazy initialization if required.
-void TransitionAndClearForSyncScope(Device* device,
+MaybeError TransitionAndClearForSyncScope(Device* device,
                                           CommandRecordingContext* recordingContext,
                                           const SyncScopeResourceUsage& scope) {
     std::vector<VkBufferMemoryBarrier> bufferBarriers;
     std::vector<VkImageMemoryBarrier> imageBarriers;
     VkPipelineStageFlags srcStages = 0;
@@ -179,12 +179,13 @@ void TransitionAndClearForSyncScope(Device* device,
         // Clear subresources that are not render attachments. Render attachments will be
         // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
         // subresource has not been initialized before the render pass.
-        scope.textureUsages[i].Iterate(
-            [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+        DAWN_TRY(scope.textureUsages[i].Iterate(
+            [&](const SubresourceRange& range, wgpu::TextureUsage usage) -> MaybeError {
                 if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                    texture->EnsureSubresourceContentInitialized(recordingContext, range);
+                    DAWN_TRY(texture->EnsureSubresourceContentInitialized(recordingContext, range));
                 }
-            });
+                return {};
+            }));
         texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i], &imageBarriers,
                                         &srcStages, &dstStages);
     }
@@ -194,6 +195,7 @@ void TransitionAndClearForSyncScope(Device* device,
                               nullptr, bufferBarriers.size(), bufferBarriers.data(),
                               imageBarriers.size(), imageBarriers.data());
     }
+    return {};
 }
 
 MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
@@ -512,8 +514,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
     // And resets the used query sets which are rewritten on the render pass.
     auto PrepareResourcesForRenderPass = [](Device* device,
                                             CommandRecordingContext* recordingContext,
-                                            const RenderPassResourceUsage& usages) {
-        TransitionAndClearForSyncScope(device, recordingContext, usages);
+                                            const RenderPassResourceUsage& usages) -> MaybeError {
+        DAWN_TRY(TransitionAndClearForSyncScope(device, recordingContext, usages));
 
         // Reset all query set used on current render pass together before beginning render pass
        // because the reset command must be called outside render pass
@@ -521,6 +523,7 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
             ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
                                            usages.querySets[i], usages.queryAvailabilities[i]);
         }
+        return {};
     };
 
     size_t nextComputePassNumber = 0;
@@ -580,8 +583,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
                     // Since texture has been overwritten, it has been "initialized"
                     dst.texture->SetIsSubresourceContentInitialized(true, range);
                 } else {
-                    ToBackend(dst.texture)
-                        ->EnsureSubresourceContentInitialized(recordingContext, range);
+                    DAWN_TRY(ToBackend(dst.texture)
+                                 ->EnsureSubresourceContentInitialized(recordingContext, range));
                 }
                 ToBackend(src.buffer)
                     ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
@@ -614,8 +617,8 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
                 SubresourceRange range =
                     GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
 
-                ToBackend(src.texture)
-                    ->EnsureSubresourceContentInitialized(recordingContext, range);
+                DAWN_TRY(ToBackend(src.texture)
+                             ->EnsureSubresourceContentInitialized(recordingContext, range));
 
                 ToBackend(src.texture)
                     ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
@@ -642,15 +645,15 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
                 SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
                 SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
 
-                ToBackend(src.texture)
-                    ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+                DAWN_TRY(ToBackend(src.texture)
+                             ->EnsureSubresourceContentInitialized(recordingContext, srcRange));
                 if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
                                                   dst.mipLevel)) {
                     // Since destination texture has been overwritten, it has been "initialized"
                     dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    ToBackend(dst.texture)
-                        ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+                    DAWN_TRY(ToBackend(dst.texture)
+                                 ->EnsureSubresourceContentInitialized(recordingContext, dstRange));
                 }
 
                 if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
@@ -730,9 +733,9 @@ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingConte
             case Command::BeginRenderPass: {
                 BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
 
-                PrepareResourcesForRenderPass(
+                DAWN_TRY(PrepareResourcesForRenderPass(
                     device, recordingContext,
-                    GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                    GetResourceUsages().renderPasses[nextRenderPassNumber]));
 
                 LazyClearRenderPassAttachments(cmd);
                 DAWN_TRY(RecordRenderPass(recordingContext, cmd));
@@ -935,8 +938,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingCo
             case Command::Dispatch: {
                 DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
 
-                TransitionAndClearForSyncScope(device, recordingContext,
-                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(TransitionAndClearForSyncScope(
+                    device, recordingContext, resourceUsages.dispatchUsages[currentDispatch]));
                 descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
 
                 device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
|
@ -948,8 +951,8 @@ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingCo
|
||||||
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
|
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
|
||||||
VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
|
VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
|
||||||
|
|
||||||
TransitionAndClearForSyncScope(device, recordingContext,
|
DAWN_TRY(TransitionAndClearForSyncScope(
|
||||||
resourceUsages.dispatchUsages[currentDispatch]);
|
device, recordingContext, resourceUsages.dispatchUsages[currentDispatch]));
|
||||||
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
|
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
|
||||||
|
|
||||||
device->fn.CmdDispatchIndirect(commands, indirectBuffer,
|
device->fn.CmdDispatchIndirect(commands, indirectBuffer,
|
||||||
|
|
|
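A minimal sketch of the calling convention the hunks above rely on: each call site that used to ignore the result of EnsureSubresourceContentInitialized is now wrapped in DAWN_TRY, which only compiles because the enclosing function already returns MaybeError. The helper name RecordOneTextureCopy and its parameter list below are illustrative assumptions, not code from this CL; only EnsureSubresourceContentInitialized, TransitionUsageNow, DAWN_TRY, and MaybeError come from the change itself.

    // Sketch only: RecordOneTextureCopy is a hypothetical helper, not a Dawn function.
    // It assumes the usual dawn/native error-handling and Vulkan backend headers are included.
    MaybeError RecordOneTextureCopy(Texture* texture,
                                    CommandRecordingContext* recordingContext,
                                    const SubresourceRange& range) {
        // DAWN_TRY returns the error to the caller as soon as the lazy clear fails.
        DAWN_TRY(texture->EnsureSubresourceContentInitialized(recordingContext, range));
        // Otherwise recording continues and the function reports success with an empty MaybeError.
        texture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
        return {};
    }

TransitionAndClearForSyncScope and PrepareResourcesForRenderPass are wrapped the same way, which implies they now return MaybeError as well.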
@@ -816,7 +816,8 @@ MaybeError Device::CopyFromStagingToTextureImpl(const BufferBase* source,
         // Since texture has been overwritten, it has been "initialized"
         dst.texture->SetIsSubresourceContentInitialized(true, range);
     } else {
-        ToBackend(dst.texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+        DAWN_TRY(
+            ToBackend(dst.texture)->EnsureSubresourceContentInitialized(recordingContext, range));
     }
     // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
     // texture.
@@ -1342,17 +1342,17 @@ MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
     return {};
 }
 
-void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
-                                                  const SubresourceRange& range) {
+MaybeError Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                        const SubresourceRange& range) {
     if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-        return;
+        return {};
     }
     if (!IsSubresourceContentInitialized(range)) {
         // If subresource has not been initialized, clear it to black as it could contain dirty
         // bits from recycled memory
-        GetDevice()->ConsumedError(
-            ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+        DAWN_TRY(ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
     }
+    return {};
 }
 
 void Texture::UpdateExternalSemaphoreHandle(ExternalSemaphoreHandle handle) {
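The hunk above changes behavior as well as the return type: a failed lazy clear used to be swallowed on the device through ConsumedError, whereas it now propagates to the caller. As a hedged illustration, a caller that cannot itself return MaybeError could keep the old best-effort behavior as sketched below; the helper name and signature are assumptions, not part of this CL, while ConsumedError accepting a MaybeError is shown by the removed code above.

    // Hypothetical helper, not part of this CL (assumes <utility> and the Vulkan backend headers).
    void EnsureInitializedBestEffort(Device* device,
                                     Texture* texture,
                                     CommandRecordingContext* recordingContext,
                                     const SubresourceRange& range) {
        MaybeError maybeError =
            texture->EnsureSubresourceContentInitialized(recordingContext, range);
        if (maybeError.IsError()) {
            // Hand the error to the device, mirroring the removed GetDevice()->ConsumedError(...) call.
            device->ConsumedError(std::move(maybeError));
        }
    }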
@@ -84,8 +84,8 @@ class Texture final : public TextureBase {
     void TransitionEagerlyForExport(CommandRecordingContext* recordingContext);
     std::vector<VkSemaphore> AcquireWaitRequirements();
 
-    void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
-                                             const SubresourceRange& range);
+    MaybeError EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                   const SubresourceRange& range);
 
     VkImageLayout GetCurrentLayoutForSwapChain() const;
 
@@ -12,14 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <memory>
+#include <string>
 #include <vector>
 
 #include "dawn/common/Log.h"
 #include "dawn/native/SubresourceStorage.h"
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 namespace dawn::native {
 
+using ::testing::HasSubstr;
+
 // A fake class that replicates the behavior of SubresourceStorage but without any compression
 // and is used to compare the results of operations on SubresourceStorage against the "ground
 // truth" of FakeStorage.
@@ -211,6 +216,31 @@ bool operator==(const SmallData& a, const SmallData& b) {
     return a.value == b.value;
 }
 
+// Tests that the MaybeError version of Iterate returns the first error that it encounters.
+TEST(SubresourceStorageTest, IterateMaybeError) {
+    // Create a resource with multiple layers of different data so that we can ensure that the
+    // iterate function runs more than once.
+    constexpr uint32_t kLayers = 4;
+    SubresourceStorage<uint32_t> s(Aspect::Color, kLayers, 1);
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        s.Update(SubresourceRange::MakeSingle(Aspect::Color, layer, 0),
+                 [&](const SubresourceRange&, uint32_t* data) { *data = layer + 1; });
+    }
+
+    // Make sure that the first error is returned.
+    uint32_t errorLayer = 0;
+    MaybeError maybeError =
+        s.Iterate([&](const SubresourceRange& range, const uint32_t& layer) -> MaybeError {
+            if (!errorLayer) {
+                errorLayer = layer;
+            }
+            return DAWN_VALIDATION_ERROR("Errored at layer: %d", layer);
+        });
+    ASSERT_TRUE(maybeError.IsError());
+    std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+    EXPECT_THAT(error->GetFormattedMessage(), HasSubstr(std::to_string(errorLayer)));
+}
+
 // Test that the default value is correctly set.
 TEST(SubresourceStorageTest, DefaultValue) {
     // Test setting no default value for a primitive type.