Simplify ErrorScopes
In upstream WebGPU, error scopes do not wait for queue operations or async operations like create*PipelineAsync or mapAsync. This simplifies the implementation so we don't need to track error scopes by parent pointers but can instead have a simple stack.

Bug: dawn:22, chromium:1177107
Change-Id: Ic7344cbd96e257cbabc0f414934a5e42a4020a13
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/41980
Commit-Queue: Austin Eng <enga@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
parent 9fdbb74072
commit ef9e4412f5
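To make the new model concrete before reading the diff: the sketch below is a minimal, self-contained illustration of a stack-based error scope. The type and class names are simplified stand-ins of my own, not Dawn's actual wgpu types, but the shape mirrors the Push/Pop/HandleError interface of the ErrorScopeStack introduced below. Pushing records which kind of error a scope filters, an error walks the stack from the innermost scope outward until a matching scope captures it, and popping returns whatever the innermost scope captured.

// Illustrative sketch only -- simplified stand-ins for the real Dawn types.
#include <cassert>
#include <string>
#include <utility>
#include <vector>

enum class ErrorFilter { Validation, OutOfMemory };
enum class ErrorType { NoError, Validation, OutOfMemory };

struct ErrorScope {
    ErrorFilter filter;
    ErrorType capturedError;
    std::string message;
};

class ErrorScopeStack {
  public:
    // PushErrorScope: remember which kind of error this scope is interested in.
    void Push(ErrorFilter filter) {
        mScopes.push_back(ErrorScope{filter, ErrorType::NoError, ""});
    }

    // PopErrorScope: remove the innermost scope and hand its result to the caller.
    ErrorScope Pop() {
        assert(!mScopes.empty());
        ErrorScope scope = std::move(mScopes.back());
        mScopes.pop_back();
        return scope;
    }

    bool Empty() const {
        return mScopes.empty();
    }

    // Returns true if some scope captured the error; false means it should be
    // forwarded to the device's uncaptured-error callback instead.
    bool HandleError(ErrorType type, std::string message) {
        for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
            bool matches = (type == ErrorType::Validation &&
                            it->filter == ErrorFilter::Validation) ||
                           (type == ErrorType::OutOfMemory &&
                            it->filter == ErrorFilter::OutOfMemory);
            if (!matches) {
                continue;  // not this scope's kind of error; try the next outer scope
            }
            if (it->capturedError == ErrorType::NoError) {
                // Only the first error is kept; later ones are dropped.
                it->capturedError = type;
                it->message = std::move(message);
            }
            return true;
        }
        return false;
    }

  private:
    std::vector<ErrorScope> mScopes;
};

Because a scope is now a plain value owned by the stack rather than a ref-counted node pointing at its parent, nothing has to keep scopes alive while queue work is in flight, which is why the ErrorScopeTracker is deleted in the diff below.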
@@ -217,8 +217,6 @@ source_set("dawn_native_sources") {
     "ErrorInjector.h",
     "ErrorScope.cpp",
     "ErrorScope.h",
-    "ErrorScopeTracker.cpp",
-    "ErrorScopeTracker.h",
     "Extensions.cpp",
     "Extensions.h",
     "Fence.cpp",
@@ -90,8 +90,6 @@ target_sources(dawn_native PRIVATE
     "ErrorInjector.h"
     "ErrorScope.cpp"
     "ErrorScope.h"
-    "ErrorScopeTracker.cpp"
-    "ErrorScopeTracker.h"
     "Extensions.cpp"
     "Extensions.h"
     "ObjectContentHasher.cpp"
@@ -27,7 +27,6 @@
 #include "dawn_native/DynamicUploader.h"
 #include "dawn_native/ErrorData.h"
 #include "dawn_native/ErrorScope.h"
-#include "dawn_native/ErrorScopeTracker.h"
 #include "dawn_native/Fence.h"
 #include "dawn_native/Instance.h"
 #include "dawn_native/InternalPipelineStore.h"
@@ -98,22 +97,17 @@ namespace dawn_native {
 
     MaybeError DeviceBase::Initialize(QueueBase* defaultQueue) {
         mQueue = AcquireRef(defaultQueue);
-        mRootErrorScope = AcquireRef(new ErrorScope());
-        mCurrentErrorScope = mRootErrorScope.Get();
 
 #if defined(DAWN_ENABLE_ASSERTS)
-        mRootErrorScope->SetCallback(
-            [](WGPUErrorType, char const*, void*) {
-                static bool calledOnce = false;
-                if (!calledOnce) {
-                    calledOnce = true;
-                    dawn::WarningLog()
-                        << "No Dawn device uncaptured error callback was set. This is "
-                           "probably not intended. If you really want to ignore errors "
-                           "and suppress this message, set the callback to null.";
-                }
-            },
-            nullptr);
+        mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
+            static bool calledOnce = false;
+            if (!calledOnce) {
+                calledOnce = true;
+                dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+                                      "probably not intended. If you really want to ignore errors "
+                                      "and suppress this message, set the callback to null.";
+            }
+        };
 
         mDeviceLostCallback = [](char const*, void*) {
             static bool calledOnce = false;
@@ -127,7 +121,7 @@ namespace dawn_native {
 #endif  // DAWN_ENABLE_ASSERTS
 
         mCaches = std::make_unique<DeviceBase::Caches>();
-        mErrorScopeTracker = std::make_unique<ErrorScopeTracker>(this);
+        mErrorScopeStack = std::make_unique<ErrorScopeStack>();
         mDynamicUploader = std::make_unique<DynamicUploader>(this);
         mCreateReadyPipelineTracker = std::make_unique<CreateReadyPipelineTracker>(this);
         mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
@@ -146,9 +140,6 @@ namespace dawn_native {
     void DeviceBase::ShutDownBase() {
         // Skip handling device facilities if they haven't even been created (or failed doing so)
         if (mState != State::BeingCreated) {
-            // Reject all error scope callbacks.
-            mErrorScopeTracker->ClearForShutDown();
-
             // Reject all async pipeline creations.
             mCreateReadyPipelineTracker->ClearForShutDown();
         }
@@ -194,11 +185,6 @@ namespace dawn_native {
         // At this point GPU operations are always finished, so we are in the disconnected state.
         mState = State::Disconnected;
 
-        // mCurrentErrorScope can be null if we failed device initialization.
-        if (mCurrentErrorScope != nullptr) {
-            mCurrentErrorScope->UnlinkForShutdown();
-        }
-        mErrorScopeTracker = nullptr;
         mDynamicUploader = nullptr;
         mCreateReadyPipelineTracker = nullptr;
         mPersistentCache = nullptr;
@@ -242,16 +228,27 @@ namespace dawn_native {
             type = InternalErrorType::DeviceLost;
         }
 
-        // The device was lost, call the application callback.
-        if (type == InternalErrorType::DeviceLost && mDeviceLostCallback != nullptr) {
-            mQueue->HandleDeviceLoss();
-
-            mDeviceLostCallback(message, mDeviceLostUserdata);
-            mDeviceLostCallback = nullptr;
-        }
-
-        // Still forward device loss errors to the error scopes so they all reject.
-        mCurrentErrorScope->HandleError(ToWGPUErrorType(type), message);
+        if (type == InternalErrorType::DeviceLost) {
+            // The device was lost, call the application callback.
+            if (mDeviceLostCallback != nullptr) {
+                mDeviceLostCallback(message, mDeviceLostUserdata);
+                mDeviceLostCallback = nullptr;
+            }
+
+            mQueue->HandleDeviceLoss();
+
+            // Still forward device loss errors to the error scopes so they all reject.
+            mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+        } else {
+            // Pass the error to the error scope stack and call the uncaptured error callback
+            // if it isn't handled. DeviceLost is not handled here because it should be
+            // handled by the lost callback.
+            bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+            if (!captured && mUncapturedErrorCallback != nullptr) {
+                mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
+                                         mUncapturedErrorUserdata);
+            }
+        }
     }
 
     void DeviceBase::InjectError(wgpu::ErrorType type, const char* message) {
@@ -282,7 +279,8 @@ namespace dawn_native {
     }
 
     void DeviceBase::SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
-        mRootErrorScope->SetCallback(callback, userdata);
+        mUncapturedErrorCallback = callback;
+        mUncapturedErrorUserdata = userdata;
     }
 
     void DeviceBase::SetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
@@ -294,24 +292,22 @@ namespace dawn_native {
         if (ConsumedError(ValidateErrorFilter(filter))) {
             return;
         }
-        mCurrentErrorScope = AcquireRef(new ErrorScope(filter, mCurrentErrorScope.Get()));
+        mErrorScopeStack->Push(filter);
     }
 
     bool DeviceBase::PopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
-        if (DAWN_UNLIKELY(mCurrentErrorScope.Get() == mRootErrorScope.Get())) {
+        if (mErrorScopeStack->Empty()) {
             return false;
         }
-        mCurrentErrorScope->SetCallback(callback, userdata);
-        mCurrentErrorScope = Ref<ErrorScope>(mCurrentErrorScope->GetParent());
+        ErrorScope scope = mErrorScopeStack->Pop();
+        if (callback != nullptr) {
+            callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
+                     userdata);
+        }
 
         return true;
     }
 
-    ErrorScope* DeviceBase::GetCurrentErrorScope() {
-        ASSERT(mCurrentErrorScope != nullptr);
-        return mCurrentErrorScope.Get();
-    }
-
     PersistentCache* DeviceBase::GetPersistentCache() {
         ASSERT(mPersistentCache.get() != nullptr);
         return mPersistentCache.get();
@@ -360,10 +356,6 @@ namespace dawn_native {
         return GetAdapter()->GetInstance()->GetPlatform();
     }
 
-    ErrorScopeTracker* DeviceBase::GetErrorScopeTracker() const {
-        return mErrorScopeTracker.get();
-    }
-
    ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
         return mCompletedSerial;
     }
@@ -868,7 +860,6 @@ namespace dawn_native {
         // tick the dynamic uploader before the backend resource allocators. This would allow
         // reclaiming resources one tick earlier.
         mDynamicUploader->Deallocate(mCompletedSerial);
-        mErrorScopeTracker->Tick(mCompletedSerial);
         GetQueue()->Tick(mCompletedSerial);
 
         mCreateReadyPipelineTracker->Tick(mCompletedSerial);
@@ -35,8 +35,7 @@ namespace dawn_native {
     class BindGroupLayoutBase;
     class CreateReadyPipelineTracker;
     class DynamicUploader;
-    class ErrorScope;
-    class ErrorScopeTracker;
+    class ErrorScopeStack;
     class PersistentCache;
     class StagingBufferBase;
     struct InternalPipelineStore;
@@ -72,8 +71,6 @@ namespace dawn_native {
         AdapterBase* GetAdapter() const;
         dawn_platform::Platform* GetPlatform() const;
 
-        ErrorScopeTracker* GetErrorScopeTracker() const;
-
         // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
         // isn't a valid wgpu::TextureFormat or isn't supported by this device.
         // The pointer returned has the same lifetime as the device.
@@ -183,8 +180,6 @@ namespace dawn_native {
 
         MaybeError ValidateIsAlive() const;
 
-        ErrorScope* GetCurrentErrorScope();
-
         PersistentCache* GetPersistentCache();
 
         void Reference();
@@ -363,9 +358,14 @@ namespace dawn_native {
         // resources.
         virtual MaybeError WaitForIdleForDestruction() = 0;
 
+        wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
+        void* mUncapturedErrorUserdata = nullptr;
+
         wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
         void* mDeviceLostUserdata = nullptr;
 
+        std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
+
         // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
         // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
         // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
@@ -373,9 +373,6 @@ namespace dawn_native {
         Ref<InstanceBase> mInstance;
         AdapterBase* mAdapter = nullptr;
 
-        Ref<ErrorScope> mRootErrorScope;
-        Ref<ErrorScope> mCurrentErrorScope;
-
         // The object caches aren't exposed in the header as they would require a lot of
         // additional includes.
         struct Caches;
@@ -384,7 +381,6 @@ namespace dawn_native {
         Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
 
         std::unique_ptr<DynamicUploader> mDynamicUploader;
-        std::unique_ptr<ErrorScopeTracker> mErrorScopeTracker;
         std::unique_ptr<CreateReadyPipelineTracker> mCreateReadyPipelineTracker;
         Ref<QueueBase> mQueue;
 
@@ -18,136 +18,77 @@
 
 namespace dawn_native {
 
-    ErrorScope::ErrorScope() : mIsRoot(true) {
-    }
-
-    ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter, ErrorScope* parent)
-        : RefCounted(), mErrorFilter(errorFilter), mParent(parent), mIsRoot(false) {
-        ASSERT(mParent != nullptr);
-    }
-
-    ErrorScope::~ErrorScope() {
-        if (!IsRoot()) {
-            RunNonRootCallback();
-        }
-    }
-
-    void ErrorScope::SetCallback(wgpu::ErrorCallback callback, void* userdata) {
-        mCallback = callback;
-        mUserdata = userdata;
-    }
-
-    ErrorScope* ErrorScope::GetParent() {
-        return mParent.Get();
-    }
-
-    bool ErrorScope::IsRoot() const {
-        return mIsRoot;
-    }
-
-    void ErrorScope::RunNonRootCallback() {
-        ASSERT(!IsRoot());
-
-        if (mCallback != nullptr) {
-            // For non-root error scopes, the callback can run at most once.
-            mCallback(static_cast<WGPUErrorType>(mErrorType), mErrorMessage.c_str(), mUserdata);
-            mCallback = nullptr;
-        }
-    }
-
-    void ErrorScope::HandleError(wgpu::ErrorType type, const char* message) {
-        HandleErrorImpl(this, type, message);
-    }
-
-    void ErrorScope::UnlinkForShutdown() {
-        UnlinkForShutdownImpl(this);
-    }
-
-    // static
-    void ErrorScope::HandleErrorImpl(ErrorScope* scope, wgpu::ErrorType type, const char* message) {
-        ErrorScope* currentScope = scope;
-        for (; !currentScope->IsRoot(); currentScope = currentScope->GetParent()) {
-            ASSERT(currentScope != nullptr);
-
-            bool consumed = false;
-            switch (type) {
-                case wgpu::ErrorType::Validation:
-                    if (currentScope->mErrorFilter != wgpu::ErrorFilter::Validation) {
-                        // Error filter does not match. Move on to the next scope.
-                        continue;
-                    }
-                    consumed = true;
-                    break;
-
-                case wgpu::ErrorType::OutOfMemory:
-                    if (currentScope->mErrorFilter != wgpu::ErrorFilter::OutOfMemory) {
-                        // Error filter does not match. Move on to the next scope.
-                        continue;
-                    }
-                    consumed = true;
-                    break;
-
-                // DeviceLost is fatal. All error scopes capture them.
-                // |consumed| is false because these should bubble to all scopes.
-                case wgpu::ErrorType::DeviceLost:
-                    consumed = false;
-                    if (currentScope->mErrorType != wgpu::ErrorType::DeviceLost) {
-                        // DeviceLost overrides any other error that is not a DeviceLost.
-                        currentScope->mErrorType = type;
-                        currentScope->mErrorMessage = message;
-                    }
-                    break;
-
-                case wgpu::ErrorType::Unknown:
-                    // Means the scope was destroyed before contained work finished.
-                    // This happens when you destroy the device while there's pending work.
-                    // That's handled in ErrorScope::UnlinkForShutdownImpl, not here.
-                case wgpu::ErrorType::NoError:
-                    // Not considered an error, and should never be passed to HandleError.
-                    UNREACHABLE();
-                    return;
-            }
-
-            // Record the error if the scope doesn't have one yet.
-            if (currentScope->mErrorType == wgpu::ErrorType::NoError) {
-                currentScope->mErrorType = type;
-                currentScope->mErrorMessage = message;
-            }
-
-            if (consumed) {
-                return;
-            }
-        }
-
-        // The root error scope captures all uncaptured errors.
-        // Except, it should not capture device lost errors since those go to
-        // the device lost callback.
-        ASSERT(currentScope->IsRoot());
-        if (currentScope->mCallback && type != wgpu::ErrorType::DeviceLost) {
-            currentScope->mCallback(static_cast<WGPUErrorType>(type), message,
-                                    currentScope->mUserdata);
-        }
-    }
-
-    // static
-    void ErrorScope::UnlinkForShutdownImpl(ErrorScope* scope) {
-        Ref<ErrorScope> currentScope = scope;
-        Ref<ErrorScope> parentScope = nullptr;
-        for (; !currentScope->IsRoot(); currentScope = parentScope.Get()) {
-            ASSERT(!currentScope->IsRoot());
-            ASSERT(currentScope != nullptr);
-            parentScope = std::move(currentScope->mParent);
-            ASSERT(parentScope != nullptr);
-
-            // On shutdown, error scopes that have yet to have a status get Unknown.
-            if (currentScope->mErrorType == wgpu::ErrorType::NoError) {
-                currentScope->mErrorType = wgpu::ErrorType::Unknown;
-                currentScope->mErrorMessage = "Error scope destroyed";
-            }
-
-            // Run the callback if it hasn't run already.
-            currentScope->RunNonRootCallback();
-        }
-    }
+    namespace {
+
+        wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
+            switch (filter) {
+                case wgpu::ErrorFilter::None:
+                    return wgpu::ErrorType::NoError;
+                case wgpu::ErrorFilter::Validation:
+                    return wgpu::ErrorType::Validation;
+                case wgpu::ErrorFilter::OutOfMemory:
+                    return wgpu::ErrorType::OutOfMemory;
+            }
+        }
+
+    }  // namespace
+
+    ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
+        : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
+    }
+
+    wgpu::ErrorType ErrorScope::GetErrorType() const {
+        return mCapturedError;
+    }
+
+    const char* ErrorScope::GetErrorMessage() const {
+        return mErrorMessage.c_str();
+    }
+
+    void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
+        mScopes.push_back(ErrorScope(filter));
+    }
+
+    ErrorScope ErrorScopeStack::Pop() {
+        ASSERT(!mScopes.empty());
+        ErrorScope scope = std::move(mScopes.back());
+        mScopes.pop_back();
+        return scope;
+    }
+
+    bool ErrorScopeStack::Empty() const {
+        return mScopes.empty();
+    }
+
+    bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
+        ASSERT(type != wgpu::ErrorType::NoError);
+        for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
+            if (it->mMatchedErrorType != type) {
+                // Error filter does not match. Move on to the next scope.
+                continue;
+            }
+
+            // Filter matches.
+            // Record the error if the scope doesn't have one yet.
+            if (it->mCapturedError == wgpu::ErrorType::NoError) {
+                it->mCapturedError = type;
+                it->mErrorMessage = message;
+            }
+
+            if (type == wgpu::ErrorType::DeviceLost) {
+                if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
+                    // DeviceLost overrides any other error that is not a DeviceLost.
+                    it->mCapturedError = type;
+                    it->mErrorMessage = message;
+                }
+            } else {
+                // Errors that are not device lost are captured and stop propogating.
+                return true;
+            }
+        }
+
+        // The error was not captured.
+        return false;
+    }
 
 }  // namespace dawn_native
@@ -17,54 +17,41 @@
 
 #include "dawn_native/dawn_platform.h"
 
-#include "common/RefCounted.h"
-
 #include <string>
+#include <vector>
 
 namespace dawn_native {
 
-    // Errors can be recorded into an ErrorScope by calling |HandleError|.
-    // Because an error scope should not resolve until contained
-    // commands are complete, calling the callback is deferred until it is destructed.
-    // In-flight commands or asynchronous events should hold a reference to the
-    // ErrorScope for their duration.
-    //
-    // Because parent ErrorScopes should not resolve before child ErrorScopes,
-    // ErrorScopes hold a reference to their parent.
-    //
-    // To simplify ErrorHandling, there is a sentinel root error scope which has
-    // no parent. All uncaptured errors are handled by the root error scope. Its
-    // callback is called immediately once it encounters an error.
-    class ErrorScope final : public RefCounted {
+    class ErrorScope {
      public:
-        ErrorScope();  // Constructor for the root error scope.
-        ErrorScope(wgpu::ErrorFilter errorFilter, ErrorScope* parent);
-
-        void SetCallback(wgpu::ErrorCallback callback, void* userdata);
-        ErrorScope* GetParent();
-
-        void HandleError(wgpu::ErrorType type, const char* message);
-        void UnlinkForShutdown();
+        wgpu::ErrorType GetErrorType() const;
+        const char* GetErrorMessage() const;
 
      private:
-        ~ErrorScope() override;
-        bool IsRoot() const;
-        void RunNonRootCallback();
-
-        static void HandleErrorImpl(ErrorScope* scope, wgpu::ErrorType type, const char* message);
-        static void UnlinkForShutdownImpl(ErrorScope* scope);
-
-        wgpu::ErrorFilter mErrorFilter = wgpu::ErrorFilter::None;
-        Ref<ErrorScope> mParent = nullptr;
-        bool mIsRoot;
-
-        wgpu::ErrorCallback mCallback = nullptr;
-        void* mUserdata = nullptr;
-
-        wgpu::ErrorType mErrorType = wgpu::ErrorType::NoError;
+        friend class ErrorScopeStack;
+        explicit ErrorScope(wgpu::ErrorFilter errorFilter);
+
+        wgpu::ErrorType mMatchedErrorType;
+        wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
         std::string mErrorMessage = "";
     };
 
+    class ErrorScopeStack {
+      public:
+        void Push(wgpu::ErrorFilter errorFilter);
+        ErrorScope Pop();
+
+        bool Empty() const;
+
+        // Pass an error to the scopes in the stack. Returns true if one of the scopes
+        // captured the error. Returns false if the error should be forwarded to the
+        // uncaptured error callback.
+        bool HandleError(wgpu::ErrorType type, const char* message);
+
+      private:
+        std::vector<ErrorScope> mScopes;
+    };
+
 }  // namespace dawn_native
 
 #endif  // DAWNNATIVE_ERRORSCOPE_H_
@@ -1,47 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ErrorScopeTracker.h"
-
-#include "dawn_native/Device.h"
-#include "dawn_native/ErrorScope.h"
-
-#include <limits>
-
-namespace dawn_native {
-
-    ErrorScopeTracker::ErrorScopeTracker(DeviceBase* device) : mDevice(device) {
-    }
-
-    ErrorScopeTracker::~ErrorScopeTracker() {
-        ASSERT(mScopesInFlight.Empty());
-    }
-
-    void ErrorScopeTracker::TrackUntilLastSubmitComplete(ErrorScope* scope) {
-        mScopesInFlight.Enqueue(scope, mDevice->GetLastSubmittedCommandSerial());
-        mDevice->AddFutureSerial(mDevice->GetPendingCommandSerial());
-    }
-
-    void ErrorScopeTracker::Tick(ExecutionSerial completedSerial) {
-        mScopesInFlight.ClearUpTo(completedSerial);
-    }
-
-    void ErrorScopeTracker::ClearForShutDown() {
-        for (Ref<ErrorScope>& scope : mScopesInFlight.IterateAll()) {
-            scope->UnlinkForShutdown();
-        }
-        mScopesInFlight.Clear();
-    }
-
-}  // namespace dawn_native
@@ -1,44 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ERRORSCOPETRACKER_H_
-#define DAWNNATIVE_ERRORSCOPETRACKER_H_
-
-#include "common/RefCounted.h"
-#include "common/SerialQueue.h"
-#include "dawn_native/IntegerTypes.h"
-
-namespace dawn_native {
-
-    class DeviceBase;
-    class ErrorScope;
-
-    class ErrorScopeTracker {
-      public:
-        ErrorScopeTracker(DeviceBase* device);
-        ~ErrorScopeTracker();
-
-        void TrackUntilLastSubmitComplete(ErrorScope* scope);
-
-        void Tick(ExecutionSerial completedSerial);
-        void ClearForShutDown();
-
-      protected:
-        DeviceBase* mDevice;
-        SerialQueue<ExecutionSerial, Ref<ErrorScope>> mScopesInFlight;
-    };
-
-}  // namespace dawn_native
-
-#endif  // DAWNNATIVE_ERRORSCOPETRACKER_H_
@@ -23,8 +23,6 @@
 #include "dawn_native/CopyTextureForBrowserHelper.h"
 #include "dawn_native/Device.h"
 #include "dawn_native/DynamicUploader.h"
-#include "dawn_native/ErrorScope.h"
-#include "dawn_native/ErrorScopeTracker.h"
 #include "dawn_native/Fence.h"
 #include "dawn_native/QuerySet.h"
 #include "dawn_native/RenderPassEncoder.h"
@@ -176,8 +174,6 @@ namespace dawn_native {
 
         fence->SetSignaledValue(signalValue);
         fence->UpdateFenceOnComplete(fence, signalValue);
-        device->GetErrorScopeTracker()->TrackUntilLastSubmitComplete(
-            device->GetCurrentErrorScope());
     }
 
     void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
@@ -487,9 +483,6 @@ namespace dawn_native {
         if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
             return;
         }
-
-        device->GetErrorScopeTracker()->TrackUntilLastSubmitComplete(
-            device->GetCurrentErrorScope());
     }
 
 }  // namespace dawn_native
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "tests/MockCallback.h"
 #include "tests/unittests/validation/ValidationTest.h"
 
 #include <gmock/gmock.h>
@@ -140,64 +141,64 @@ TEST_F(ErrorScopeValidationTest, PushPopBalanced) {
     }
 }
 
-// Test that error scopes do not call their callbacks until after an enclosed Queue::Submit
+// Test that error scopes call their callbacks before an enclosed Queue::Submit
 // completes
-TEST_F(ErrorScopeValidationTest, CallbackAfterQueueSubmit) {
+TEST_F(ErrorScopeValidationTest, EnclosedQueueSubmit) {
     wgpu::Queue queue = device.GetQueue();
 
     device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
 
     queue.Submit(0, nullptr);
-    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+    wgpu::Fence fence = queue.CreateFence();
+    queue.Signal(fence, 1);
 
-    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this)).Times(1);
+    testing::Sequence seq;
 
-    // Side effects of Queue::Submit only are seen after Tick()
-    device.Tick();
-    FlushWire();
+    MockCallback<WGPUFenceOnCompletionCallback> fenceCallback;
+    fence.OnCompletion(1, fenceCallback.Callback(), fenceCallback.MakeUserdata(this));
+
+    MockCallback<WGPUErrorCallback> errorScopeCallback;
+    EXPECT_CALL(errorScopeCallback, Call(WGPUErrorType_NoError, _, this + 1)).InSequence(seq);
+    device.PopErrorScope(errorScopeCallback.Callback(), errorScopeCallback.MakeUserdata(this + 1));
+
+    EXPECT_CALL(fenceCallback, Call(WGPUFenceCompletionStatus_Success, this)).InSequence(seq);
+    WaitForAllOperations(device);
 }
 
-// Test that parent error scopes do not call their callbacks until after an enclosed Queue::Submit
+// Test that parent error scopes also call their callbacks before an enclosed Queue::Submit
 // completes
-TEST_F(ErrorScopeValidationTest, CallbackAfterQueueSubmitNested) {
+TEST_F(ErrorScopeValidationTest, EnclosedQueueSubmitNested) {
     wgpu::Queue queue = device.GetQueue();
 
     device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
     device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
 
     queue.Submit(0, nullptr);
-    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
-    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this + 1);
-
-    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this)).Times(1);
-    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this + 1))
-        .Times(1);
-
-    // Side effects of Queue::Submit only are seen after Tick()
-    device.Tick();
-    FlushWire();
-}
-
-// Test a callback that returns asynchronously followed by a synchronous one
-TEST_F(ErrorScopeValidationTest, AsynchronousThenSynchronous) {
-    wgpu::Queue queue = device.GetQueue();
-
-    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
-    queue.Submit(0, nullptr);
-    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
-    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this + 1))
-        .Times(1);
-    device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
-    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this + 1);
-
-    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this)).Times(1);
-    // Side effects of Queue::Submit only are seen after Tick()
-    device.Tick();
-    FlushWire();
+    wgpu::Fence fence = queue.CreateFence();
+    queue.Signal(fence, 1);
+
+    testing::Sequence seq;
+
+    MockCallback<WGPUFenceOnCompletionCallback> fenceCallback;
+    fence.OnCompletion(1, fenceCallback.Callback(), fenceCallback.MakeUserdata(this));
+
+    MockCallback<WGPUErrorCallback> errorScopeCallback2;
+    EXPECT_CALL(errorScopeCallback2, Call(WGPUErrorType_NoError, _, this + 1)).InSequence(seq);
+    device.PopErrorScope(errorScopeCallback2.Callback(),
+                         errorScopeCallback2.MakeUserdata(this + 1));
+
+    MockCallback<WGPUErrorCallback> errorScopeCallback1;
+    EXPECT_CALL(errorScopeCallback1, Call(WGPUErrorType_NoError, _, this + 2)).InSequence(seq);
+    device.PopErrorScope(errorScopeCallback1.Callback(),
+                         errorScopeCallback1.MakeUserdata(this + 2));
+
+    EXPECT_CALL(fenceCallback, Call(WGPUFenceCompletionStatus_Success, this)).InSequence(seq);
+    WaitForAllOperations(device);
 }
 
 // Test that if the device is destroyed before the callback occurs, it is called with NoError
-// because all previous operations are waited upon before the destruction returns.
+// in dawn_native, but Unknown in dawn_wire because the device is destroyed before the callback
+// message happens.
 TEST_F(ErrorScopeValidationTest, DeviceDestroyedBeforeCallback) {
     device.PushErrorScope(wgpu::ErrorFilter::OutOfMemory);
     {
@@ -205,10 +206,20 @@ TEST_F(ErrorScopeValidationTest, DeviceDestroyedBeforeCallback) {
         wgpu::Queue queue = device.GetQueue();
         queue.Submit(0, nullptr);
     }
-    device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
 
-    EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Unknown, _, this)).Times(1);
-    device = nullptr;
+    if (UsesWire()) {
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_Unknown, _, this))
+            .Times(1);
+        device = nullptr;
+    } else {
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback, Call(WGPUErrorType_NoError, _, this))
+            .Times(1);
+        device.PopErrorScope(ToMockDevicePopErrorScopeCallback, this);
+
+        device = nullptr;
+    }
 }
 
 // Regression test that on device shutdown, we don't get a recursion in O(pushed error scope) that