D3D12: Support creating compute pipeline asynchronously

This patch implements the asynchronous path of CreateComputePipelineAsync
on the D3D12 backend, together with the basic async-task framework that is
exercised by the dawn_unittest AsyncTaskTest.Basic.

1. Call the constructor of dawn_native::d3d12::ComputePipeline in the main
   thread.
2. Execute dawn_native::ComputePipelineBase::Initialize() (a virtual function)
   asynchronously.
3. Ensure every operation in dawn_native::d3d12::ComputePipeline::Initialize()
   is thread-safe (for example, PersistentCache is now guarded by a mutex).
4. Save all the return values (pipeline object or error message, userdata, etc.)
   in a CreateComputePipelineAsyncWaitableCallbackTask object and insert this
   callback task into the CallbackTaskManager.
5. In the callback task's Finish():
- Insert the pipeline object into the front-end pipeline cache if necessary
- Call the WGPUCreateComputePipelineAsyncCallback

Note that because the front-end pipeline cache is always accessed on the main
thread, it does not need to be made thread-safe right now.
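
For illustration only, here is a minimal sketch of how an application would drive
this path through the WebGPU C API. It assumes that device and shaderModule were
created elsewhere; the descriptor contents are placeholders and are not part of
this patch:

    // Hypothetical usage sketch: the callback fires on the main thread once the
    // worker-thread Initialize() has finished and the device processes its
    // pending callback tasks.
    WGPUComputePipelineDescriptor desc = {};
    desc.computeStage.module = shaderModule;
    desc.computeStage.entryPoint = "main";
    wgpuDeviceCreateComputePipelineAsync(
        device, &desc,
        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
           const char* message, void* userdata) {
            if (status == WGPUCreatePipelineAsyncStatus_Success) {
                // Use or store the pipeline here.
            }
        },
        nullptr);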

BUG=dawn:529
TEST=dawn_end2end_tests

Change-Id: I7eba2ce550b32439a94b2a4d1aa7f1b3383aa514
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/47900
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Author: Jiawei Shao
Date: 2021-06-04 05:12:06 +00:00 (committed by Dawn LUCI CQ)
Parent: 2adb3c83e4
Commit: 5e1ca53269
19 changed files with 270 additions and 26 deletions

View File

@@ -4,11 +4,11 @@
namespace dawn_native {
AsnycTaskManager::AsnycTaskManager(dawn_platform::WorkerTaskPool* workerTaskPool)
AsyncTaskManager::AsyncTaskManager(dawn_platform::WorkerTaskPool* workerTaskPool)
: mWorkerTaskPool(workerTaskPool) {
}
void AsnycTaskManager::PostTask(AsyncTask asyncTask) {
void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
// If these allocations become expensive, we can slab-allocate tasks.
Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
waitableTask->taskManager = this;
@@ -30,7 +30,7 @@ namespace dawn_native {
mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
}
void AsnycTaskManager::HandleTaskCompletion(WaitableTask* task) {
void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
std::lock_guard<std::mutex> lock(mPendingTasksMutex);
auto iter = mPendingTasks.find(task);
if (iter != mPendingTasks.end()) {
@@ -38,7 +38,7 @@ namespace dawn_native {
}
}
void AsnycTaskManager::WaitAllPendingTasks() {
void AsyncTaskManager::WaitAllPendingTasks() {
std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
{
@@ -51,7 +51,12 @@ namespace dawn_native {
}
}
void AsnycTaskManager::DoWaitableTask(void* task) {
bool AsyncTaskManager::HasPendingTasks() {
std::lock_guard<std::mutex> lock(mPendingTasksMutex);
return !mPendingTasks.empty();
}
void AsyncTaskManager::DoWaitableTask(void* task) {
Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
waitableTask->asyncTask();
waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());

View File

@@ -36,18 +36,19 @@ namespace dawn_native {
// task if we need it for synchronous pipeline compilation.
using AsyncTask = std::function<void()>;
class AsnycTaskManager {
class AsyncTaskManager {
public:
explicit AsnycTaskManager(dawn_platform::WorkerTaskPool* workerTaskPool);
explicit AsyncTaskManager(dawn_platform::WorkerTaskPool* workerTaskPool);
void PostTask(AsyncTask asyncTask);
void WaitAllPendingTasks();
bool HasPendingTasks();
private:
class WaitableTask : public RefCounted {
public:
AsyncTask asyncTask;
AsnycTaskManager* taskManager;
AsyncTaskManager* taskManager;
std::unique_ptr<dawn_platform::WaitableEvent> waitableEvent;
};
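
As a rough usage sketch based only on the interface above and the unittest at the
end of this change (the std::atomic counter and the task bodies are illustrative,
not part of the patch):

    // Illustrative only: post a few tasks and block until all of them have run.
    dawn_platform::Platform platform;
    std::unique_ptr<dawn_platform::WorkerTaskPool> pool = platform.CreateWorkerTaskPool();
    dawn_native::AsyncTaskManager taskManager(pool.get());

    std::atomic<uint32_t> counter(0);
    for (uint32_t i = 0; i < 4; ++i) {
        // Each task runs on a worker thread owned by the WorkerTaskPool.
        taskManager.PostTask([&counter] { counter++; });
    }
    taskManager.WaitAllPendingTasks();
    // HasPendingTasks() is now false and counter == 4.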

View File

@@ -56,6 +56,10 @@ namespace dawn_native {
}
}
MaybeError ComputePipelineBase::Initialize(const ComputePipelineDescriptor* descriptor) {
return {};
}
// static
ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
return new ComputePipelineBase(device, ObjectBase::kError);

View File

@@ -39,6 +39,11 @@ namespace dawn_native {
private:
ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
// CreateComputePipelineAsyncTask is declared as a friend of ComputePipelineBase as it
// needs to call the private member function ComputePipelineBase::Initialize().
friend class CreateComputePipelineAsyncTask;
virtual MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
};
} // namespace dawn_native

View File

@@ -14,6 +14,7 @@
#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/AsyncTask.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Device.h"
#include "dawn_native/RenderPipeline.h"
@@ -100,4 +101,58 @@
"Device lost before callback", mUserData);
}
CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
Ref<ComputePipelineBase> nonInitializedComputePipeline,
const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata)
: mComputePipeline(nonInitializedComputePipeline),
mBlueprintHash(blueprintHash),
mCallback(callback),
mUserdata(userdata),
mLabel(descriptor->label != nullptr ? descriptor->label : ""),
mLayout(descriptor->layout),
mEntryPoint(descriptor->computeStage.entryPoint),
mComputeShaderModule(descriptor->computeStage.module) {
ASSERT(mComputePipeline != nullptr);
// TODO(jiawei.shao@intel.com): save nextInChain when it is supported in Dawn.
ASSERT(descriptor->nextInChain == nullptr);
}
void CreateComputePipelineAsyncTask::Run() {
ComputePipelineDescriptor descriptor;
if (!mLabel.empty()) {
descriptor.label = mLabel.c_str();
}
descriptor.computeStage.entryPoint = mEntryPoint.c_str();
descriptor.layout = mLayout.Get();
descriptor.computeStage.module = mComputeShaderModule.Get();
MaybeError maybeError = mComputePipeline->Initialize(&descriptor);
std::string errorMessage;
if (maybeError.IsError()) {
mComputePipeline = nullptr;
errorMessage = maybeError.AcquireError()->GetMessage();
}
mComputeShaderModule = nullptr;
mComputePipeline->GetDevice()->AddComputePipelineAsyncCallbackTask(
mComputePipeline, errorMessage, mCallback, mUserdata, mBlueprintHash);
}
void CreateComputePipelineAsyncTask::RunAsync(
std::unique_ptr<CreateComputePipelineAsyncTask> task) {
DeviceBase* device = task->mComputePipeline->GetDevice();
// Using "taskPtr = std::move(task)" causes compilation error while it should be supported
// since C++14:
// https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
auto asyncTask = [taskPtr = task.release()] {
std::unique_ptr<CreateComputePipelineAsyncTask> innnerTaskPtr(taskPtr);
innnerTaskPtr->Run();
};
device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
}
} // namespace dawn_native
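
A side note on the RunAsync() workaround above (this explanation is not part of the
patch): the compilation error mentioned in the comment is most likely not about
init-captures themselves but about AsyncTask being a std::function<void()> (see
AsyncTask.h), and std::function requiring a copyable callable; a lambda that
move-captures a std::unique_ptr is not copyable and therefore cannot be stored in
it. A minimal sketch of the distinction, using hypothetical names:

    #include <functional>
    #include <memory>

    void Sketch() {
        auto owned = std::make_unique<int>(42);
        // Move-capturing the unique_ptr makes the lambda non-copyable, so the next
        // line would fail to compile:
        //   std::function<void()> f = [p = std::move(owned)] { /* use *p */ };
        // Releasing the raw pointer keeps the lambda copyable; ownership is
        // re-acquired inside the body, which is what RunAsync() does above. This
        // relies on the std::function never actually being copied.
        std::function<void()> f = [raw = owned.release()] {
            std::unique_ptr<int> reacquired(raw);
            // ... use *reacquired ...
        };
        f();
    }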

View File

@@ -18,12 +18,16 @@
#include "common/RefCounted.h"
#include "dawn/webgpu.h"
#include "dawn_native/CallbackTaskManager.h"
#include "dawn_native/Error.h"
namespace dawn_native {
class ComputePipelineBase;
class DeviceBase;
class PipelineLayoutBase;
class RenderPipelineBase;
class ShaderModuleBase;
struct ComputePipelineDescriptor;
struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
@@ -33,17 +37,17 @@ namespace dawn_native {
void* mUserData;
};
struct CreateComputePipelineAsyncCallbackTask final : CreatePipelineAsyncCallbackTaskBase {
struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
void Finish() final;
void Finish() override;
void HandleShutDown() final;
void HandleDeviceLoss() final;
private:
protected:
Ref<ComputePipelineBase> mPipeline;
WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
};
@@ -63,6 +67,37 @@ namespace dawn_native {
WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
};
// CreateComputePipelineAsyncTask defines all the inputs and outputs of
// CreateComputePipelineAsync() tasks, which are the same among all the backends.
// TODO(crbug.com/dawn/529): Define a "flat descriptor"
// (like utils::ComboRenderPipelineDescriptor) in ComputePipeline.h that's reused here and for
// caching, etc. ValidateComputePipelineDescriptor() could produce that flat descriptor so that
// it is reused in other places.
class CreateComputePipelineAsyncTask {
public:
CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
virtual ~CreateComputePipelineAsyncTask() = default;
void Run();
static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
protected:
Ref<ComputePipelineBase> mComputePipeline;
size_t mBlueprintHash;
WGPUCreateComputePipelineAsyncCallback mCallback;
void* mUserdata;
std::string mLabel;
Ref<PipelineLayoutBase> mLayout;
std::string mEntryPoint;
Ref<ShaderModuleBase> mComputeShaderModule;
};
} // namespace dawn_native
#endif // DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_

View File

@@ -16,11 +16,11 @@
#include "common/Log.h"
#include "dawn_native/Adapter.h"
#include "dawn_native/AsyncTask.h"
#include "dawn_native/AttachmentState.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CallbackTaskManager.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CompilationMessages.h"
@@ -44,6 +44,7 @@
#include "dawn_native/SwapChain.h"
#include "dawn_native/Texture.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include "dawn_platform/DawnPlatform.h"
#include <unordered_set>
@@ -130,6 +131,10 @@ namespace dawn_native {
mInternalPipelineStore = std::make_unique<InternalPipelineStore>();
mPersistentCache = std::make_unique<PersistentCache>(this);
ASSERT(GetPlatform() != nullptr);
mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
// Starting from now the backend can start doing reentrant calls so the device is marked as
// alive.
mState = State::Alive;
@@ -143,6 +148,8 @@ namespace dawn_native {
// Skip handling device facilities if they haven't even been created (or failed doing so)
if (mState != State::BeingCreated) {
// Call all the callbacks immediately as the device is about to shut down.
// TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
mAsyncTaskManager->WaitAllPendingTasks();
auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
callbackTask->HandleShutDown();
@@ -192,6 +199,7 @@ namespace dawn_native {
mDynamicUploader = nullptr;
mCallbackTaskManager = nullptr;
mAsyncTaskManager = nullptr;
mPersistentCache = nullptr;
mEmptyBindGroupLayout = nullptr;
@@ -241,6 +249,9 @@ namespace dawn_native {
}
mQueue->HandleDeviceLoss();
// TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
mAsyncTaskManager->WaitAllPendingTasks();
auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
callbackTask->HandleDeviceLoss();
@@ -393,6 +404,10 @@ namespace dawn_native {
}
bool DeviceBase::IsDeviceIdle() {
if (mAsyncTaskManager->HasPendingTasks()) {
return false;
}
ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
if (mCompletedSerial == maxSerial) {
return true;
@@ -1271,4 +1286,60 @@
}
}
AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
return mAsyncTaskManager.get();
}
CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
return mCallbackTaskManager.get();
}
dawn_platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
return mWorkerTaskPool.get();
}
void DeviceBase::AddComputePipelineAsyncCallbackTask(
Ref<ComputePipelineBase> pipeline,
std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata,
size_t blueprintHash) {
// CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
// needs to call the private member function DeviceBase::AddOrGetCachedPipeline().
struct CreateComputePipelineAsyncWaitableCallbackTask final
: CreateComputePipelineAsyncCallbackTask {
CreateComputePipelineAsyncWaitableCallbackTask(
Ref<ComputePipelineBase> pipeline,
std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata,
size_t blueprintHash)
: CreateComputePipelineAsyncCallbackTask(std::move(pipeline),
errorMessage,
callback,
userdata),
mBlueprintHash(blueprintHash) {
}
void Finish() final {
// TODO(jiawei.shao@intel.com): call AddOrGetCachedPipeline() asynchronously in
// CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
// thread-safe.
if (mPipeline.Get() != nullptr) {
mPipeline =
mPipeline->GetDevice()->AddOrGetCachedPipeline(mPipeline, mBlueprintHash);
}
CreateComputePipelineAsyncCallbackTask::Finish();
}
private:
size_t mBlueprintHash;
};
mCallbackTaskManager->AddCallbackTask(
std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
std::move(pipeline), errorMessage, callback, userdata, blueprintHash));
}
} // namespace dawn_native

View File

@@ -26,11 +26,15 @@
#include "dawn_native/DawnNative.h"
#include "dawn_native/dawn_platform.h"
#include <memory>
#include <utility>
namespace dawn_platform {
class WorkerTaskPool;
} // namespace dawn_platform
namespace dawn_native {
class AdapterBase;
class AsyncTaskManager;
class AttachmentState;
class AttachmentStateBlueprint;
class BindGroupLayoutBase;
@@ -41,6 +45,7 @@ namespace dawn_native {
class OwnedCompilationMessages;
class PersistentCache;
class StagingBufferBase;
struct CallbackTask;
struct InternalPipelineStore;
struct ShaderModuleParseResult;
@@ -278,6 +283,16 @@ namespace dawn_native {
virtual float GetTimestampPeriodInNS() const = 0;
AsyncTaskManager* GetAsyncTaskManager() const;
CallbackTaskManager* GetCallbackTaskManager() const;
dawn_platform::WorkerTaskPool* GetWorkerTaskPool() const;
void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata,
size_t blueprintHash);
protected:
void SetToggle(Toggle toggle, bool isEnabled);
void ForceSetToggle(Toggle toggle, bool isEnabled);
@@ -332,10 +347,10 @@ namespace dawn_native {
const ComputePipelineDescriptor* descriptor);
Ref<ComputePipelineBase> AddOrGetCachedPipeline(Ref<ComputePipelineBase> computePipeline,
size_t blueprintHash);
void CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
virtual void CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
void ApplyToggleOverrides(const DeviceDescriptor* deviceDescriptor);
void ApplyExtensions(const DeviceDescriptor* deviceDescriptor);
@@ -398,7 +413,7 @@ namespace dawn_native {
Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
std::unique_ptr<DynamicUploader> mDynamicUploader;
std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
Ref<QueueBase> mQueue;
struct DeprecationWarnings;
@@ -417,6 +432,9 @@ namespace dawn_native {
std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
std::unique_ptr<PersistentCache> mPersistentCache;
std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
std::unique_ptr<dawn_platform::WorkerTaskPool> mWorkerTaskPool;
};
} // namespace dawn_native

View File

@@ -18,6 +18,7 @@
#include "common/Log.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/Surface.h"
#include "dawn_platform/DawnPlatform.h"
#if defined(DAWN_USE_X11)
# include "dawn_native/XlibXcbFunctions.h"
@@ -64,6 +65,7 @@ namespace dawn_native {
return instance.Detach();
}
// TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
bool InstanceBase::Initialize(const InstanceDescriptor*) {
return true;
}
@@ -225,8 +227,15 @@ namespace dawn_native {
mPlatform = platform;
}
dawn_platform::Platform* InstanceBase::GetPlatform() const {
return mPlatform;
dawn_platform::Platform* InstanceBase::GetPlatform() {
if (mPlatform != nullptr) {
return mPlatform;
}
if (mDefaultPlatform == nullptr) {
mDefaultPlatform = std::make_unique<dawn_platform::Platform>();
}
return mDefaultPlatform.get();
}
const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {

View File

@@ -27,6 +27,10 @@
#include <unordered_map>
#include <vector>
namespace dawn_platform {
class Platform;
} // namespace dawn_platform
namespace dawn_native {
class Surface;
@@ -66,7 +70,7 @@ namespace dawn_native {
bool IsBeginCaptureOnStartupEnabled() const;
void SetPlatform(dawn_platform::Platform* platform);
dawn_platform::Platform* GetPlatform() const;
dawn_platform::Platform* GetPlatform();
// Get backend-independent libraries that need to be loaded dynamically.
const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
@@ -95,6 +99,7 @@ namespace dawn_native {
BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
dawn_platform::Platform* mPlatform = nullptr;
std::unique_ptr<dawn_platform::Platform> mDefaultPlatform;
std::vector<std::unique_ptr<BackendConnection>> mBackends;
std::vector<std::unique_ptr<AdapterBase>> mAdapters;

View File

@@ -29,6 +29,7 @@ namespace dawn_native {
if (mCache == nullptr) {
return blob;
}
std::lock_guard<std::mutex> lock(mMutex);
blob.bufferSize = mCache->LoadData(reinterpret_cast<WGPUDevice>(mDevice), key.data(),
key.size(), nullptr, 0);
if (blob.bufferSize > 0) {
@@ -48,6 +49,7 @@
}
ASSERT(value != nullptr);
ASSERT(size > 0);
std::lock_guard<std::mutex> lock(mMutex);
mCache->StoreData(reinterpret_cast<WGPUDevice>(mDevice), key.data(), key.size(), value,
size);
}

View File

@@ -17,6 +17,7 @@
#include "dawn_native/Error.h"
#include <mutex>
#include <vector>
namespace dawn_platform {
@@ -36,6 +37,10 @@ namespace dawn_native {
enum class PersistentKeyType { Shader };
// This class should always be thread-safe as it is used in Create*PipelineAsync() where it is
// called asynchronously.
// The thread-safety of any access to mCache (the function LoadData() and StoreData()) is
// protected by mMutex.
class PersistentCache {
public:
PersistentCache(DeviceBase* device);
@@ -79,6 +84,7 @@ namespace dawn_native {
DeviceBase* mDevice = nullptr;
std::mutex mMutex;
dawn_platform::CachingInterface* mCache = nullptr;
};
} // namespace dawn_native

View File

@@ -14,7 +14,8 @@
#include "dawn_native/d3d12/ComputePipelineD3D12.h"
#include "common/Assert.h"
#include "dawn_native/AsyncTask.h"
#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
@@ -68,4 +69,16 @@ namespace dawn_native { namespace d3d12 {
return mPipelineState.Get();
}
void ComputePipeline::CreateAsync(Device* device,
const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
std::make_unique<CreateComputePipelineAsyncTask>(pipeline, descriptor, blueprintHash,
callback, userdata);
CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
}
}} // namespace dawn_native::d3d12

View File

@@ -28,6 +28,11 @@ namespace dawn_native { namespace d3d12 {
static ResultOrError<Ref<ComputePipeline>> Create(
Device* device,
const ComputePipelineDescriptor* descriptor);
static void CreateAsync(Device* device,
const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
ComputePipeline() = delete;
ID3D12PipelineState* GetPipelineState() const;
@@ -35,7 +40,7 @@ namespace dawn_native { namespace d3d12 {
private:
~ComputePipeline() override;
using ComputePipelineBase::ComputePipelineBase;
MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
ComPtr<ID3D12PipelineState> mPipelineState;
};

View File

@@ -363,6 +363,12 @@ namespace dawn_native { namespace d3d12 {
const TextureViewDescriptor* descriptor) {
return TextureView::Create(texture, descriptor);
}
void Device::CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) {
ComputePipeline::CreateAsync(this, descriptor, blueprintHash, callback, userdata);
}
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =

View File

@@ -171,6 +171,10 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
void CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) override;
void ShutDownImpl() override;
MaybeError WaitForIdleForDestruction() override;

View File

@@ -37,7 +37,7 @@ namespace dawn_native { namespace metal {
private:
using ComputePipelineBase::ComputePipelineBase;
MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
MTLSize mLocalWorkgroupSize;

View File

@@ -35,7 +35,7 @@ namespace dawn_native { namespace vulkan {
private:
~ComputePipeline() override;
using ComputePipelineBase::ComputePipelineBase;
MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
VkPipeline mHandle = VK_NULL_HANDLE;
};

View File

@@ -67,7 +67,7 @@ TEST_F(AsyncTaskTest, Basic) {
dawn_platform::Platform platform;
std::unique_ptr<dawn_platform::WorkerTaskPool> pool = platform.CreateWorkerTaskPool();
dawn_native::AsnycTaskManager taskManager(pool.get());
dawn_native::AsyncTaskManager taskManager(pool.get());
ConcurrentTaskResultQueue taskResultQueue;
constexpr size_t kTaskCount = 4u;