Remove the future serial

This adds 'HasScheduledCommands()', so Dawn no longer needs the future
serial to decide when to tick or to track async tasks.

Bug: dawn:1413
Change-Id: Ide9ba69b796a46fa8bb70b002f4e2aeb1622bffd
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/98720
Kokoro: Kokoro <noreply+kokoro@google.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Jie A Chen <jie.a.chen@intel.com>
jchen10 2022-10-29 03:28:20 +00:00 committed by Dawn LUCI CQ
parent d1910af8bf
commit 5722f2878d
18 changed files with 148 additions and 92 deletions
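
Before the per-file diffs, a condensed sketch of the idea. This is not Dawn's real code: 'DeviceSketch', the plain-integer ExecutionSerial, the '+1' pending serial, and the placeholder CheckPassedSerials/HasPendingCommands bodies are simplifications assumed for illustration. The point is that the device now asks whether any commands are scheduled on either the frontend or the backend, instead of comparing against a separately maintained future serial.

#include <cstdint>

using ExecutionSerial = uint64_t;  // Dawn's ExecutionSerial is a typed integer; a plain uint64_t
                                   // is assumed here to keep the sketch self-contained.

class DeviceSketch {
  public:
    // "Executing" commands are known by the frontend (submitted but not yet completed);
    // "Pending" commands are known by the backend (recorded but not yet submitted).
    // "Scheduled" commands are either of the two.
    bool HasScheduledCommands() const {
        return mLastSubmittedSerial > mCompletedSerial || HasPendingCommands();
    }

    // Serial by which all currently scheduled work will have completed: pending commands go
    // out with the next serial; otherwise the last submitted serial is the latest to signal.
    ExecutionSerial GetScheduledWorkDoneSerial() const {
        return HasPendingCommands() ? mLastSubmittedSerial + 1 : mLastSubmittedSerial;
    }

    // Tick only while something is outstanding; there is no mFutureSerial to compare against.
    void Tick() {
        if (HasScheduledCommands()) {
            mCompletedSerial = CheckPassedSerials();
        }
    }

  private:
    // Stand-ins for the per-backend overrides and the fence query; placeholder bodies only.
    bool HasPendingCommands() const { return mBackendHasPendingCommands; }
    ExecutionSerial CheckPassedSerials() const { return mLastSubmittedSerial; }

    ExecutionSerial mCompletedSerial = 0;
    ExecutionSerial mLastSubmittedSerial = 0;
    bool mBackendHasPendingCommands = false;
};

The real implementations are in the DeviceBase, QueueBase, and per-backend diffs below.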

View File

@ -21,6 +21,7 @@
#include "dawn/common/Alloc.h"
#include "dawn/common/Assert.h"
#include "dawn/native/CallbackTaskManager.h"
#include "dawn/native/Commands.h"
#include "dawn/native/Device.h"
#include "dawn/native/DynamicUploader.h"
@ -34,16 +35,21 @@
namespace dawn::native {
namespace {
struct MapRequestTask : QueueBase::TaskInFlight {
MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
uint64_t(serial));
struct MapRequestTask : TrackTaskCallback {
MapRequestTask(dawn::platform::Platform* platform, Ref<BufferBase> buffer, MapRequestID id)
: TrackTaskCallback(platform), buffer(std::move(buffer)), id(id) {}
void Finish() override {
ASSERT(mSerial != kMaxExecutionSerial);
TRACE_EVENT1(mPlatform, General, "Buffer::TaskInFlight::Finished", "serial",
uint64_t(mSerial));
buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
}
void HandleDeviceLoss() override {
buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
}
void HandleShutDown() override {
buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
}
~MapRequestTask() override = default;
private:
@ -353,10 +359,11 @@ void BufferBase::APIMapAsync(wgpu::MapMode mode,
CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
return;
}
std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
std::unique_ptr<MapRequestTask> request =
std::make_unique<MapRequestTask>(GetDevice()->GetPlatform(), this, mLastMapID);
TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
uint64_t(GetDevice()->GetPendingCommandSerial()));
GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
GetDevice()->GetQueue()->TrackTask(std::move(request));
}
void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {

View File

@ -428,7 +428,6 @@ void DeviceBase::Destroy() {
break;
}
ASSERT(mCompletedSerial == mLastSubmittedSerial);
ASSERT(mFutureSerial <= mCompletedSerial);
if (mState != State::BeingCreated) {
// The GPU timeline is finished.
@ -440,6 +439,9 @@ void DeviceBase::Destroy() {
// Call TickImpl once last time to clean up resources
// Ignore errors so that we can continue with destruction
IgnoreErrors(TickImpl());
// Trigger all in-flight TrackTask callbacks from 'mQueue'.
FlushCallbackTaskQueue();
}
// At this point GPU operations are always finished, so we are in the disconnected state.
@ -499,7 +501,6 @@ void DeviceBase::HandleError(InternalErrorType type,
IgnoreErrors(WaitForIdleForDestruction());
IgnoreErrors(TickImpl());
AssumeCommandsComplete();
ASSERT(mFutureSerial <= mCompletedSerial);
mState = State::Disconnected;
// Now everything is as if the device was lost.
@ -694,10 +695,6 @@ ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
return mLastSubmittedSerial;
}
ExecutionSerial DeviceBase::GetFutureSerial() const {
return mFutureSerial;
}
InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
return mInternalPipelineStore.get();
}
@ -707,10 +704,9 @@ void DeviceBase::IncrementLastSubmittedCommandSerial() {
}
void DeviceBase::AssumeCommandsComplete() {
ExecutionSerial maxSerial =
ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
mLastSubmittedSerial = maxSerial;
mCompletedSerial = maxSerial;
// Bump serials so any pending callbacks can be fired.
mLastSubmittedSerial++;
mCompletedSerial = mLastSubmittedSerial;
}
bool DeviceBase::IsDeviceIdle() {
@ -720,24 +716,13 @@ bool DeviceBase::IsDeviceIdle() {
if (!mCallbackTaskManager->IsEmpty()) {
return false;
}
ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
if (mCompletedSerial == maxSerial) {
return true;
}
return false;
return !HasScheduledCommands();
}
ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
return mLastSubmittedSerial + ExecutionSerial(1);
}
void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
if (serial > mFutureSerial) {
mFutureSerial = serial;
}
}
MaybeError DeviceBase::CheckPassedSerials() {
ExecutionSerial completedSerial;
DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
@ -1241,21 +1226,13 @@ bool DeviceBase::APITick() {
MaybeError DeviceBase::Tick() {
DAWN_TRY(ValidateIsAlive());
// to avoid overly ticking, we only want to tick when:
// To avoid overly ticking, we only want to tick when:
// 1. the last submitted serial has moved beyond the completed serial
// 2. or the completed serial has not reached the future serial set by the trackers
if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
// 2. or the backend still has pending commands to submit.
if (HasScheduledCommands()) {
DAWN_TRY(CheckPassedSerials());
DAWN_TRY(TickImpl());
// There is no GPU work in flight, we need to move the serials forward so that
// so that CPU operations waiting on GPU completion can know they don't have to wait.
// AssumeCommandsComplete will assign the max serial we must tick to in order to
// fire the awaiting callbacks.
if (mCompletedSerial == mLastSubmittedSerial) {
AssumeCommandsComplete();
}
// TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
// tick the dynamic uploader before the backend resource allocators. This would allow
// reclaiming resources one tick earlier.
@ -1917,4 +1894,28 @@ uint64_t DeviceBase::GetBufferCopyOffsetAlignmentForDepthStencil() const {
return 4u;
}
bool DeviceBase::HasScheduledCommands() const {
return mLastSubmittedSerial > mCompletedSerial || HasPendingCommands();
}
void DeviceBase::AssumeCommandsCompleteForTesting() {
AssumeCommandsComplete();
}
// All work submitted up to this point is expected to complete by this serial.
// Internally the serial is computed according to whether the frontend and the backend have pending
// commands. There are four possible combinations:
// 1) Frontend(No), Backend(No)
// 2) Frontend(No), Backend(Yes)
// 3) Frontend(Yes), Backend(No)
// 4) Frontend(Yes), Backend(Yes)
// For case 1, we don't need a serial to track the task, as we can acknowledge it right away.
// For cases 2 and 4, there will eventually be at least one submission, so we can use
// 'GetPendingCommandSerial' as the serial.
// For case 3, we can't use 'GetPendingCommandSerial' because that serial may never be submitted.
// Instead we use 'GetLastSubmittedCommandSerial', which is guaranteed to complete eventually.
ExecutionSerial DeviceBase::GetScheduledWorkDoneSerial() const {
return HasPendingCommands() ? GetPendingCommandSerial() : GetLastSubmittedCommandSerial();
}
} // namespace dawn::native

View File

@ -159,7 +159,6 @@ class DeviceBase : public RefCountedWithExternalCount {
ExecutionSerial GetCompletedCommandSerial() const;
ExecutionSerial GetLastSubmittedCommandSerial() const;
ExecutionSerial GetFutureSerial() const;
ExecutionSerial GetPendingCommandSerial() const;
// Many Dawn objects are completely immutable once created which means that if two
@ -349,14 +348,6 @@ class DeviceBase : public RefCountedWithExternalCount {
void APIForceLoss(wgpu::DeviceLostReason reason, const char* message);
QueueBase* GetQueue() const;
// AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
// ticked in order to clean up all pending callback work or to execute asynchronous resource
// writes. It should be given the serial that a callback is tracked with, so that once that
// serial is completed, it can be resolved and cleaned up. This is so that when there is no
// gpu work (the last submitted serial has not moved beyond the completed serial), Tick can
// still check if we have pending work to take care of, rather than hanging and never
// reaching the serial the work will be executed on.
void AddFutureSerial(ExecutionSerial serial);
// Check for passed fences and set the new completed serial
MaybeError CheckPassedSerials();
@ -404,6 +395,16 @@ class DeviceBase : public RefCountedWithExternalCount {
virtual void AppendDebugLayerMessages(ErrorData* error) {}
void AssumeCommandsCompleteForTesting();
// Whether the device has scheduled commands to be submitted or executed.
// There are "Scheduled", "Pending", and "Executing" commands. The frontend knows about "Executing"
// commands and the backend knows about "Pending" commands. "Scheduled" commands are either
// "Pending" or "Executing".
bool HasScheduledCommands() const;
// The serial by which all currently submitted or pending operations will have completed.
ExecutionSerial GetScheduledWorkDoneSerial() const;
protected:
// Constructor used only for mocking and testing.
DeviceBase();
@ -497,14 +498,9 @@ class DeviceBase : public RefCountedWithExternalCount {
// mCompletedSerial tracks the last completed command serial that the fence has returned.
// mLastSubmittedSerial tracks the last submitted command serial.
// During device removal, the serials could be artificially incremented
// to make it appear as if commands have been completed. They can also be artificially
// incremented when no work is being done in the GPU so CPU operations don't have to wait on
// stale serials.
// mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
// callbacks to fire
// to make it appear as if commands have been completed.
ExecutionSerial mCompletedSerial = ExecutionSerial(0);
ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
ExecutionSerial mFutureSerial = ExecutionSerial(0);
// DestroyImpl is used to clean up and release resources used by device, does not wait for
// GPU or check errors.
@ -516,6 +512,9 @@ class DeviceBase : public RefCountedWithExternalCount {
// resources.
virtual MaybeError WaitForIdleForDestruction() = 0;
// Indicates whether the backend has pending commands to be submitted as soon as possible.
virtual bool HasPendingCommands() const = 0;
wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
void* mUncapturedErrorUserdata = nullptr;

View File

@ -130,13 +130,16 @@ ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
return uploadHandle;
}
struct SubmittedWorkDone : QueueBase::TaskInFlight {
SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
: mCallback(callback), mUserdata(userdata) {}
void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
struct SubmittedWorkDone : TrackTaskCallback {
SubmittedWorkDone(dawn::platform::Platform* platform,
WGPUQueueWorkDoneCallback callback,
void* userdata)
: TrackTaskCallback(platform), mCallback(callback), mUserdata(userdata) {}
void Finish() override {
ASSERT(mCallback != nullptr);
TRACE_EVENT1(platform, General, "Queue::SubmittedWorkDone::Finished", "serial",
uint64_t(serial));
ASSERT(mSerial != kMaxExecutionSerial);
TRACE_EVENT1(mPlatform, General, "Queue::SubmittedWorkDone::Finished", "serial",
uint64_t(mSerial));
mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
mCallback = nullptr;
}
@ -145,6 +148,7 @@ struct SubmittedWorkDone : QueueBase::TaskInFlight {
mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
mCallback = nullptr;
}
void HandleShutDown() override { HandleDeviceLoss(); }
~SubmittedWorkDone() override = default;
private:
@ -163,9 +167,11 @@ class ErrorQueue : public QueueBase {
};
} // namespace
// QueueBase
void TrackTaskCallback::SetFinishedSerial(ExecutionSerial serial) {
mSerial = serial;
}
QueueBase::TaskInFlight::~TaskInFlight() {}
// QueueBase
QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
: ApiObjectBase(device, descriptor->label) {}
@ -206,21 +212,27 @@ void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
}
std::unique_ptr<SubmittedWorkDone> task =
std::make_unique<SubmittedWorkDone>(callback, userdata);
std::make_unique<SubmittedWorkDone>(GetDevice()->GetPlatform(), callback, userdata);
// Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
// also used to make sure ALL queue work is finished in tests, so we also wait for pending
// commands (this is non-observable outside of tests so it's ok to deviate a bit from the
// spec).
TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
TrackTask(std::move(task));
TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::APIOnSubmittedWorkDone", "serial",
uint64_t(GetDevice()->GetPendingCommandSerial()));
}
void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
mTasksInFlight.Enqueue(std::move(task), serial);
GetDevice()->AddFutureSerial(serial);
void QueueBase::TrackTask(std::unique_ptr<TrackTaskCallback> task) {
// If there are no scheduled commands, the task is ready to be called, so we can move it directly
// to the callback task manager.
if (!GetDevice()->HasScheduledCommands()) {
task->SetFinishedSerial(GetDevice()->GetCompletedCommandSerial());
GetDevice()->GetCallbackTaskManager()->AddCallbackTask(std::move(task));
} else {
mTasksInFlight.Enqueue(std::move(task), GetDevice()->GetScheduledWorkDoneSerial());
}
}
void QueueBase::Tick(ExecutionSerial finishedSerial) {
@ -232,14 +244,17 @@ void QueueBase::Tick(ExecutionSerial finishedSerial) {
TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::Tick", "finishedSerial",
uint64_t(finishedSerial));
std::vector<std::unique_ptr<TaskInFlight>> tasks;
std::vector<std::unique_ptr<TrackTaskCallback>> tasks;
for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
tasks.push_back(std::move(task));
}
mTasksInFlight.ClearUpTo(finishedSerial);
// Tasks' serials have passed. Move them to the callback task manager. They
// are ready to be called.
for (auto& task : tasks) {
task->Finish(GetDevice()->GetPlatform(), finishedSerial);
task->SetFinishedSerial(finishedSerial);
GetDevice()->GetCallbackTaskManager()->AddCallbackTask(std::move(task));
}
}
@ -286,8 +301,6 @@ MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
memcpy(uploadHandle.mappedBuffer, data, size);
device->AddFutureSerial(device->GetPendingCommandSerial());
return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
buffer, bufferOffset, size);
}
@ -356,8 +369,6 @@ MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
DeviceBase* device = GetDevice();
device->AddFutureSerial(device->GetPendingCommandSerial());
return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
&textureCopy, writeSizePixel);
}
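
As a usage-level illustration (a hedged sketch assuming Dawn's C++ bindings of this era and an already-created 'device'; it is not part of this change): because TrackTask now hands the task straight to the callback task manager when nothing is scheduled, an OnSubmittedWorkDone call against an idle queue resolves on the next Tick instead of waiting for a future serial to be reached.

wgpu::Queue queue = device.GetQueue();
queue.OnSubmittedWorkDone(
    0u,  // signalValue parameter of this revision of the API
    [](WGPUQueueWorkDoneStatus status, void* /*userdata*/) {
        // Expected to be WGPUQueueWorkDoneStatus_Success once all scheduled work is done.
    },
    nullptr);
device.Tick();  // drains the callback task manager and invokes the callback above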

View File

@ -18,6 +18,7 @@
#include <memory>
#include "dawn/common/SerialQueue.h"
#include "dawn/native/CallbackTaskManager.h"
#include "dawn/native/Error.h"
#include "dawn/native/Forward.h"
#include "dawn/native/IntegerTypes.h"
@ -29,14 +30,23 @@
namespace dawn::native {
// For commands with an async callback, like 'MapAsync' and 'OnSubmittedWorkDone', the queue tracks
// the execution serial at which they complete. This implements 'CallbackTask' so that the
// async callback can be fired by 'CallbackTaskManager' in a unified way. It also caches the
// finished serial, as the callback needs it for the trace event.
struct TrackTaskCallback : CallbackTask {
explicit TrackTaskCallback(dawn::platform::Platform* platform) : mPlatform(platform) {}
void SetFinishedSerial(ExecutionSerial serial);
~TrackTaskCallback() override = default;
protected:
dawn::platform::Platform* mPlatform = nullptr;
// The serial by which time the callback can be fired.
ExecutionSerial mSerial = kMaxExecutionSerial;
};
class QueueBase : public ApiObjectBase {
public:
struct TaskInFlight {
virtual ~TaskInFlight();
virtual void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) = 0;
virtual void HandleDeviceLoss() = 0;
};
~QueueBase() override;
static QueueBase* MakeError(DeviceBase* device);
@ -67,7 +77,7 @@ class QueueBase : public ApiObjectBase {
uint64_t bufferOffset,
const void* data,
size_t size);
void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
void TrackTask(std::unique_ptr<TrackTaskCallback> task);
void Tick(ExecutionSerial finishedSerial);
void HandleDeviceLoss();
@ -111,7 +121,7 @@ class QueueBase : public ApiObjectBase {
void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
SerialQueue<ExecutionSerial, std::unique_ptr<TrackTaskCallback>> mTasksInFlight;
};
} // namespace dawn::native

View File

@ -400,6 +400,10 @@ void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
}
bool Device::HasPendingCommands() const {
return mPendingCommands.IsOpen();
}
MaybeError Device::ExecutePendingCommandContext() {
return mPendingCommands.ExecuteCommandList(this);
}

View File

@ -208,6 +208,7 @@ class Device final : public DeviceBase {
void DestroyImpl() override;
MaybeError WaitForIdleForDestruction() override;
bool HasPendingCommands() const override;
MaybeError CheckDebugLayerAndGenerateErrors();
void AppendDebugLayerMessages(ErrorData* error) override;

View File

@ -127,6 +127,7 @@ class Device final : public DeviceBase {
void InitTogglesFromDriver();
void DestroyImpl() override;
MaybeError WaitForIdleForDestruction() override;
bool HasPendingCommands() const override;
ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
NSPRef<id<MTLDevice>> mMtlDevice;

View File

@ -369,6 +369,10 @@ CommandRecordingContext* Device::GetPendingCommandContext() {
return &mCommandContext;
}
bool Device::HasPendingCommands() const {
return mCommandContext.WasUsed();
}
MaybeError Device::SubmitPendingCommandBuffer() {
if (!mCommandContext.WasUsed()) {
return {};

View File

@ -213,6 +213,10 @@ MaybeError Device::WaitForIdleForDestruction() {
return {};
}
bool Device::HasPendingCommands() const {
return false;
}
MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
uint64_t sourceOffset,
BufferBase* destination,

View File

@ -161,6 +161,7 @@ class Device final : public DeviceBase {
void DestroyImpl() override;
MaybeError WaitForIdleForDestruction() override;
bool HasPendingCommands() const override;
std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;

View File

@ -446,6 +446,12 @@ MaybeError Device::WaitForIdleForDestruction() {
return {};
}
bool Device::HasPendingCommands() const {
// Technically we could have scheduled commands inside the GL driver that are waiting for a
// glFlush but we can't know for sure so we might as well pretend there are no commands.
return false;
}
uint32_t Device::GetOptimalBytesPerRowAlignment() const {
return 1;
}

View File

@ -132,6 +132,7 @@ class Device final : public DeviceBase {
ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
void DestroyImpl() override;
MaybeError WaitForIdleForDestruction() override;
bool HasPendingCommands() const override;
const OpenGLFunctions mGL;

View File

@ -288,6 +288,10 @@ CommandRecordingContext* Device::GetPendingRecordingContext() {
return &mRecordingContext;
}
bool Device::HasPendingCommands() const {
return mRecordingContext.used;
}
MaybeError Device::SubmitPendingCommands() {
if (!mRecordingContext.used) {
return {};

View File

@ -171,6 +171,7 @@ class Device final : public DeviceBase {
void DestroyImpl() override;
MaybeError WaitForIdleForDestruction() override;
bool HasPendingCommands() const override;
// To make it easier to use fn it is a public const member. However
// the Device is allowed to mutate them through these private methods.

View File

@ -116,6 +116,7 @@ class DeviceMock : public DeviceBase {
MOCK_METHOD(ResultOrError<ExecutionSerial>, CheckAndUpdateCompletedSerials, (), (override));
MOCK_METHOD(void, DestroyImpl, (), (override));
MOCK_METHOD(MaybeError, WaitForIdleForDestruction, (), (override));
MOCK_METHOD(bool, HasPendingCommands, (), (const, override));
};
} // namespace dawn::native

View File

@ -322,11 +322,8 @@ TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
heapSerial + HeapVersionID(kNumOfSwitches));
EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
// Ensure switched-over heaps can be recycled by advancing the GPU by at-least
// |kFrameDepth|.
for (uint32_t i = 0; i < kFrameDepth; i++) {
mD3DDevice->APITick();
}
// Ensure switched-over heaps can be recycled by advancing the GPU.
mD3DDevice->AssumeCommandsCompleteForTesting();
// Switch-over |kNumOfSwitches| again reusing the same heaps.
for (uint32_t i = 0; i < kNumOfSwitches; i++) {
@ -414,11 +411,8 @@ TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
// Ensure switched-over heaps can be recycled by advancing the GPU by at-least
// |kFrameDepth|.
for (uint32_t i = 0; i < kFrameDepth; i++) {
mD3DDevice->APITick();
}
// Ensure switched-over heaps can be recycled by advancing the GPU.
mD3DDevice->AssumeCommandsCompleteForTesting();
// Switch-over the pool-allocated heaps.
for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {

View File

@ -202,6 +202,12 @@ TEST_P(D3D12ResourceResidencyTests, AsyncMappedBufferRead) {
// The mappable buffer should be resident.
EXPECT_TRUE(CheckIfBufferIsResident(buffer));
// Make an empty submit to ensure the buffer's execution serial will not be the same as that of
// the large buffers below.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::CommandBuffer commandBuffer = encoder.Finish();
queue.Submit(1, &commandBuffer);
// Create and touch enough buffers to use the entire budget.
std::vector<wgpu::Buffer> bufferSet = AllocateBuffers(
kDirectlyAllocatedResourceSize, kRestrictedBudgetSize / kDirectlyAllocatedResourceSize,