D3D12: Allocate GPU bind groups at draw/dispatch.

Instead of counting the descriptors to allocate for the entire command
buffer in a pre-pass, the bindgroup state tracker now allocates only
dirty bindgroups when a draw/dispatch is recorded. If the shader-visible
heap runs out of room and must be switched, bindgroups are re-created
according to the BGL.

A future change will address CPU descriptors and remove the pre-pass.
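
In rough terms, the per-draw path in the bindgroup state tracker now
works as follows (a condensed sketch of the Apply() code in the diff
below, with error plumbing elided; not verbatim):

  bool ok = true;
  for (uint32_t index : IterateBitSet(mDirtyBindGroups)) {
      DAWN_TRY_ASSIGN(ok, ToBackend(mBindGroups[index])->Populate(mAllocator));
      if (!ok) break;  // Shader-visible heap is full.
  }
  if (!ok) {
      // Switch to fresh heaps and re-populate every bound bindgroup.
      DAWN_TRY(mAllocator->AllocateAndSwitchShaderVisibleHeaps());
      mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
      mDirtyBindGroups |= mBindGroupLayoutsMask;
      SetID3D12DescriptorHeaps(commandList);  // Must be set before applying the bindgroups.
      for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
          DAWN_TRY_ASSIGN(ok, ToBackend(mBindGroups[index])->Populate(mAllocator));
          ASSERT(ok);
      }
  }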

BUG=dawn:256,dawn:307

Change-Id: I6603de17cfda713bd4512c46e1c93618ca01bb7b
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/13400
Commit-Queue: Bryan Bernhart <bryan.bernhart@intel.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Author: Bryan Bernhart <bryan.bernhart@intel.com>, 2020-02-27 01:14:22 +00:00
Committed by: Commit Bot service account
parent a9d7d47842
commit 0363c3e46d
14 changed files with 627 additions and 198 deletions

View File

@ -301,6 +301,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/d3d12/D3D12Error.h",
"src/dawn_native/d3d12/D3D12Info.cpp",
"src/dawn_native/d3d12/D3D12Info.h",
"src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.cpp",
"src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.h",
"src/dawn_native/d3d12/DescriptorHeapAllocator.cpp",
"src/dawn_native/d3d12/DescriptorHeapAllocator.h",
"src/dawn_native/d3d12/DeviceD3D12.cpp",
@ -330,6 +332,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/d3d12/SamplerD3D12.h",
"src/dawn_native/d3d12/ShaderModuleD3D12.cpp",
"src/dawn_native/d3d12/ShaderModuleD3D12.h",
"src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
"src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
"src/dawn_native/d3d12/StagingBufferD3D12.cpp",
"src/dawn_native/d3d12/StagingBufferD3D12.h",
"src/dawn_native/d3d12/SwapChainD3D12.cpp",
@ -1017,7 +1021,10 @@ source_set("dawn_white_box_tests_sources") {
}
if (dawn_enable_d3d12) {
sources += [ "src/tests/white_box/D3D12SmallTextureTests.cpp" ]
sources += [
"src/tests/white_box/D3D12DescriptorHeapTests.cpp",
"src/tests/white_box/D3D12SmallTextureTests.cpp",
]
}
if (dawn_enable_metal) {

View File

@ -174,6 +174,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/D3D12Error.h"
"d3d12/D3D12Info.cpp"
"d3d12/D3D12Info.h"
"d3d12/DescriptorHeapAllocationD3D12.cpp",
"d3d12/DescriptorHeapAllocationD3D12.h",
"d3d12/DescriptorHeapAllocator.cpp"
"d3d12/DescriptorHeapAllocator.h"
"d3d12/DeviceD3D12.cpp"
@ -203,6 +205,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/SamplerD3D12.h"
"d3d12/ShaderModuleD3D12.cpp"
"d3d12/ShaderModuleD3D12.h"
"d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
"d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
"d3d12/StagingBufferD3D12.cpp"
"d3d12/StagingBufferD3D12.h"
"d3d12/SwapChainD3D12.cpp"

View File

@ -17,6 +17,7 @@
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
@ -27,23 +28,60 @@ namespace dawn_native { namespace d3d12 {
: BindGroupBase(device, descriptor) {
}
void BindGroup::AllocateDescriptors(const DescriptorHeapHandle& cbvUavSrvHeapStart,
uint32_t* cbvUavSrvHeapOffset,
const DescriptorHeapHandle& samplerHeapStart,
uint32_t* samplerHeapOffset) {
const auto* bgl = ToBackend(GetLayout());
const auto& layout = bgl->GetBindingInfo();
ResultOrError<bool> BindGroup::Populate(ShaderVisibleDescriptorAllocator* allocator) {
Device* device = ToBackend(GetDevice());
// Save the offset to the start of the descriptor table in the heap
mCbvUavSrvHeapOffset = *cbvUavSrvHeapOffset;
mSamplerHeapOffset = *samplerHeapOffset;
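// Nothing to do if the previous descriptor allocation is still valid: it was made for the
// pending submit and the shader-visible heaps have not switched since.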
if (allocator->IsAllocationStillValid(mLastUsageSerial, mHeapSerial)) {
return true;
}
// Attempt to allocate descriptors for the currently bound shader-visible heaps.
// If either fails, return early to re-allocate and switch the heaps.
const BindGroupLayout* bgl = ToBackend(GetLayout());
const Serial pendingSerial = device->GetPendingCommandSerial();
const uint32_t cbvUavSrvDescriptorCount = bgl->GetCbvUavSrvDescriptorCount();
DescriptorHeapAllocation cbvSrvUavDescriptorHeapAllocation;
if (cbvUavSrvDescriptorCount > 0) {
DAWN_TRY_ASSIGN(
cbvSrvUavDescriptorHeapAllocation,
allocator->AllocateGPUDescriptors(cbvUavSrvDescriptorCount, pendingSerial,
D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
if (cbvSrvUavDescriptorHeapAllocation.IsInvalid()) {
return false;
}
mBaseCbvSrvUavDescriptor = cbvSrvUavDescriptorHeapAllocation.GetGPUHandle(0);
}
const uint32_t samplerDescriptorCount = bgl->GetSamplerDescriptorCount();
DescriptorHeapAllocation samplerDescriptorHeapAllocation;
if (samplerDescriptorCount > 0) {
DAWN_TRY_ASSIGN(samplerDescriptorHeapAllocation,
allocator->AllocateGPUDescriptors(samplerDescriptorCount, pendingSerial,
D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
if (samplerDescriptorHeapAllocation.IsInvalid()) {
return false;
}
mBaseSamplerDescriptor = samplerDescriptorHeapAllocation.GetGPUHandle(0);
}
// Record both the device and heap serials to determine later if the allocations are still
// valid.
mLastUsageSerial = pendingSerial;
mHeapSerial = allocator->GetShaderVisibleHeapsSerial();
const auto& layout = bgl->GetBindingInfo();
const auto& bindingOffsets = bgl->GetBindingOffsets();
auto d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
ID3D12Device* d3d12Device = device->GetD3D12Device().Get();
for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
// It's not necessary to create descriptors in descriptor heap for dynamic resources.
// So skip allocating descriptors in descriptor heaps for dynamic buffers.
// It's not necessary to create descriptors in descriptor heap for dynamic
// resources. So skip allocating descriptors in descriptor heaps for dynamic
// buffers.
if (layout.hasDynamicOffset[bindingIndex]) {
continue;
}
@ -53,14 +91,14 @@ namespace dawn_native { namespace d3d12 {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
// TODO(enga@google.com): investigate if this needs to be a constraint at the
// API level
// TODO(enga@google.com): investigate if this needs to be a constraint at
// the API level
desc.SizeInBytes = Align(binding.size, 256);
desc.BufferLocation = ToBackend(binding.buffer)->GetVA() + binding.offset;
d3d12Device->CreateConstantBufferView(
&desc, cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
&desc, cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
bindingOffsets[bindingIndex]));
} break;
case wgpu::BindingType::StorageBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
@ -83,16 +121,16 @@ namespace dawn_native { namespace d3d12 {
d3d12Device->CreateUnorderedAccessView(
ToBackend(binding.buffer)->GetD3D12Resource().Get(), nullptr, &desc,
cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
bindingOffsets[bindingIndex]));
} break;
case wgpu::BindingType::ReadonlyStorageBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
// Like StorageBuffer, SPIRV-Cross outputs HLSL shaders for readonly storage
// buffer with ByteAddressBuffer. So we must use D3D12_BUFFER_SRV_FLAG_RAW
// when making the SRV descriptor. And it has similar requirement for format,
// element size, etc.
// when making the SRV descriptor. And it has similar requirement for
// format, element size, etc.
D3D12_SHADER_RESOURCE_VIEW_DESC desc;
desc.Format = DXGI_FORMAT_R32_TYPELESS;
desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
@ -103,23 +141,23 @@ namespace dawn_native { namespace d3d12 {
desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
d3d12Device->CreateShaderResourceView(
ToBackend(binding.buffer)->GetD3D12Resource().Get(), &desc,
cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
bindingOffsets[bindingIndex]));
} break;
case wgpu::BindingType::SampledTexture: {
auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
auto& srv = view->GetSRVDescriptor();
d3d12Device->CreateShaderResourceView(
ToBackend(view->GetTexture())->GetD3D12Resource(), &srv,
cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
bindingOffsets[bindingIndex]));
} break;
case wgpu::BindingType::Sampler: {
auto* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
auto& samplerDesc = sampler->GetSamplerDescriptor();
d3d12Device->CreateSampler(
&samplerDesc, samplerHeapStart.GetCPUHandle(*samplerHeapOffset +
bindingOffsets[bindingIndex]));
&samplerDesc,
samplerDescriptorHeapAllocation.GetCPUHandle(bindingOffsets[bindingIndex]));
} break;
case wgpu::BindingType::StorageTexture:
@ -130,24 +168,14 @@ namespace dawn_native { namespace d3d12 {
}
}
// Offset by the number of descriptors created
*cbvUavSrvHeapOffset += bgl->GetCbvUavSrvDescriptorCount();
*samplerHeapOffset += bgl->GetSamplerDescriptorCount();
return true;
}
uint32_t BindGroup::GetCbvUavSrvHeapOffset() const {
return mCbvUavSrvHeapOffset;
D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseCbvUavSrvDescriptor() const {
return mBaseCbvSrvUavDescriptor;
}
uint32_t BindGroup::GetSamplerHeapOffset() const {
return mSamplerHeapOffset;
D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
return mBaseSamplerDescriptor;
}
bool BindGroup::TestAndSetCounted(uint64_t heapSerial, uint32_t indexInSubmit) {
bool isCounted = (mHeapSerial == heapSerial && mIndexInSubmit == indexInSubmit);
mHeapSerial = heapSerial;
mIndexInSubmit = indexInSubmit;
return isCounted;
}
}} // namespace dawn_native::d3d12

View File

@ -15,37 +15,32 @@
#ifndef DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
#define DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
#include "common/Serial.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/d3d12/d3d12_platform.h"
#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
namespace dawn_native { namespace d3d12 {
class Device;
class ShaderVisibleDescriptorAllocator;
class BindGroup : public BindGroupBase {
public:
BindGroup(Device* device, const BindGroupDescriptor* descriptor);
void AllocateDescriptors(const DescriptorHeapHandle& cbvSrvUavHeapStart,
uint32_t* cbvUavSrvHeapOffset,
const DescriptorHeapHandle& samplerHeapStart,
uint32_t* samplerHeapOffset);
uint32_t GetCbvUavSrvHeapOffset() const;
uint32_t GetSamplerHeapOffset() const;
// Returns true if the BindGroup was successfully populated.
ResultOrError<bool> Populate(ShaderVisibleDescriptorAllocator* allocator);
bool TestAndSetCounted(uint64_t heapSerial, uint32_t indexInSubmit);
D3D12_GPU_DESCRIPTOR_HANDLE GetBaseCbvUavSrvDescriptor() const;
D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
private:
uint32_t mCbvUavSrvHeapOffset;
uint32_t mSamplerHeapOffset;
Serial mLastUsageSerial = 0;
Serial mHeapSerial = 0;
uint64_t mHeapSerial = 0;
uint32_t mIndexInSubmit = 0;
D3D12_GPU_DESCRIPTOR_HANDLE mBaseCbvSrvUavDescriptor = {0};
D3D12_GPU_DESCRIPTOR_HANDLE mBaseSamplerDescriptor = {0};
};
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_BINDGROUPD3D12_H_

View File

@ -31,6 +31,7 @@
#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
#include "dawn_native/d3d12/TextureD3D12.h"
#include "dawn_native/d3d12/UtilsD3D12.h"
@ -71,74 +72,56 @@ namespace dawn_native { namespace d3d12 {
class BindGroupStateTracker : public BindGroupAndStorageBarrierTrackerBase<false, uint64_t> {
public:
BindGroupStateTracker(Device* device)
: BindGroupAndStorageBarrierTrackerBase(), mDevice(device) {
: BindGroupAndStorageBarrierTrackerBase(),
mAllocator(device->GetShaderVisibleDescriptorAllocator()) {
}
void SetInComputePass(bool inCompute_) {
mInCompute = inCompute_;
}
MaybeError AllocateDescriptorHeaps(Device* device) {
// This function should only be called once.
ASSERT(mCbvSrvUavGPUDescriptorHeap.Get() == nullptr &&
mSamplerGPUDescriptorHeap.Get() == nullptr);
DescriptorHeapAllocator* descriptorHeapAllocator = device->GetDescriptorHeapAllocator();
if (mCbvSrvUavDescriptorHeapSize > 0) {
DAWN_TRY_ASSIGN(
mCbvSrvUavGPUDescriptorHeap,
descriptorHeapAllocator->AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
mCbvSrvUavDescriptorHeapSize));
}
if (mSamplerDescriptorHeapSize > 0) {
DAWN_TRY_ASSIGN(mSamplerGPUDescriptorHeap, descriptorHeapAllocator->AllocateGPUHeap(
D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER,
mSamplerDescriptorHeapSize));
}
uint32_t cbvSrvUavDescriptorIndex = 0;
uint32_t samplerDescriptorIndex = 0;
for (BindGroup* group : mBindGroupsToAllocate) {
ASSERT(group);
ASSERT(cbvSrvUavDescriptorIndex +
ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount() <=
mCbvSrvUavDescriptorHeapSize);
ASSERT(samplerDescriptorIndex +
ToBackend(group->GetLayout())->GetSamplerDescriptorCount() <=
mSamplerDescriptorHeapSize);
group->AllocateDescriptors(mCbvSrvUavGPUDescriptorHeap, &cbvSrvUavDescriptorIndex,
mSamplerGPUDescriptorHeap, &samplerDescriptorIndex);
}
ASSERT(cbvSrvUavDescriptorIndex == mCbvSrvUavDescriptorHeapSize);
ASSERT(samplerDescriptorIndex == mSamplerDescriptorHeapSize);
return {};
}
// This function must only be called before calling AllocateDescriptorHeaps().
void TrackSetBindGroup(BindGroup* group, uint32_t index, uint32_t indexInSubmit) {
if (mBindGroups[index] != group) {
mBindGroups[index] = group;
if (!group->TestAndSetCounted(mDevice->GetPendingCommandSerial(), indexInSubmit)) {
const BindGroupLayout* layout = ToBackend(group->GetLayout());
mCbvSrvUavDescriptorHeapSize += layout->GetCbvUavSrvDescriptorCount();
mSamplerDescriptorHeapSize += layout->GetSamplerDescriptorCount();
mBindGroupsToAllocate.push_back(group);
MaybeError Apply(CommandRecordingContext* commandContext) {
// Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
// ringbuffer. Only a single shader-visible descriptor heap of each type can be bound
// at any given time. This means that when we switch heaps, all currently bound
// bindgroups must be re-populated. Bindgroups can fail allocation gracefully, which is
// the signal to switch the bound heaps.
// Re-populating all bindgroups after the last one fails causes duplicated allocations
// to occur on overflow.
// TODO(bryan.bernhart@intel.com): Consider further optimization.
bool didCreateBindGroups = true;
for (uint32_t index : IterateBitSet(mDirtyBindGroups)) {
DAWN_TRY_ASSIGN(didCreateBindGroups,
ToBackend(mBindGroups[index])->Populate(mAllocator));
if (!didCreateBindGroups) {
break;
}
}
}
void Apply(CommandRecordingContext* commandContext) {
// This will re-create bindgroups for both heaps even if only one overflowed.
// TODO(bryan.bernhart@intel.com): Consider re-allocating heaps independently
// such that overflowing one doesn't re-allocate the other.
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
if (!didCreateBindGroups) {
DAWN_TRY(mAllocator->AllocateAndSwitchShaderVisibleHeaps());
mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
mDirtyBindGroups |= mBindGroupLayoutsMask;
// Must be called before applying the bindgroups.
SetID3D12DescriptorHeaps(commandList);
for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
DAWN_TRY_ASSIGN(didCreateBindGroups,
ToBackend(mBindGroups[index])->Populate(mAllocator));
ASSERT(didCreateBindGroups);
}
}
for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index,
ToBackend(mBindGroups[index]), mDynamicOffsetCounts[index],
mDynamicOffsets[index].data());
BindGroup* group = ToBackend(mBindGroups[index]);
ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
}
if (mInCompute) {
@ -169,34 +152,26 @@ namespace dawn_native { namespace d3d12 {
}
}
DidApply();
return {};
}
void Reset() {
for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
mBindGroups[i] = nullptr;
}
}
void SetID3D12DescriptorHeaps(ComPtr<ID3D12GraphicsCommandList> commandList) {
void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
ASSERT(commandList != nullptr);
ID3D12DescriptorHeap* descriptorHeaps[2] = {mCbvSrvUavGPUDescriptorHeap.Get(),
mSamplerGPUDescriptorHeap.Get()};
if (descriptorHeaps[0] && descriptorHeaps[1]) {
commandList->SetDescriptorHeaps(2, descriptorHeaps);
} else if (descriptorHeaps[0]) {
commandList->SetDescriptorHeaps(1, descriptorHeaps);
} else if (descriptorHeaps[1]) {
commandList->SetDescriptorHeaps(1, &descriptorHeaps[1]);
}
std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps =
mAllocator->GetShaderVisibleHeaps();
ASSERT(descriptorHeaps[0] != nullptr);
ASSERT(descriptorHeaps[1] != nullptr);
commandList->SetDescriptorHeaps(2, descriptorHeaps.data());
}
private:
void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
PipelineLayout* pipelineLayout,
const PipelineLayout* pipelineLayout,
uint32_t index,
BindGroup* group,
uint32_t dynamicOffsetCount,
uint64_t* dynamicOffsets) {
const uint64_t* dynamicOffsets) {
// Usually, the application won't set the same offsets many times,
// so always try to apply dynamic offsets even if the offsets stay the same
if (dynamicOffsetCount) {
@ -262,47 +237,37 @@ namespace dawn_native { namespace d3d12 {
return;
}
uint32_t cbvUavSrvCount = ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
uint32_t samplerCount = ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
const uint32_t cbvUavSrvCount =
ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
const uint32_t samplerCount =
ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
if (cbvUavSrvCount > 0) {
uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
group->GetBaseCbvUavSrvDescriptor();
if (mInCompute) {
commandList->SetComputeRootDescriptorTable(
parameterIndex,
mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(group->GetCbvUavSrvHeapOffset()));
commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
} else {
commandList->SetGraphicsRootDescriptorTable(
parameterIndex,
mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(group->GetCbvUavSrvHeapOffset()));
commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
}
}
if (samplerCount > 0) {
uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
group->GetBaseSamplerDescriptor();
if (mInCompute) {
commandList->SetComputeRootDescriptorTable(
parameterIndex,
mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
} else {
commandList->SetGraphicsRootDescriptorTable(
parameterIndex,
mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
}
}
}
uint32_t mCbvSrvUavDescriptorHeapSize = 0;
uint32_t mSamplerDescriptorHeapSize = 0;
std::deque<BindGroup*> mBindGroupsToAllocate = {};
bool mInCompute = false;
DescriptorHeapHandle mCbvSrvUavGPUDescriptorHeap = {};
DescriptorHeapHandle mSamplerGPUDescriptorHeap = {};
Device* mDevice;
ShaderVisibleDescriptorAllocator* mAllocator;
};
class RenderPassDescriptorHeapTracker {
@ -486,21 +451,12 @@ namespace dawn_native { namespace d3d12 {
MaybeError AllocateAndSetDescriptorHeaps(Device* device,
BindGroupStateTracker* bindingTracker,
RenderPassDescriptorHeapTracker* renderPassTracker,
CommandIterator* commands,
uint32_t indexInSubmit) {
CommandIterator* commands) {
{
Command type;
auto HandleCommand = [&](CommandIterator* commands, Command type) {
switch (type) {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
if (cmd->dynamicOffsetCount) {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindingTracker->TrackSetBindGroup(group, cmd->index, indexInSubmit);
} break;
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = commands->NextCommand<BeginRenderPassCmd>();
renderPassTracker->TrackRenderPass(cmd);
@ -534,7 +490,6 @@ namespace dawn_native { namespace d3d12 {
}
DAWN_TRY(renderPassTracker->AllocateRTVAndDSVHeaps());
DAWN_TRY(bindingTracker->AllocateDescriptorHeaps(device));
return {};
}
@ -582,8 +537,7 @@ namespace dawn_native { namespace d3d12 {
FreeCommands(&mCommands);
}
MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext,
uint32_t indexInSubmit) {
MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
Device* device = ToBackend(GetDevice());
BindGroupStateTracker bindingTracker(device);
RenderPassDescriptorHeapTracker renderPassTracker(device);
@ -596,11 +550,13 @@ namespace dawn_native { namespace d3d12 {
// heaps set using a small CommandList inserted just before the main CommandList.
{
DAWN_TRY(AllocateAndSetDescriptorHeaps(device, &bindingTracker, &renderPassTracker,
&mCommands, indexInSubmit));
bindingTracker.Reset();
bindingTracker.SetID3D12DescriptorHeaps(commandList);
&mCommands));
}
// Make sure we use the correct descriptors for this command list. This could be done
// once per actual command list, but doing it here is fine because there should be few
// command buffers.
bindingTracker.SetID3D12DescriptorHeaps(commandList);
// Records the necessary barriers for the resource usage pre-computed by the frontend
auto TransitionForPass = [](CommandRecordingContext* commandContext,
const PassResourceUsage& usages) -> bool {
@ -663,7 +619,7 @@ namespace dawn_native { namespace d3d12 {
TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(true);
RecordComputePass(commandContext, &bindingTracker);
DAWN_TRY(RecordComputePass(commandContext, &bindingTracker));
nextPassNumber++;
} break;
@ -677,8 +633,8 @@ namespace dawn_native { namespace d3d12 {
bindingTracker.SetInComputePass(false);
LazyClearRenderPassAttachments(beginRenderPassCmd);
RecordRenderPass(commandContext, &bindingTracker, &renderPassTracker,
beginRenderPassCmd, passHasUAV);
DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, &renderPassTracker,
beginRenderPassCmd, passHasUAV));
nextPassNumber++;
} break;
@ -827,8 +783,8 @@ namespace dawn_native { namespace d3d12 {
return {};
}
void CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker) {
MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker) {
PipelineLayout* lastLayout = nullptr;
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@ -838,14 +794,14 @@ namespace dawn_native { namespace d3d12 {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
bindingTracker->Apply(commandContext);
DAWN_TRY(bindingTracker->Apply(commandContext));
commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
} break;
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
bindingTracker->Apply(commandContext);
DAWN_TRY(bindingTracker->Apply(commandContext));
Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDispatchIndirectSignature();
@ -856,7 +812,7 @@ namespace dawn_native { namespace d3d12 {
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
return;
return {};
} break;
case Command::SetComputePipeline: {
@ -924,6 +880,8 @@ namespace dawn_native { namespace d3d12 {
default: { UNREACHABLE(); } break;
}
}
return {};
}
void CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
@ -1040,7 +998,7 @@ namespace dawn_native { namespace d3d12 {
: nullptr);
}
void CommandBuffer::RecordRenderPass(
MaybeError CommandBuffer::RecordRenderPass(
CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
@ -1093,12 +1051,12 @@ namespace dawn_native { namespace d3d12 {
VertexBufferTracker vertexBufferTracker = {};
IndexBufferTracker indexBufferTracker = {};
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
switch (type) {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
bindingTracker->Apply(commandContext);
DAWN_TRY(bindingTracker->Apply(commandContext));
vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
@ -1107,7 +1065,7 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
bindingTracker->Apply(commandContext);
DAWN_TRY(bindingTracker->Apply(commandContext));
indexBufferTracker.Apply(commandList);
vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
@ -1118,7 +1076,7 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
bindingTracker->Apply(commandContext);
DAWN_TRY(bindingTracker->Apply(commandContext));
vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
@ -1131,7 +1089,7 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
bindingTracker->Apply(commandContext);
DAWN_TRY(bindingTracker->Apply(commandContext));
indexBufferTracker.Apply(commandList);
vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
@ -1224,6 +1182,7 @@ namespace dawn_native { namespace d3d12 {
UNREACHABLE();
break;
}
return {};
};
Command type;
@ -1236,7 +1195,7 @@ namespace dawn_native { namespace d3d12 {
} else if (renderPass->attachmentState->GetSampleCount() > 1) {
ResolveMultisampledRenderPass(commandContext, renderPass);
}
return;
return {};
} break;
case Command::SetStencilReference: {
@ -1282,14 +1241,14 @@ namespace dawn_native { namespace d3d12 {
CommandIterator* iter = bundles[i]->GetCommands();
iter->Reset();
while (iter->NextCommandId(&type)) {
EncodeRenderBundleCommand(iter, type);
DAWN_TRY(EncodeRenderBundleCommand(iter, type));
}
}
} break;
default: { EncodeRenderBundleCommand(&mCommands, type); } break;
default: { DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type)); } break;
}
}
return {};
}
}} // namespace dawn_native::d3d12

View File

@ -49,16 +49,17 @@ namespace dawn_native { namespace d3d12 {
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
MaybeError RecordCommands(CommandRecordingContext* commandContext, uint32_t indexInSubmit);
MaybeError RecordCommands(CommandRecordingContext* commandContext);
private:
void RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker);
void RecordRenderPass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
BeginRenderPassCmd* renderPass,
bool passHasUAV);
MaybeError RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker);
MaybeError RecordRenderPass(
CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
BeginRenderPassCmd* renderPass,
bool passHasUAV);
void SetupRenderPass(CommandRecordingContext* commandContext,
BeginRenderPassCmd* renderPass,
RenderPassBuilder* renderPassBuilder);

View File

@ -0,0 +1,49 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dawn_native/d3d12/DescriptorHeapAllocationD3D12.h"
#include "dawn_native/Error.h"
namespace dawn_native { namespace d3d12 {
DescriptorHeapAllocation::DescriptorHeapAllocation() : mSizeIncrement(0) {
}
DescriptorHeapAllocation::DescriptorHeapAllocation(
uint32_t sizeIncrement,
D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptorHandle,
D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptorHandle)
: mSizeIncrement(sizeIncrement),
mBaseCPUDescriptorHandle(baseCPUDescriptorHandle),
mBaseGPUDescriptorHandle(baseGPUDescriptorHandle) {
}
D3D12_CPU_DESCRIPTOR_HANDLE DescriptorHeapAllocation::GetCPUHandle(uint32_t offset) const {
ASSERT(!IsInvalid());
D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseCPUDescriptorHandle;
cpuHandle.ptr += mSizeIncrement * offset;
return cpuHandle;
}
D3D12_GPU_DESCRIPTOR_HANDLE DescriptorHeapAllocation::GetGPUHandle(uint32_t offset) const {
ASSERT(!IsInvalid());
D3D12_GPU_DESCRIPTOR_HANDLE gpuHandle = mBaseGPUDescriptorHandle;
gpuHandle.ptr += mSizeIncrement * offset;
return gpuHandle;
}
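// A default-constructed allocation has a zero CPU base handle and is treated as invalid.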
bool DescriptorHeapAllocation::IsInvalid() const {
return mBaseCPUDescriptorHandle.ptr == 0;
}
}} // namespace dawn_native::d3d12

View File

@ -0,0 +1,46 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef DAWNNATIVE_D3D12_DESCRIPTORHEAPALLOCATIOND3D12_H_
#define DAWNNATIVE_D3D12_DESCRIPTORHEAPALLOCATIOND3D12_H_
#include "dawn_native/d3d12/d3d12_platform.h"
#include <cstdint>
namespace dawn_native { namespace d3d12 {
// Wrapper for a handle into a descriptor heap.
class DescriptorHeapAllocation {
public:
DescriptorHeapAllocation();
DescriptorHeapAllocation(uint32_t sizeIncrement,
D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptorHandle,
D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptorHandle);
~DescriptorHeapAllocation() = default;
D3D12_CPU_DESCRIPTOR_HANDLE GetCPUHandle(uint32_t offset) const;
D3D12_GPU_DESCRIPTOR_HANDLE GetGPUHandle(uint32_t offset) const;
bool IsInvalid() const;
private:
uint32_t mSizeIncrement;
D3D12_CPU_DESCRIPTOR_HANDLE mBaseCPUDescriptorHandle = {0};
D3D12_GPU_DESCRIPTOR_HANDLE mBaseGPUDescriptorHandle = {0};
};
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_DESCRIPTORHEAPALLOCATIOND3D12_H_

View File

@ -35,6 +35,7 @@
#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/ShaderModuleD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/StagingBufferD3D12.h"
#include "dawn_native/d3d12/SwapChainD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
@ -72,6 +73,11 @@ namespace dawn_native { namespace d3d12 {
// Initialize backend services
mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
mDescriptorHeapAllocator = std::make_unique<DescriptorHeapAllocator>(this);
mShaderVisibleDescriptorAllocator =
std::make_unique<ShaderVisibleDescriptorAllocator>(this);
DAWN_TRY(mShaderVisibleDescriptorAllocator->Initialize());
mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
@ -179,7 +185,7 @@ namespace dawn_native { namespace d3d12 {
mResourceAllocatorManager->Tick(mCompletedSerial);
DAWN_TRY(mCommandAllocatorManager->Tick(mCompletedSerial));
mDescriptorHeapAllocator->Deallocate(mCompletedSerial);
mShaderVisibleDescriptorAllocator->Tick(mCompletedSerial);
mMapRequestTracker->Tick(mCompletedSerial);
mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
DAWN_TRY(ExecutePendingCommandContext());
@ -433,4 +439,7 @@ namespace dawn_native { namespace d3d12 {
ASSERT(!mPendingCommands.IsOpen());
}
ShaderVisibleDescriptorAllocator* Device::GetShaderVisibleDescriptorAllocator() const {
return mShaderVisibleDescriptorAllocator.get();
}
}} // namespace dawn_native::d3d12

View File

@ -30,6 +30,7 @@ namespace dawn_native { namespace d3d12 {
class CommandAllocatorManager;
class DescriptorHeapAllocator;
class ShaderVisibleDescriptorAllocator;
class MapRequestTracker;
class PlatformFunctions;
class ResourceAllocatorManager;
@ -95,6 +96,8 @@ namespace dawn_native { namespace d3d12 {
void DeallocateMemory(ResourceHeapAllocation& allocation);
ShaderVisibleDescriptorAllocator* GetShaderVisibleDescriptorAllocator() const;
TextureBase* WrapSharedHandle(const TextureDescriptor* descriptor,
HANDLE sharedHandle,
uint64_t acquireMutexKey);
@ -158,6 +161,7 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<DescriptorHeapAllocator> mDescriptorHeapAllocator;
std::unique_ptr<MapRequestTracker> mMapRequestTracker;
std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
std::unique_ptr<ShaderVisibleDescriptorAllocator> mShaderVisibleDescriptorAllocator;
};
}} // namespace dawn_native::d3d12

View File

@ -36,7 +36,7 @@ namespace dawn_native { namespace d3d12 {
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
"CommandBufferD3D12::RecordCommands");
for (uint32_t i = 0; i < commandCount; ++i) {
DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext, i));
DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
}
TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
"CommandBufferD3D12::RecordCommands");

View File

@ -0,0 +1,168 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
namespace dawn_native { namespace d3d12 {
// Check that d3d heap type enum correctly mirrors the type index used by the static arrays.
static_assert(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV == 0, "");
static_assert(D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER == 1, "");
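// Returns the maximum descriptor count D3D12 allows for a shader-visible heap of the
// given type (the tier-1 limit for CBV/UAV/SRV and the fixed limit for samplers).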
uint32_t GetD3D12ShaderVisibleHeapSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
switch (heapType) {
case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
default:
UNREACHABLE();
}
}
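// Both heap types populated at draw/dispatch time must be created shader-visible.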
D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
switch (heapType) {
case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
default:
UNREACHABLE();
}
}
ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(Device* device)
: mDevice(device),
mSizeIncrements{
device->GetD3D12Device()->GetDescriptorHandleIncrementSize(
D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV),
device->GetD3D12Device()->GetDescriptorHandleIncrementSize(
D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER),
} {
}
MaybeError ShaderVisibleDescriptorAllocator::Initialize() {
ASSERT(mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV].heap.Get() == nullptr);
ASSERT(mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER].heap.Get() == nullptr);
DAWN_TRY(AllocateAndSwitchShaderVisibleHeaps());
return {};
}
MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeaps() {
// TODO(bryan.bernhart@intel.com): Allocating the max heap size wastes memory
// if the application never allocates any bindings of that heap type.
// Consider dynamically re-sizing GPU heaps.
DAWN_TRY(
AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
GetD3D12ShaderVisibleHeapSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV),
GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV)));
DAWN_TRY(AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER,
GetD3D12ShaderVisibleHeapSize(D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER),
GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER)));
// Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
// serial. When a bindgroup attempts to re-populate, it will compare with its recorded
// heap serial.
mShaderVisibleHeapsSerial++;
return {};
}
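// Allocates a contiguous range of GPU descriptors from the ring buffer of the given heap
// type. Returns an invalid allocation (rather than an error) when the heap has no room,
// so the caller can switch heaps and retry.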
ResultOrError<DescriptorHeapAllocation>
ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(uint32_t descriptorCount,
Serial pendingSerial,
D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
ASSERT(mShaderVisibleBuffers[heapType].heap != nullptr);
const uint64_t startOffset =
mShaderVisibleBuffers[heapType].allocator.Allocate(descriptorCount, pendingSerial);
if (startOffset == RingBufferAllocator::kInvalidOffset) {
return DescriptorHeapAllocation{}; // Invalid
}
ID3D12DescriptorHeap* descriptorHeap = mShaderVisibleBuffers[heapType].heap.Get();
D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor =
descriptorHeap->GetCPUDescriptorHandleForHeapStart();
baseCPUDescriptor.ptr += mSizeIncrements[heapType] * startOffset;
D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor =
descriptorHeap->GetGPUDescriptorHandleForHeapStart();
baseGPUDescriptor.ptr += mSizeIncrements[heapType] * startOffset;
return DescriptorHeapAllocation{mSizeIncrements[heapType], baseCPUDescriptor,
baseGPUDescriptor};
}
std::array<ID3D12DescriptorHeap*, 2> ShaderVisibleDescriptorAllocator::GetShaderVisibleHeaps()
const {
return {mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV].heap.Get(),
mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER].heap.Get()};
}
void ShaderVisibleDescriptorAllocator::Tick(uint64_t completedSerial) {
for (uint32_t i = 0; i < mShaderVisibleBuffers.size(); i++) {
ASSERT(mShaderVisibleBuffers[i].heap != nullptr);
mShaderVisibleBuffers[i].allocator.Deallocate(completedSerial);
}
}
// Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
MaybeError ShaderVisibleDescriptorAllocator::AllocateGPUHeap(
D3D12_DESCRIPTOR_HEAP_TYPE heapType,
uint32_t descriptorCount,
D3D12_DESCRIPTOR_HEAP_FLAGS heapFlags) {
ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
if (mShaderVisibleBuffers[heapType].heap != nullptr) {
mDevice->ReferenceUntilUnused(std::move(mShaderVisibleBuffers[heapType].heap));
}
D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
heapDescriptor.Type = heapType;
heapDescriptor.NumDescriptors = descriptorCount;
heapDescriptor.Flags = heapFlags;
heapDescriptor.NodeMask = 0;
ComPtr<ID3D12DescriptorHeap> heap;
DAWN_TRY(CheckOutOfMemoryHRESULT(
mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
"ID3D12Device::CreateDescriptorHeap"));
// Create a FIFO buffer from the recently created heap.
mShaderVisibleBuffers[heapType].heap = std::move(heap);
mShaderVisibleBuffers[heapType].allocator = RingBufferAllocator(descriptorCount);
return {};
}
Serial ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapsSerial() const {
return mShaderVisibleHeapsSerial;
}
uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting(
D3D12_DESCRIPTOR_HEAP_TYPE heapType) const {
ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
return mShaderVisibleBuffers[heapType].allocator.GetSize();
}
bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(Serial lastUsageSerial,
Serial heapSerial) const {
// Consider valid if allocated for the pending submit and the shader visible heaps
// have not switched over.
return (lastUsageSerial > mDevice->GetCompletedCommandSerial() &&
heapSerial == mShaderVisibleHeapsSerial);
}
}} // namespace dawn_native::d3d12

View File

@ -0,0 +1,71 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
#define DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
#include "dawn_native/Error.h"
#include "dawn_native/RingBufferAllocator.h"
#include "dawn_native/d3d12/DescriptorHeapAllocationD3D12.h"
#include <array>
namespace dawn_native { namespace d3d12 {
class Device;
// Manages the shader-visible descriptor heaps and their ring-buffer allocators, which the
// device uses to allocate GPU descriptors based on the heap type.
class ShaderVisibleDescriptorAllocator {
public:
ShaderVisibleDescriptorAllocator(Device* device);
MaybeError Initialize();
ResultOrError<DescriptorHeapAllocation> AllocateGPUDescriptors(
uint32_t descriptorCount,
Serial pendingSerial,
D3D12_DESCRIPTOR_HEAP_TYPE heapType);
void Tick(uint64_t completedSerial);
Serial GetShaderVisibleHeapsSerial() const;
std::array<ID3D12DescriptorHeap*, 2> GetShaderVisibleHeaps() const;
MaybeError AllocateAndSwitchShaderVisibleHeaps();
uint64_t GetShaderVisibleHeapSizeForTesting(D3D12_DESCRIPTOR_HEAP_TYPE heapType) const;
bool IsAllocationStillValid(Serial lastUsageSerial, Serial heapSerial) const;
private:
MaybeError AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
uint32_t descriptorCount,
D3D12_DESCRIPTOR_HEAP_FLAGS heapFlags);
struct ShaderVisibleBuffer {
ComPtr<ID3D12DescriptorHeap> heap;
RingBufferAllocator allocator;
};
Device* mDevice;
// The serial value of 0 means the shader-visible heaps have not been allocated.
// This value is never returned by GetShaderVisibleHeapsSerial() after Initialize().
Serial mShaderVisibleHeapsSerial = 0;
std::array<ShaderVisibleBuffer, 2> mShaderVisibleBuffers;
std::array<uint32_t, 2> mSizeIncrements;
};
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_

View File

@ -0,0 +1,88 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tests/DawnTest.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"
constexpr uint32_t kRTSize = 4;
using namespace dawn_native::d3d12;
class D3D12DescriptorHeapTests : public DawnTest {
private:
void TestSetUp() override {
DAWN_SKIP_TEST_IF(UsesWire());
}
};
// Verify the shader visible heaps switch over within a single submit.
TEST_P(D3D12DescriptorHeapTests, SwitchOverHeaps) {
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor(device);
// Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating a
// sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
renderPipelineDescriptor.vertexStage.module =
utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
#version 450
void main() {
gl_Position = vec4(0.f, 0.f, 0.f, 1.f);
})");
renderPipelineDescriptor.cFragmentStage.module =
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(#version 450
layout(set = 0, binding = 0) uniform sampler sampler0;
layout(location = 0) out vec4 fragColor;
void main() {
fragColor = vec4(0.0, 0.0, 0.0, 0.0);
})");
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
wgpu::SamplerDescriptor samplerDesc = utils::GetDefaultSamplerDescriptor();
wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);
Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
ShaderVisibleDescriptorAllocator* allocator = d3dDevice->GetShaderVisibleDescriptorAllocator();
const uint64_t samplerHeapSize =
allocator->GetShaderVisibleHeapSizeForTesting(D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
const Serial heapSerial = allocator->GetShaderVisibleHeapsSerial();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}}));
pass.Draw(3, 1, 0, 0);
}
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
EXPECT_EQ(allocator->GetShaderVisibleHeapsSerial(), heapSerial + 1);
}
DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests, D3D12Backend());