Revert "D3D12: Stage BindGroups on CPU descriptor heaps."
This reverts commit 2479860e4b.
Reason for revert: Causes WebGPU CTS failures
Original change's description:
> D3D12: Stage BindGroups on CPU descriptor heaps.
>
> Instead of directly populating GPU heaps, pre-encoded
> BindGroups are staged on CPU heaps then copied over
> to the GPU. Non-shader visible allocators are stored
> on the BGL, which hands out fixed-size chunks to
> simplify memory management. To enable memory re-use,
> CPU allocations are tied to the lifetime of BindGroup
> objects.
>
> BUG=dawn:155
>
> Change-Id: I402e6686c96f7450a077c627c8499600979e426c
> Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/18100
> Commit-Queue: Bryan Bernhart <bryan.bernhart@intel.com>
> Reviewed-by: Corentin Wallez <cwallez@chromium.org>
TBR=cwallez@chromium.org,enga@chromium.org,rafael.cintron@microsoft.com,bryan.bernhart@intel.com
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: dawn:155
Change-Id: I3dfae3e15e2bc21de692513725c9cf3ca38110b5
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/18860
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Parent: d48b329b49
Commit: c7f454c241
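For reference, the change being reverted staged pre-encoded BindGroup descriptors on CPU-only (non-shader-visible) descriptor heaps and copied them into the shader-visible heap at submit time. The following is only a minimal sketch of that general D3D12 pattern, outside of Dawn's classes; the function name, variable names, and heap sizes are illustrative, not taken from the reverted code.

// Sketch: encode a descriptor once on a CPU-only staging heap, then copy it into a
// shader-visible heap before recording draws. Error handling is omitted for brevity.
#include <d3d12.h>
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;

void StageAndCommitDescriptor(ID3D12Device* device,
                              ID3D12DescriptorHeap* gpuVisibleHeap,  // created with
                                                                     // D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE
                              UINT gpuHeapOffset,
                              const D3D12_CONSTANT_BUFFER_VIEW_DESC* cbvDesc) {
    const UINT increment =
        device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);

    // 1) CPU-only staging heap: never bound to the pipeline, cheap to create and keep around.
    D3D12_DESCRIPTOR_HEAP_DESC stagingDesc = {};
    stagingDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
    stagingDesc.NumDescriptors = 1;
    stagingDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;  // non-shader-visible
    ComPtr<ID3D12DescriptorHeap> stagingHeap;
    device->CreateDescriptorHeap(&stagingDesc, IID_PPV_ARGS(&stagingHeap));

    // 2) Encode the descriptor once on the CPU heap (at bind group creation time).
    D3D12_CPU_DESCRIPTOR_HANDLE staged = stagingHeap->GetCPUDescriptorHandleForHeapStart();
    device->CreateConstantBufferView(cbvDesc, staged);

    // 3) At submit time, copy the pre-encoded descriptor into the shader-visible heap.
    D3D12_CPU_DESCRIPTOR_HANDLE dst = gpuVisibleHeap->GetCPUDescriptorHandleForHeapStart();
    dst.ptr += SIZE_T(gpuHeapOffset) * increment;
    device->CopyDescriptorsSimple(1, dst, staged, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
}

The benefit of the staged approach, as the description above notes, is that descriptors are encoded once and re-copied when shader-visible heaps switch, rather than being re-created from the resources each time.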
BUILD.gn
@@ -291,8 +291,6 @@ source_set("libdawn_native_sources") {
       "src/dawn_native/d3d12/BindGroupLayoutD3D12.h",
       "src/dawn_native/d3d12/BufferD3D12.cpp",
       "src/dawn_native/d3d12/BufferD3D12.h",
-      "src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp",
-      "src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h",
       "src/dawn_native/d3d12/CommandAllocatorManager.cpp",
       "src/dawn_native/d3d12/CommandAllocatorManager.h",
       "src/dawn_native/d3d12/CommandBufferD3D12.cpp",
@@ -318,8 +316,6 @@ source_set("libdawn_native_sources") {
       "src/dawn_native/d3d12/HeapD3D12.h",
       "src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp",
       "src/dawn_native/d3d12/NativeSwapChainImplD3D12.h",
-      "src/dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.cpp",
-      "src/dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.h",
       "src/dawn_native/d3d12/PipelineLayoutD3D12.cpp",
       "src/dawn_native/d3d12/PipelineLayoutD3D12.h",
       "src/dawn_native/d3d12/PlatformFunctions.cpp",
src/common/Math.cpp
@@ -19,7 +19,6 @@
 
 #include <algorithm>
 #include <cmath>
-#include <limits>
 
 #if defined(DAWN_COMPILER_MSVC)
 #    include <intrin.h>
@@ -153,10 +152,3 @@ float SRGBToLinear(float srgb) {
         return std::pow((srgb + 0.055f) / 1.055f, 2.4f);
     }
 }
-
-uint64_t RoundUp(uint64_t n, uint64_t m) {
-    ASSERT(m > 0);
-    ASSERT(n > 0);
-    ASSERT(m <= std::numeric_limits<uint64_t>::max() - n);
-    return ((n + m - 1) / m) * m;
-}
src/common/Math.h
@@ -29,7 +29,6 @@ uint32_t ScanForward(uint32_t bits);
 uint32_t Log2(uint32_t value);
 uint32_t Log2(uint64_t value);
 bool IsPowerOfTwo(uint64_t n);
-uint64_t RoundUp(uint64_t n, uint64_t m);
 
 uint64_t NextPowerOfTwo(uint64_t n);
 bool IsPtrAligned(const void* ptr, size_t alignment);
src/dawn_native/CMakeLists.txt
@@ -164,8 +164,6 @@ if (DAWN_ENABLE_D3D12)
         "d3d12/BindGroupLayoutD3D12.h"
         "d3d12/BufferD3D12.cpp"
         "d3d12/BufferD3D12.h"
-        "d3d12/CPUDescriptorHeapAllocationD3D12.cpp"
-        "d3d12/CPUDescriptorHeapAllocationD3D12.h"
         "d3d12/CommandAllocatorManager.cpp"
         "d3d12/CommandAllocatorManager.h"
         "d3d12/CommandBufferD3D12.cpp"
@@ -191,8 +189,6 @@ if (DAWN_ENABLE_D3D12)
         "d3d12/HeapD3D12.h"
         "d3d12/NativeSwapChainImplD3D12.cpp"
         "d3d12/NativeSwapChainImplD3D12.h"
-        "d3d12/NonShaderVisibleDescriptorAllocatorD3D12.cpp"
-        "d3d12/NonShaderVisibleDescriptorAllocatorD3D12.h"
         "d3d12/PipelineLayoutD3D12.cpp"
         "d3d12/PipelineLayoutD3D12.h"
         "d3d12/PlatformFunctions.cpp"
src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -25,37 +25,76 @@
 namespace dawn_native { namespace d3d12 {
 
     // static
-    ResultOrError<BindGroup*> BindGroup::Create(Device* device,
-                                                const BindGroupDescriptor* descriptor) {
+    BindGroup* BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
         return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
     }
 
-    BindGroup::BindGroup(Device* device,
-                         const BindGroupDescriptor* descriptor,
-                         uint32_t viewSizeIncrement,
-                         const CPUDescriptorHeapAllocation& viewAllocation,
-                         uint32_t samplerSizeIncrement,
-                         const CPUDescriptorHeapAllocation& samplerAllocation)
+    BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
         : BindGroupBase(this, device, descriptor) {
-        BindGroupLayout* bgl = ToBackend(GetLayout());
+    }
 
-        mCPUViewAllocation = viewAllocation;
-        mCPUSamplerAllocation = samplerAllocation;
+    BindGroup::~BindGroup() {
+        ToBackend(GetLayout())->DeallocateBindGroup(this);
+    }
+
+    ResultOrError<bool> BindGroup::Populate(ShaderVisibleDescriptorAllocator* allocator) {
+        Device* device = ToBackend(GetDevice());
+
+        if (allocator->IsAllocationStillValid(mLastUsageSerial, mHeapSerial)) {
+            return true;
+        }
+
+        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+        // If either failed, return early to re-allocate and switch the heaps.
+        const BindGroupLayout* bgl = ToBackend(GetLayout());
+        const Serial pendingSerial = device->GetPendingCommandSerial();
+
+        const uint32_t cbvUavSrvDescriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+        DescriptorHeapAllocation cbvSrvUavDescriptorHeapAllocation;
+        if (cbvUavSrvDescriptorCount > 0) {
+            DAWN_TRY_ASSIGN(
+                cbvSrvUavDescriptorHeapAllocation,
+                allocator->AllocateGPUDescriptors(cbvUavSrvDescriptorCount, pendingSerial,
+                                                  D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
+            if (cbvSrvUavDescriptorHeapAllocation.IsInvalid()) {
+                return false;
+            }
+
+            mBaseCbvSrvUavDescriptor = cbvSrvUavDescriptorHeapAllocation.GetGPUHandle(0);
+        }
+
+        const uint32_t samplerDescriptorCount = bgl->GetSamplerDescriptorCount();
+        DescriptorHeapAllocation samplerDescriptorHeapAllocation;
+        if (samplerDescriptorCount > 0) {
+            DAWN_TRY_ASSIGN(samplerDescriptorHeapAllocation,
+                            allocator->AllocateGPUDescriptors(samplerDescriptorCount, pendingSerial,
+                                                              D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+            if (samplerDescriptorHeapAllocation.IsInvalid()) {
+                return false;
+            }
+
+            mBaseSamplerDescriptor = samplerDescriptorHeapAllocation.GetGPUHandle(0);
+        }
+
+        // Record both the device and heap serials to determine later if the allocations are still
+        // valid.
+        mLastUsageSerial = pendingSerial;
+        mHeapSerial = allocator->GetShaderVisibleHeapsSerial();
 
         const auto& bindingOffsets = bgl->GetBindingOffsets();
 
         ID3D12Device* d3d12Device = device->GetD3D12Device().Get();
 
-        // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
-        // This is because they are created as root descriptors which are never heap allocated.
-        // Since dynamic buffers are packed in the front, we can skip over these bindings by
-        // starting from the dynamic buffer count.
-        for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
-             bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+        for (BindingIndex bindingIndex = 0; bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
             const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
 
-            // Increment size does not need to be stored and is only used to get a handle
-            // local to the allocation with OffsetFrom().
+            // It's not necessary to create descriptors in descriptor heap for dynamic
+            // resources. So skip allocating descriptors in descriptor heaps for dynamic
+            // buffers.
+            if (bindingInfo.hasDynamicOffset) {
+                continue;
+            }
+
             switch (bindingInfo.type) {
                 case wgpu::BindingType::UniformBuffer: {
                     BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
@@ -67,8 +106,8 @@ namespace dawn_native { namespace d3d12 {
                     desc.BufferLocation = ToBackend(binding.buffer)->GetVA() + binding.offset;
 
                     d3d12Device->CreateConstantBufferView(
-                        &desc,
-                        viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
+                        &desc, cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+                                   bindingOffsets[bindingIndex]));
                     break;
                 }
                 case wgpu::BindingType::StorageBuffer: {
@@ -92,7 +131,8 @@ namespace dawn_native { namespace d3d12 {
 
                     d3d12Device->CreateUnorderedAccessView(
                         ToBackend(binding.buffer)->GetD3D12Resource().Get(), nullptr, &desc,
-                        viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
+                        cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+                            bindingOffsets[bindingIndex]));
                     break;
                 }
                 case wgpu::BindingType::ReadonlyStorageBuffer: {
@@ -112,7 +152,8 @@ namespace dawn_native { namespace d3d12 {
                     desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
                     d3d12Device->CreateShaderResourceView(
                         ToBackend(binding.buffer)->GetD3D12Resource().Get(), &desc,
-                        viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
+                        cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+                            bindingOffsets[bindingIndex]));
                     break;
                 }
                 case wgpu::BindingType::SampledTexture: {
@@ -120,15 +161,16 @@ namespace dawn_native { namespace d3d12 {
                     auto& srv = view->GetSRVDescriptor();
                     d3d12Device->CreateShaderResourceView(
                         ToBackend(view->GetTexture())->GetD3D12Resource(), &srv,
-                        viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
+                        cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+                            bindingOffsets[bindingIndex]));
                     break;
                 }
                 case wgpu::BindingType::Sampler: {
                     auto* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
                     auto& samplerDesc = sampler->GetSamplerDescriptor();
                     d3d12Device->CreateSampler(
-                        &samplerDesc, samplerAllocation.OffsetFrom(samplerSizeIncrement,
-                                                                   bindingOffsets[bindingIndex]));
+                        &samplerDesc,
+                        samplerDescriptorHeapAllocation.GetCPUHandle(bindingOffsets[bindingIndex]));
                     break;
                 }
@@ -141,77 +183,12 @@ namespace dawn_native { namespace d3d12 {
                     // TODO(shaobo.yan@intel.com): Implement dynamic buffer offset.
             }
         }
-    }
-
-    BindGroup::~BindGroup() {
-        ToBackend(GetLayout())
-            ->DeallocateBindGroup(this, &mCPUViewAllocation, &mCPUSamplerAllocation);
-        ASSERT(!mCPUViewAllocation.IsValid());
-        ASSERT(!mCPUSamplerAllocation.IsValid());
-    }
-
-    ResultOrError<bool> BindGroup::Populate(ShaderVisibleDescriptorAllocator* allocator) {
-        Device* device = ToBackend(GetDevice());
-
-        if (allocator->IsAllocationStillValid(mLastUsageSerial, mHeapSerial)) {
-            return true;
-        }
-
-        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
-        // If either failed, return early to re-allocate and switch the heaps.
-        const BindGroupLayout* bgl = ToBackend(GetLayout());
-        const Serial pendingSerial = device->GetPendingCommandSerial();
-
-        ID3D12Device* d3d12Device = device->GetD3D12Device().Get();
-
-        // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
-        // simple copies per bindgroup, a single non-simple copy could be issued.
-        // TODO(dawn:155): Consider doing this optimization.
-        const uint32_t viewDescriptorCount = bgl->GetCbvUavSrvDescriptorCount();
-        if (viewDescriptorCount > 0) {
-            DescriptorHeapAllocation viewDescriptorHeapAllocation;
-            DAWN_TRY_ASSIGN(
-                viewDescriptorHeapAllocation,
-                allocator->AllocateGPUDescriptors(viewDescriptorCount, pendingSerial,
-                                                  D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
-            if (viewDescriptorHeapAllocation.IsInvalid()) {
-                return false;
-            }
-
-            d3d12Device->CopyDescriptorsSimple(
-                viewDescriptorCount, viewDescriptorHeapAllocation.GetCPUHandle(0),
-                mCPUViewAllocation.OffsetFrom(0, 0), D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
-
-            mBaseViewDescriptor = viewDescriptorHeapAllocation.GetGPUHandle(0);
-        }
-
-        const uint32_t samplerDescriptorCount = bgl->GetSamplerDescriptorCount();
-        if (samplerDescriptorCount > 0) {
-            DescriptorHeapAllocation samplerDescriptorHeapAllocation;
-            DAWN_TRY_ASSIGN(samplerDescriptorHeapAllocation,
-                            allocator->AllocateGPUDescriptors(samplerDescriptorCount, pendingSerial,
-                                                              D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
-            if (samplerDescriptorHeapAllocation.IsInvalid()) {
-                return false;
-            }
-
-            d3d12Device->CopyDescriptorsSimple(
-                samplerDescriptorCount, samplerDescriptorHeapAllocation.GetCPUHandle(0),
-                mCPUSamplerAllocation.OffsetFrom(0, 0), D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-
-            mBaseSamplerDescriptor = samplerDescriptorHeapAllocation.GetGPUHandle(0);
-        }
-
-        // Record both the device and heap serials to determine later if the allocations are still
-        // valid.
-        mLastUsageSerial = pendingSerial;
-        mHeapSerial = allocator->GetShaderVisibleHeapsSerial();
-
         return true;
     }
 
     D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseCbvUavSrvDescriptor() const {
-        return mBaseViewDescriptor;
+        return mBaseCbvSrvUavDescriptor;
     }
 
     D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
src/dawn_native/d3d12/BindGroupD3D12.h
@@ -18,7 +18,7 @@
 #include "common/PlacementAllocated.h"
 #include "common/Serial.h"
 #include "dawn_native/BindGroup.h"
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
 
 namespace dawn_native { namespace d3d12 {
 
@@ -27,15 +27,9 @@ namespace dawn_native { namespace d3d12 {
 
     class BindGroup : public BindGroupBase, public PlacementAllocated {
       public:
-        static ResultOrError<BindGroup*> Create(Device* device,
-                                                const BindGroupDescriptor* descriptor);
+        static BindGroup* Create(Device* device, const BindGroupDescriptor* descriptor);
 
-        BindGroup(Device* device,
-                  const BindGroupDescriptor* descriptor,
-                  uint32_t viewSizeIncrement,
-                  const CPUDescriptorHeapAllocation& viewAllocation,
-                  uint32_t samplerSizeIncrement,
-                  const CPUDescriptorHeapAllocation& samplerAllocation);
+        BindGroup(Device* device, const BindGroupDescriptor* descriptor);
         ~BindGroup() override;
 
         // Returns true if the BindGroup was successfully populated.
@@ -48,11 +42,8 @@ namespace dawn_native { namespace d3d12 {
         Serial mLastUsageSerial = 0;
         Serial mHeapSerial = 0;
 
-        D3D12_GPU_DESCRIPTOR_HANDLE mBaseViewDescriptor = {0};
+        D3D12_GPU_DESCRIPTOR_HANDLE mBaseCbvSrvUavDescriptor = {0};
         D3D12_GPU_DESCRIPTOR_HANDLE mBaseSamplerDescriptor = {0};
-
-        CPUDescriptorHeapAllocation mCPUSamplerAllocation;
-        CPUDescriptorHeapAllocation mCPUViewAllocation;
     };
 }}  // namespace dawn_native::d3d12
 
src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -17,7 +17,6 @@
 #include "common/BitSetIterator.h"
 #include "dawn_native/d3d12/BindGroupD3D12.h"
 #include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.h"
 
 namespace dawn_native { namespace d3d12 {
     namespace {
@@ -42,9 +41,6 @@ namespace dawn_native { namespace d3d12 {
         }
     }  // anonymous namespace
 
-    // TODO(dawn:155): Figure out this value.
-    static constexpr uint16_t kDescriptorHeapSize = 1024;
-
     BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
         : BindGroupLayoutBase(device, descriptor),
           mDescriptorCounts{},
@@ -132,54 +128,14 @@ namespace dawn_native { namespace d3d12 {
             DescriptorType descriptorType = WGPUBindingTypeToDescriptorType(bindingInfo.type);
             mBindingOffsets[bindingIndex] += descriptorOffsets[descriptorType];
         }
-
-        const uint32_t viewDescriptorCount = GetCbvUavSrvDescriptorCount();
-        if (viewDescriptorCount > 0) {
-            mViewAllocator = std::make_unique<NonShaderVisibleDescriptorAllocator>(
-                device, viewDescriptorCount, kDescriptorHeapSize,
-                D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
-        }
-
-        const uint32_t samplerDescriptorCount = GetSamplerDescriptorCount();
-        if (samplerDescriptorCount > 0) {
-            mSamplerAllocator = std::make_unique<NonShaderVisibleDescriptorAllocator>(
-                device, samplerDescriptorCount, kDescriptorHeapSize,
-                D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-        }
     }
 
-    ResultOrError<BindGroup*> BindGroupLayout::AllocateBindGroup(
-        Device* device,
-        const BindGroupDescriptor* descriptor) {
-        uint32_t viewSizeIncrement = 0;
-        CPUDescriptorHeapAllocation viewAllocation;
-        if (GetCbvUavSrvDescriptorCount() > 0) {
-            DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
-            viewSizeIncrement = mViewAllocator->GetSizeIncrement();
-        }
-
-        uint32_t samplerSizeIncrement = 0;
-        CPUDescriptorHeapAllocation samplerAllocation;
-        if (GetSamplerDescriptorCount() > 0) {
-            DAWN_TRY_ASSIGN(samplerAllocation, mSamplerAllocator->AllocateCPUDescriptors());
-            samplerSizeIncrement = mSamplerAllocator->GetSizeIncrement();
-        }
-
-        return mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation,
-                                            samplerSizeIncrement, samplerAllocation);
+    BindGroup* BindGroupLayout::AllocateBindGroup(Device* device,
+                                                  const BindGroupDescriptor* descriptor) {
+        return mBindGroupAllocator.Allocate(device, descriptor);
     }
 
-    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
-                                              CPUDescriptorHeapAllocation* viewAllocation,
-                                              CPUDescriptorHeapAllocation* samplerAllocation) {
-        if (viewAllocation->IsValid()) {
-            mViewAllocator->Deallocate(viewAllocation);
-        }
-
-        if (samplerAllocation->IsValid()) {
-            mSamplerAllocator->Deallocate(samplerAllocation);
-        }
-
+    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
         mBindGroupAllocator.Deallocate(bindGroup);
     }
 
src/dawn_native/d3d12/BindGroupLayoutD3D12.h
@@ -24,18 +24,13 @@ namespace dawn_native { namespace d3d12 {
 
     class BindGroup;
    class Device;
-    class NonShaderVisibleDescriptorAllocator;
-    class CPUDescriptorHeapAllocation;
 
     class BindGroupLayout : public BindGroupLayoutBase {
      public:
        BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor);
 
-        ResultOrError<BindGroup*> AllocateBindGroup(Device* device,
-                                                    const BindGroupDescriptor* descriptor);
-        void DeallocateBindGroup(BindGroup* bindGroup,
-                                 CPUDescriptorHeapAllocation* viewAllocation,
-                                 CPUDescriptorHeapAllocation* samplerAllocation);
+        BindGroup* AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+        void DeallocateBindGroup(BindGroup* bindGroup);
 
        enum DescriptorType {
            CBV,
@@ -59,10 +54,6 @@ namespace dawn_native { namespace d3d12 {
        D3D12_DESCRIPTOR_RANGE mRanges[DescriptorType::Count];
 
        SlabAllocator<BindGroup> mBindGroupAllocator;
-
-        // TODO(dawn:155): Store and bucket allocators by size on the device.
-        std::unique_ptr<NonShaderVisibleDescriptorAllocator> mSamplerAllocator;
-        std::unique_ptr<NonShaderVisibleDescriptorAllocator> mViewAllocator;
    };
 
 }}  // namespace dawn_native::d3d12
src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp (file deleted)
@@ -1,48 +0,0 @@
-// Copyright 2020 The Dawn Authors
-// ... (Apache 2.0 license header)
-
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace d3d12 {
-
-    CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(
-        D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
-        uint32_t heapIndex)
-        : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {
-    }
-
-    D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
-        uint32_t sizeIncrementInBytes,
-        uint32_t offsetInDescriptorCount) const { ... }
-
-    uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const { ... }
-
-    bool CPUDescriptorHeapAllocation::IsValid() const { ... }
-
-    void CPUDescriptorHeapAllocation::Invalidate() { ... }
-
-}}  // namespace dawn_native::d3d12
src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h (file deleted)
@@ -1,45 +0,0 @@
-// Copyright 2020 The Dawn Authors
-// ... (Apache 2.0 license header)
-
-#ifndef DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
-#define DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
-
-#include <cstdint>
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
-    // Wrapper for a handle into a CPU-only descriptor heap.
-    class CPUDescriptorHeapAllocation {
-      public:
-        CPUDescriptorHeapAllocation() = default;
-        CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
-
-        D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
-                                               uint32_t offsetInDescriptorCount) const;
-        uint32_t GetHeapIndex() const;
-
-        bool IsValid() const;
-
-        void Invalidate();
-
-      private:
-        D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
-        uint32_t mHeapIndex = -1;
-    };
-
-}}  // namespace dawn_native::d3d12
-
-#endif  // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
src/dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.cpp (file deleted)
@@ -1,137 +0,0 @@
-// Copyright 2020 The Dawn Authors
-// ... (Apache 2.0 license header)
-
-#include "common/Math.h"
-
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
-    NonShaderVisibleDescriptorAllocator::NonShaderVisibleDescriptorAllocator(
-        Device* device,
-        uint32_t descriptorCount,
-        uint32_t heapSize,
-        D3D12_DESCRIPTOR_HEAP_TYPE heapType)
-        : mDevice(device),
-          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
-          mBlockSize(descriptorCount * mSizeIncrement),
-          mHeapSize(RoundUp(heapSize, descriptorCount)),
-          mHeapType(heapType) { ... }
-
-    NonShaderVisibleDescriptorAllocator::~NonShaderVisibleDescriptorAllocator() { ... }
-
-    ResultOrError<CPUDescriptorHeapAllocation>
-    NonShaderVisibleDescriptorAllocator::AllocateCPUDescriptors() { ... }
-
-    MaybeError NonShaderVisibleDescriptorAllocator::AllocateCPUHeap() { ... }
-
-    void NonShaderVisibleDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) { ... }
-
-    uint32_t NonShaderVisibleDescriptorAllocator::GetSizeIncrement() const { ... }
-
-    NonShaderVisibleDescriptorAllocator::Index
-    NonShaderVisibleDescriptorAllocator::GetFreeBlockIndicesSize() const { ... }
-
-}}  // namespace dawn_native::d3d12
src/dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.h (file deleted)
@@ -1,78 +0,0 @@
-// Copyright 2020 The Dawn Authors
-// ... (Apache 2.0 license header)
-
-#ifndef DAWNNATIVE_D3D12_NONSHADERVISIBLEDESCRIPTORALLOCATOR_H_
-#define DAWNNATIVE_D3D12_NONSHADERVISIBLEDESCRIPTORALLOCATOR_H_
-
-#include "dawn_native/Error.h"
-
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-
-#include <vector>
-
-// |NonShaderVisibleDescriptorAllocator| allocates a fixed-size block of descriptors from a CPU
-// descriptor heap pool.
-// Internally, it manages a list of heaps using a fixed-size block allocator. The fixed-size
-// block allocator is backed by a list of free blocks (free-list). The heap is in one of two
-// states: AVAILABLE or not. To allocate, the next free block is removed from the free-list
-// and the corresponding heap offset is returned. The AVAILABLE heap always has room for
-// at-least one free block. If no AVAILABLE heap exists, a new heap is created and inserted
-// back into the pool to be immediately used. To deallocate, the block corresponding to the
-// offset is inserted back into the free-list.
-namespace dawn_native { namespace d3d12 {
-
-    class Device;
-
-    class NonShaderVisibleDescriptorAllocator {
-      public:
-        NonShaderVisibleDescriptorAllocator() = default;
-        NonShaderVisibleDescriptorAllocator(Device* device,
-                                            uint32_t descriptorCount,
-                                            uint32_t heapSize,
-                                            D3D12_DESCRIPTOR_HEAP_TYPE heapType);
-        ~NonShaderVisibleDescriptorAllocator();
-
-        ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
-
-        void Deallocate(CPUDescriptorHeapAllocation* allocation);
-
-        uint32_t GetSizeIncrement() const;
-
-      private:
-        using Index = uint16_t;
-
-        struct NonShaderVisibleBuffer {
-            ComPtr<ID3D12DescriptorHeap> heap;
-            std::vector<Index> freeBlockIndices;
-        };
-
-        MaybeError AllocateCPUHeap();
-
-        Index GetFreeBlockIndicesSize() const;
-
-        std::vector<uint32_t> mAvailableHeaps;  // Indices into the pool.
-        std::vector<NonShaderVisibleBuffer> mPool;
-
-        Device* mDevice;
-
-        uint32_t mSizeIncrement;  // Size of the descriptor (in bytes).
-        uint32_t mBlockSize;      // Size of the block of descriptors (in bytes).
-        uint32_t mHeapSize;       // Size of the heap (in number of descriptors).
-
-        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
-    };
-
-}}  // namespace dawn_native::d3d12
-
-#endif  // DAWNNATIVE_D3D12_NONSHADERVISIBLEDESCRIPTORALLOCATOR_H_
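The comment block at the top of the deleted header above describes the pooled fixed-size-block scheme. As a standalone illustration of that general technique (not Dawn's implementation; all names are invented for the example and no D3D12 types are involved), a minimal sketch looks like this:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Sketch of a fixed-size block allocator backed by a pool of heaps. Each heap is split
// into equally sized blocks tracked by a per-heap free-list; a heap is AVAILABLE while
// its free-list is non-empty. Illustrative only.
class FixedBlockPool {
  public:
    explicit FixedBlockPool(uint32_t blocksPerHeap) : mBlocksPerHeap(blocksPerHeap) {}

    // Returns a (heapIndex, blockIndex) pair identifying one free block.
    std::pair<uint32_t, uint32_t> Allocate() {
        if (mAvailableHeaps.empty()) {
            AddHeap();  // no AVAILABLE heap: grow the pool and use the new heap immediately
        }
        uint32_t heapIndex = mAvailableHeaps.back();
        std::vector<uint32_t>& freeList = mHeaps[heapIndex];
        uint32_t blockIndex = freeList.back();
        freeList.pop_back();
        if (freeList.empty()) {
            mAvailableHeaps.pop_back();  // heap is now full
        }
        return {heapIndex, blockIndex};
    }

    void Deallocate(uint32_t heapIndex, uint32_t blockIndex) {
        assert(heapIndex < mHeaps.size());
        std::vector<uint32_t>& freeList = mHeaps[heapIndex];
        if (freeList.empty()) {
            mAvailableHeaps.push_back(heapIndex);  // heap becomes AVAILABLE again
        }
        freeList.push_back(blockIndex);  // return the block to the free-list
    }

  private:
    void AddHeap() {
        std::vector<uint32_t> freeList(mBlocksPerHeap);
        for (uint32_t i = 0; i < mBlocksPerHeap; ++i) {
            freeList[i] = i;
        }
        mAvailableHeaps.push_back(static_cast<uint32_t>(mHeaps.size()));
        mHeaps.push_back(std::move(freeList));
    }

    uint32_t mBlocksPerHeap;
    std::vector<uint32_t> mAvailableHeaps;       // indices of heaps with free blocks
    std::vector<std::vector<uint32_t>> mHeaps;   // per-heap free block indices
};

In the reverted code, the (heapIndex, blockIndex) pair corresponds to a base D3D12_CPU_DESCRIPTOR_HANDLE offset within a non-shader-visible descriptor heap.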
src/tests/unittests/MathTests.cpp
@@ -82,7 +82,7 @@ TEST(Math, AlignPtr) {
 
         ASSERT_GE(aligned - unaligned, 0);
         ASSERT_LT(static_cast<size_t>(aligned - unaligned), kTestAlignment);
-        ASSERT_EQ(reinterpret_cast<uintptr_t>(aligned) & (kTestAlignment - 1), 0u);
+        ASSERT_EQ(reinterpret_cast<uintptr_t>(aligned) & (kTestAlignment -1), 0u);
     }
 }
 
@@ -191,21 +191,3 @@ TEST(Math, SRGBToLinear) {
 
     ASSERT_FLOAT_EQ(SRGBToLinear(0.5f), 0.21404114f);
 }
-
-// Tests for RoundUp
-TEST(Math, RoundUp) {
-    ASSERT_EQ(RoundUp(2, 2), 2u);
-    ASSERT_EQ(RoundUp(2, 4), 4u);
-    ASSERT_EQ(RoundUp(6, 2), 6u);
-    ASSERT_EQ(RoundUp(8, 4), 8u);
-    ASSERT_EQ(RoundUp(12, 6), 12u);
-
-    ASSERT_EQ(RoundUp(3, 3), 3u);
-    ASSERT_EQ(RoundUp(3, 5), 5u);
-    ASSERT_EQ(RoundUp(5, 3), 6u);
-    ASSERT_EQ(RoundUp(9, 5), 10u);
-
-    // Test extrema
-    ASSERT_EQ(RoundUp(0x7FFFFFFFFFFFFFFFull, 0x8000000000000000ull), 0x8000000000000000ull);
-    ASSERT_EQ(RoundUp(1, 1), 1u);
-}
src/tests/end2end/D3D12DescriptorHeapTests.cpp
@@ -15,9 +15,7 @@
 #include "tests/DawnTest.h"
 
 #include "dawn_native/Toggles.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
 #include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/NonShaderVisibleDescriptorAllocatorD3D12.h"
 #include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
 #include "utils/ComboRenderPipelineDescriptor.h"
 #include "utils/WGPUHelpers.h"
@@ -95,31 +93,6 @@ class D3D12DescriptorHeapTests : public DawnTest {
     wgpu::ShaderModule mSimpleFSModule;
 };
 
-class DummyNonShaderVisibleDescriptorAllocator {
-  public:
-    DummyNonShaderVisibleDescriptorAllocator(Device* device,
-                                             uint32_t descriptorCount,
-                                             uint32_t allocationsPerHeap)
-        : mAllocator(device,
-                     descriptorCount,
-                     allocationsPerHeap * descriptorCount,
-                     D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {
-    }
-
-    CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
-        dawn_native::ResultOrError<CPUDescriptorHeapAllocation> result =
-            mAllocator.AllocateCPUDescriptors();
-        return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
-    }
-
-    void Deallocate(CPUDescriptorHeapAllocation& allocation) {
-        mAllocator.Deallocate(&allocation);
-    }
-
-  private:
-    NonShaderVisibleDescriptorAllocator mAllocator;
-};
-
 // Verify the shader visible heaps switch over within a single submit.
 TEST_P(D3D12DescriptorHeapTests, SwitchOverHeaps) {
     utils::ComboRenderPipelineDescriptor renderPipelineDescriptor(device);
@@ -715,157 +688,6 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
         }
     }
 
-// Verify a single allocate/deallocate.
-// One non-shader visible heap will be created.
-TEST_P(D3D12DescriptorHeapTests, Single) { ... }
-
-// Verify allocating many times causes the pool to increase in size.
-// Creates |kNumOfHeaps| non-shader visible heaps.
-TEST_P(D3D12DescriptorHeapTests, Sequential) { ... }
-
-// Verify that re-allocating a number of allocations < pool size, all heaps are reused.
-// Creates and reuses |kNumofHeaps| non-shader visible heaps.
-TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) { ... }
-
-// Verify allocating then deallocating many times.
-TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) { ... }
-
 DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
                       D3D12Backend(),
                       D3D12Backend({"use_d3d12_small_shader_visible_heap"}));