// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/null/DeviceNull.h"
|
|
|
|
#include "dawn_native/BackendConnection.h"
|
|
#include "dawn_native/Commands.h"
|
|
#include "dawn_native/ErrorData.h"
|
|
#include "dawn_native/Instance.h"
|
|
#include "dawn_native/Surface.h"
|
|
|
|
#include <spirv_cross.hpp>
|
|
|
|
namespace dawn_native { namespace null {

    // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()

    Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
        mPCIInfo.name = "Null backend";
        mAdapterType = wgpu::AdapterType::CPU;

        // Enable all extensions by default for the convenience of tests.
        mSupportedExtensions.extensionsBitSet.flip();
    }

    Adapter::~Adapter() = default;

    // Used for the tests that intend to use an adapter without all extensions enabled.
    void Adapter::SetSupportedExtensions(const std::vector<const char*>& requiredExtensions) {
        mSupportedExtensions = GetInstance()->ExtensionNamesToExtensionsSet(requiredExtensions);
    }

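    // Illustrative sketch (not part of the original file): a test that wants only a
    // subset of extensions could restrict the adapter before creating a device. The
    // extension name below is an assumption used for illustration:
    //
    //     adapter->SetSupportedExtensions({"texture_compression_bc"});
    //     // Devices created afterwards only expose the listed extensions.
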
    ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
        return Device::Create(this, descriptor);
    }

    class Backend : public BackendConnection {
      public:
        Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
        }

        std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override {
            // There is always a single Null adapter because it is purely CPU based and doesn't
            // depend on the system.
            std::vector<std::unique_ptr<AdapterBase>> adapters;
            adapters.push_back(std::make_unique<Adapter>(GetInstance()));
            return adapters;
        }
    };

    BackendConnection* Connect(InstanceBase* instance) {
        return new Backend(instance);
    }

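    // Illustrative sketch (assumption, not from this file): how user code typically
    // reaches Connect() and this backend's single adapter through the dawn_native API:
    //
    //     dawn_native::Instance instance;
    //     instance.DiscoverDefaultAdapters();  // calls Connect() for each enabled backend
    //     for (dawn_native::Adapter& adapter : instance.GetAdapters()) {
    //         // One of these adapters is the Null adapter created above.
    //     }
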
    struct CopyFromStagingToBufferOperation : PendingOperation {
        // Runs when the device flushes its pending work (see Device::SubmitPendingOperations).
        void Execute() override {
            destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
        }

        StagingBufferBase* staging;
        Ref<Buffer> destination;
        uint64_t sourceOffset;
        uint64_t destinationOffset;
        uint64_t size;
    };

    // Device

    // static
    ResultOrError<Device*> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
        DAWN_TRY(device->Initialize());
        // Detach so the caller receives ownership of the reference.
        return device.Detach();
    }

    Device::~Device() {
        ShutDownBase();
    }

    MaybeError Device::Initialize() {
        return DeviceBase::Initialize(new Queue(this));
    }

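    // Illustrative note (not part of the original file): this create-then-Initialize()
    // two-phase pattern recurs below (ShaderModule, SwapChain, StagingBuffer) because
    // constructors cannot return a MaybeError; fallible setup is deferred to Initialize().
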
    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
        const BindGroupDescriptor* descriptor) {
        return AcquireRef(new BindGroup(this, descriptor));
    }
    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
        const BindGroupLayoutDescriptor* descriptor) {
        return AcquireRef(new BindGroupLayout(this, descriptor));
    }
    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
        DAWN_TRY(IncrementMemoryUsage(descriptor->size));
        return AcquireRef(new Buffer(this, descriptor));
    }
    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
        CommandEncoder* encoder,
        const CommandBufferDescriptor* descriptor) {
        return AcquireRef(new CommandBuffer(encoder, descriptor));
    }
    ResultOrError<Ref<ComputePipelineBase>> Device::CreateComputePipelineImpl(
        const ComputePipelineDescriptor* descriptor) {
        return AcquireRef(new ComputePipeline(this, descriptor));
    }
    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
        const PipelineLayoutDescriptor* descriptor) {
        return AcquireRef(new PipelineLayout(this, descriptor));
    }
    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
        const QuerySetDescriptor* descriptor) {
        return AcquireRef(new QuerySet(this, descriptor));
    }
    ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
        const RenderPipelineDescriptor2* descriptor) {
        return AcquireRef(new RenderPipeline(this, descriptor));
    }
    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
        return AcquireRef(new Sampler(this, descriptor));
    }
    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
        const ShaderModuleDescriptor* descriptor,
        ShaderModuleParseResult* parseResult) {
        Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
        DAWN_TRY(module->Initialize(parseResult));
        return module;
    }
    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
        const SwapChainDescriptor* descriptor) {
        return AcquireRef(new OldSwapChain(this, descriptor));
    }
    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
        Surface* surface,
        NewSwapChainBase* previousSwapChain,
        const SwapChainDescriptor* descriptor) {
        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
    }
    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
        return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
    }
    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
        TextureBase* texture,
        const TextureViewDescriptor* descriptor) {
        return AcquireRef(new TextureView(texture, descriptor));
    }

    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
        std::unique_ptr<StagingBufferBase> stagingBuffer =
            std::make_unique<StagingBuffer>(size, this);
        DAWN_TRY(stagingBuffer->Initialize());
        return std::move(stagingBuffer);
    }

    void Device::ShutDownImpl() {
        ASSERT(GetState() == State::Disconnected);

        // Clear pending operations before checking mMemoryUsage because some operations keep a
        // reference to Buffers.
        mPendingOperations.clear();
        ASSERT(mMemoryUsage == 0);
    }

    MaybeError Device::WaitForIdleForDestruction() {
        mPendingOperations.clear();
        return {};
    }

    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
                                               uint64_t sourceOffset,
                                               BufferBase* destination,
                                               uint64_t destinationOffset,
                                               uint64_t size) {
        if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
            destination->SetIsDataInitialized();
        }

        auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
        operation->staging = source;
        operation->destination = ToBackend(destination);
        operation->sourceOffset = sourceOffset;
        operation->destinationOffset = destinationOffset;
        operation->size = size;

        AddPendingOperation(std::move(operation));

        return {};
    }

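    // Illustrative sketch (assumption, not from this file): the deferred-copy flow this
    // sets up, e.g. for a buffer created mappedAtCreation that is not CPU writable:
    //
    //     // 1. The frontend writes the user's data into a StagingBuffer.
    //     // 2. CopyFromStagingToBuffer() records a CopyFromStagingToBufferOperation.
    //     // 3. The next Queue submit or device tick calls SubmitPendingOperations(),
    //     //    whose Execute() memcpys the staging memory into Buffer::mBackingData.
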
    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
                                                const TextureDataLayout& src,
                                                TextureCopy* dst,
                                                const Extent3D& copySizePixels) {
        // No-op: null textures have no backing storage to copy into.
        return {};
    }

    MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
        static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
        // Checking bytes against kMaxMemoryUsage on its own also protects the addition
        // below from uint64_t wraparound on very large requests.
        if (bytes > kMaxMemoryUsage || mMemoryUsage + bytes > kMaxMemoryUsage) {
            return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
        }
        mMemoryUsage += bytes;
        return {};
    }

    void Device::DecrementMemoryUsage(uint64_t bytes) {
        ASSERT(mMemoryUsage >= bytes);
        mMemoryUsage -= bytes;
    }

    MaybeError Device::TickImpl() {
        SubmitPendingOperations();
        return {};
    }

    ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
        // The null backend does all of its work synchronously, so every submitted serial
        // is immediately considered complete.
        return GetLastSubmittedCommandSerial();
    }

    void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
        mPendingOperations.emplace_back(std::move(operation));
    }
    void Device::SubmitPendingOperations() {
        for (auto& operation : mPendingOperations) {
            operation->Execute();
        }
        mPendingOperations.clear();

        CheckPassedSerials();
        IncrementLastSubmittedCommandSerial();
    }

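    // Illustrative sketch (not part of the original file): the serial bookkeeping above
    // means work appears to finish as soon as it is submitted:
    //
    //     device->AddPendingOperation(std::move(op));  // queued, not yet executed
    //     device->SubmitPendingOperations();           // op->Execute() runs synchronously
    //     // The last submitted serial has advanced, and CheckAndUpdateCompletedSerials()
    //     // reports that same serial as already completed.
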
    // BindGroupDataHolder

    BindGroupDataHolder::BindGroupDataHolder(size_t size)
        : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
                                                // pointer aligned enough for the allocation
    {
    }

    BindGroupDataHolder::~BindGroupDataHolder() {
        free(mBindingDataAllocation);
    }

    // BindGroup

    // BindGroupDataHolder is listed before BindGroupBase in BindGroup's base classes so
    // the allocation exists before BindGroupBase's constructor receives it.
    BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
        : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
          BindGroupBase(device, descriptor, mBindingDataAllocation) {
    }

    // Buffer

    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
        : BufferBase(device, descriptor) {
        mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
    }

    Buffer::~Buffer() {
        DestroyInternal();
        ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
    }

    bool Buffer::IsCPUWritableAtCreation() const {
        // Only return true for mappable buffers so we can test cases that need / don't need a
        // staging buffer.
        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
    }

    MaybeError Buffer::MapAtCreationImpl() {
        return {};
    }

    void Buffer::CopyFromStaging(StagingBufferBase* staging,
                                 uint64_t sourceOffset,
                                 uint64_t destinationOffset,
                                 uint64_t size) {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
    }

    void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
        ASSERT(bufferOffset + size <= GetSize());
        ASSERT(mBackingData);
        memcpy(mBackingData.get() + bufferOffset, data, size);
    }

    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
        // Mapping always succeeds: the backing store is plain host memory.
        return {};
    }

    void* Buffer::GetMappedPointerImpl() {
        return mBackingData.get();
    }

    void Buffer::UnmapImpl() {
    }

    void Buffer::DestroyImpl() {
    }

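    // Illustrative sketch (assumption, not from this file): the user-facing mapping path
    // these trivial implementations support, via the webgpu.h C++ wrappers:
    //
    //     buffer.MapAsync(wgpu::MapMode::Read, 0, 4,
    //                     [](WGPUBufferMapAsyncStatus status, void*) {}, nullptr);
    //     device.Tick();  // the null backend resolves the map on the next tick
    //     const void* ptr = buffer.GetConstMappedRange(0, 4);
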
    // CommandBuffer

    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
        : CommandBufferBase(encoder, descriptor) {
    }

    // QuerySet

    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
        : QuerySetBase(device, descriptor) {
    }

    QuerySet::~QuerySet() {
        DestroyInternal();
    }

    void QuerySet::DestroyImpl() {
    }

    // Queue

    Queue::Queue(Device* device) : QueueBase(device) {
    }

    Queue::~Queue() {
    }

    MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
        Device* device = ToBackend(GetDevice());

        // The Vulkan, D3D12 and Metal implementations all tick the device here, so for
        // testing purposes we also tick in the null implementation.
        DAWN_TRY(device->Tick());

        device->SubmitPendingOperations();
        return {};
    }

    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
                                      uint64_t bufferOffset,
                                      const void* data,
                                      size_t size) {
        ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
        return {};
    }

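    // Illustrative sketch (assumption, not from this file): the user-facing call that
    // lands here. The null backend skips staging entirely and memcpys straight into the
    // buffer's host-side backing store:
    //
    //     uint32_t data[4] = {0, 1, 2, 3};
    //     queue.WriteBuffer(buffer, /*bufferOffset=*/0, data, sizeof(data));
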
    // SwapChain

    // static
    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
                                                    Surface* surface,
                                                    NewSwapChainBase* previousSwapChain,
                                                    const SwapChainDescriptor* descriptor) {
        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
        DAWN_TRY(swapchain->Initialize(previousSwapChain));
        return swapchain;
    }

    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
        if (previousSwapChain != nullptr) {
            // TODO(cwallez@chromium.org): figure out what should happen when surfaces are used by
            // multiple backends one after the other. It probably needs to block until the backend
            // and GPU are completely finished with the previous swapchain.
            if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
                return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
            }
        }

        return {};
    }

    SwapChain::~SwapChain() = default;

    MaybeError SwapChain::PresentImpl() {
        mTexture->APIDestroy();
        mTexture = nullptr;
        return {};
    }

    ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
        mTexture = AcquireRef(
            new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
        return mTexture->APICreateView();
    }

    void SwapChain::DetachFromSurfaceImpl() {
        if (mTexture != nullptr) {
            mTexture->APIDestroy();
            mTexture = nullptr;
        }
    }

    // ShaderModule

    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
        return InitializeBase(parseResult);
    }

    // OldSwapChain

    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
        : OldSwapChainBase(device, descriptor) {
        const auto& im = GetImplementation();
        im.Init(im.userData, nullptr);
    }

    OldSwapChain::~OldSwapChain() {
    }

    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
        return GetDevice()->APICreateTexture(descriptor);
    }

    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
        return {};
    }

    // NativeSwapChainImpl

    void NativeSwapChainImpl::Init(WSIContext* context) {
    }

    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
                                                      WGPUTextureUsage,
                                                      uint32_t width,
                                                      uint32_t height) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::Present() {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
        return wgpu::TextureFormat::RGBA8Unorm;
    }

    // StagingBuffer

    StagingBuffer::StagingBuffer(size_t size, Device* device)
        : StagingBufferBase(size), mDevice(device) {
    }

    StagingBuffer::~StagingBuffer() {
        if (mBuffer) {
            mDevice->DecrementMemoryUsage(GetSize());
        }
    }

    MaybeError StagingBuffer::Initialize() {
        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
        mBuffer = std::make_unique<uint8_t[]>(GetSize());
        mMappedPointer = mBuffer.get();
        return {};
    }

    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
        // Copies are plain memcpys, so no row alignment is required.
        return 1;
    }

    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
        return 1;
    }

    float Device::GetTimestampPeriodInNS() const {
        return 1.0f;
    }

}}  // namespace dawn_native::null