// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/null/DeviceNull.h"

#include "dawn_native/BackendConnection.h"
#include "dawn_native/Commands.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/Instance.h"
#include "dawn_native/Surface.h"

namespace dawn_native { namespace null {

    // Implementation of pre-Device objects: the null adapter, null backend connection and
    // Connect()

    Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
        mPCIInfo.name = "Null backend";
        mAdapterType = wgpu::AdapterType::CPU;

        MaybeError err = Initialize();
        ASSERT(err.IsSuccess());
    }

    Adapter::~Adapter() = default;

    bool Adapter::SupportsExternalImages() const {
        return false;
    }

    // Used for the tests that intend to use an adapter without all features enabled.
    void Adapter::SetSupportedFeatures(const std::vector<const char*>& requiredFeatures) {
        mSupportedFeatures = GetInstance()->FeatureNamesToFeaturesSet(requiredFeatures);
    }

    MaybeError Adapter::InitializeImpl() {
        return {};
    }

    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
        // Enable all features by default for the convenience of tests.
        mSupportedFeatures.featuresBitSet.set();
        return {};
    }

    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
        GetDefaultLimits(&limits->v1);
        return {};
    }

    ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DawnDeviceDescriptor* descriptor) {
        return Device::Create(this, descriptor);
    }
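    // A minimal sketch of how a test might use SetSupportedFeatures() above. The feature
    // name here is hypothetical and the adapter pointer is assumed to come from adapter
    // discovery:
    //
    //     null::Adapter* adapter = ...;  // obtained from DiscoverDefaultAdapters()
    //     adapter->SetSupportedFeatures({"texture-compression-bc"});  // hypothetical name
    //
    // Feature queries on the adapter then reflect only the requested subset instead of the
    // all-features default set in InitializeSupportedFeaturesImpl().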
    class Backend : public BackendConnection {
      public:
        Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
        }

        std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override {
            // There is always a single Null adapter because it is purely CPU based and doesn't
            // depend on the system.
            std::vector<std::unique_ptr<AdapterBase>> adapters;
            std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(GetInstance());
            adapters.push_back(std::move(adapter));
            return adapters;
        }
    };

    BackendConnection* Connect(InstanceBase* instance) {
        return new Backend(instance);
    }

    struct CopyFromStagingToBufferOperation : PendingOperation {
        virtual void Execute() {
            destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
        }

        StagingBufferBase* staging;
        Ref<Buffer> destination;
        uint64_t sourceOffset;
        uint64_t destinationOffset;
        uint64_t size;
    };

    // Device

    // static
    ResultOrError<Device*> Device::Create(Adapter* adapter,
                                          const DawnDeviceDescriptor* descriptor) {
        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
        DAWN_TRY(device->Initialize());
        return device.Detach();
    }

    Device::~Device() {
        Destroy();
    }

    MaybeError Device::Initialize() {
        return DeviceBase::Initialize(new Queue(this));
    }

    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
        const BindGroupDescriptor* descriptor) {
        return AcquireRef(new BindGroup(this, descriptor));
    }

    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
        const BindGroupLayoutDescriptor* descriptor,
        PipelineCompatibilityToken pipelineCompatibilityToken) {
        return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
    }

    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
        DAWN_TRY(IncrementMemoryUsage(descriptor->size));
        return AcquireRef(new Buffer(this, descriptor));
    }

    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
        CommandEncoder* encoder,
        const CommandBufferDescriptor* descriptor) {
        return AcquireRef(new CommandBuffer(encoder, descriptor));
    }

    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
        const ComputePipelineDescriptor* descriptor) {
        return AcquireRef(new ComputePipeline(this, descriptor));
    }

    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
        const PipelineLayoutDescriptor* descriptor) {
        return AcquireRef(new PipelineLayout(this, descriptor));
    }

    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
        const QuerySetDescriptor* descriptor) {
        return AcquireRef(new QuerySet(this, descriptor));
    }

    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
        const RenderPipelineDescriptor* descriptor) {
        return AcquireRef(new RenderPipeline(this, descriptor));
    }

    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
        return AcquireRef(new Sampler(this, descriptor));
    }

    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
        const ShaderModuleDescriptor* descriptor,
        ShaderModuleParseResult* parseResult) {
        Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
        DAWN_TRY(module->Initialize(parseResult));
        return module;
    }

    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
        const SwapChainDescriptor* descriptor) {
        return AcquireRef(new OldSwapChain(this, descriptor));
    }

    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
        Surface* surface,
        NewSwapChainBase* previousSwapChain,
        const SwapChainDescriptor* descriptor) {
        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
    }

    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
        return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
    }

    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
        TextureBase* texture,
        const TextureViewDescriptor* descriptor) {
        return AcquireRef(new TextureView(texture, descriptor));
    }

    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
        std::unique_ptr<StagingBufferBase> stagingBuffer =
            std::make_unique<StagingBuffer>(size, this);
        DAWN_TRY(stagingBuffer->Initialize());
        return std::move(stagingBuffer);
    }
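    // A minimal sketch (not part of the backend) of the deferred-copy flow implemented by
    // CopyFromStagingToBufferOperation above, assuming `staging` and `buffer` were created
    // through this device:
    //
    //     device->CopyFromStagingToBuffer(staging, 0, buffer, 0, size);  // only queues the copy
    //     device->Tick();  // TickImpl() -> SubmitPendingOperations() runs the actual memcpy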
    void Device::DestroyImpl() {
        ASSERT(GetState() == State::Disconnected);

        // Clear pending operations before checking mMemoryUsage because some operations keep a
        // reference to Buffers.
        mPendingOperations.clear();
        ASSERT(mMemoryUsage == 0);
    }

    MaybeError Device::WaitForIdleForDestruction() {
        mPendingOperations.clear();
        return {};
    }

    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
                                               uint64_t sourceOffset,
                                               BufferBase* destination,
                                               uint64_t destinationOffset,
                                               uint64_t size) {
        if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
            destination->SetIsDataInitialized();
        }

        auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
        operation->staging = source;
        operation->destination = ToBackend(destination);
        operation->sourceOffset = sourceOffset;
        operation->destinationOffset = destinationOffset;
        operation->size = size;

        AddPendingOperation(std::move(operation));

        return {};
    }

    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
                                                const TextureDataLayout& src,
                                                TextureCopy* dst,
                                                const Extent3D& copySizePixels) {
        return {};
    }

    MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
        static_assert(kMaxMemoryUsage <= std::numeric_limits<uint64_t>::max(), "");
        // The check is written with a subtraction so it cannot itself overflow uint64_t.
        if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
            return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
        }
        mMemoryUsage += bytes;
        return {};
    }

    void Device::DecrementMemoryUsage(uint64_t bytes) {
        ASSERT(mMemoryUsage >= bytes);
        mMemoryUsage -= bytes;
    }

    MaybeError Device::TickImpl() {
        return SubmitPendingOperations();
    }

    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
        return GetLastSubmittedCommandSerial();
    }

    void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
        mPendingOperations.emplace_back(std::move(operation));
    }

    MaybeError Device::SubmitPendingOperations() {
        for (auto& operation : mPendingOperations) {
            operation->Execute();
        }
        mPendingOperations.clear();

        DAWN_TRY(CheckPassedSerials());
        IncrementLastSubmittedCommandSerial();

        return {};
    }
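    // Note on serial bookkeeping: there is no real GPU, so work is complete as soon as it is
    // submitted. A sketch of what that implies for callers:
    //
    //     DAWN_TRY(device->SubmitPendingOperations());  // bumps the last submitted serial
    //     // CheckAndUpdateCompletedSerials() reports that same serial as completed, so any
    //     // wait for completion resolves immediately without blocking.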
    // BindGroupDataHolder

    BindGroupDataHolder::BindGroupDataHolder(size_t size)
        : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
                                                // pointer aligned enough for the allocation
    {
    }

    BindGroupDataHolder::~BindGroupDataHolder() {
        free(mBindingDataAllocation);
    }

    // BindGroup

    // BindGroupDataHolder is inherited first so that mBindingDataAllocation is initialized
    // before BindGroupBase's constructor consumes it.
    BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
        : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
          BindGroupBase(device, descriptor, mBindingDataAllocation) {
    }

    // BindGroupLayout

    BindGroupLayout::BindGroupLayout(DeviceBase* device,
                                     const BindGroupLayoutDescriptor* descriptor,
                                     PipelineCompatibilityToken pipelineCompatibilityToken)
        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
    }

    // Buffer

    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
        : BufferBase(device, descriptor) {
        mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
        mAllocatedSize = GetSize();
    }

    bool Buffer::IsCPUWritableAtCreation() const {
        // Only return true for mappable buffers so we can test cases that need / don't need a
        // staging buffer.
        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
    }

    MaybeError Buffer::MapAtCreationImpl() {
        return {};
    }

    void Buffer::CopyFromStaging(StagingBufferBase* staging,
                                 uint64_t sourceOffset,
                                 uint64_t destinationOffset,
                                 uint64_t size) {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
    }

    void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
        ASSERT(bufferOffset + size <= GetSize());
        ASSERT(mBackingData);
        memcpy(mBackingData.get() + bufferOffset, data, size);
    }

    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
        return {};
    }

    void* Buffer::GetMappedPointerImpl() {
        return mBackingData.get();
    }

    void Buffer::UnmapImpl() {
    }

    void Buffer::DestroyImpl() {
        ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
    }

    // CommandBuffer

    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
        : CommandBufferBase(encoder, descriptor) {
    }

    // QuerySet

    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
        : QuerySetBase(device, descriptor) {
    }

    void QuerySet::DestroyImpl() {
    }

    // Queue

    Queue::Queue(Device* device) : QueueBase(device) {
    }

    Queue::~Queue() {
    }

    MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
        Device* device = ToBackend(GetDevice());

        // The Vulkan, D3D12 and Metal implementations all tick the device here; for testing
        // purposes we should also tick in the null implementation.
        DAWN_TRY(device->Tick());

        return device->SubmitPendingOperations();
    }

    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
                                      uint64_t bufferOffset,
                                      const void* data,
                                      size_t size) {
        ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
        return {};
    }
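    // A minimal usage sketch (hypothetical, via the frontend queue API): queue writes land in
    // the buffer's backing memory immediately through DoWriteBuffer(), while submits tick the
    // device and flush any pending staging copies:
    //
    //     queue->WriteBuffer(buffer, 0, data, size);  // immediate memcpy into mBackingData
    //     queue->Submit(0, nullptr);                  // Tick() + SubmitPendingOperations()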
    // ComputePipeline

    MaybeError ComputePipeline::Initialize() {
        return {};
    }

    // RenderPipeline

    MaybeError RenderPipeline::Initialize() {
        return {};
    }

    // SwapChain

    // static
    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
                                                    Surface* surface,
                                                    NewSwapChainBase* previousSwapChain,
                                                    const SwapChainDescriptor* descriptor) {
        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
        DAWN_TRY(swapchain->Initialize(previousSwapChain));
        return swapchain;
    }

    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
        if (previousSwapChain != nullptr) {
            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
            // multiple backends one after the other. It probably needs to block until the backend
            // and GPU are completely finished with the previous swapchain.
            if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
                return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
            }
        }

        return {};
    }

    SwapChain::~SwapChain() = default;

    MaybeError SwapChain::PresentImpl() {
        mTexture->APIDestroy();
        mTexture = nullptr;
        return {};
    }

    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
        mTexture = AcquireRef(
            new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
        return mTexture->APICreateView();
    }

    void SwapChain::DetachFromSurfaceImpl() {
        if (mTexture != nullptr) {
            mTexture->APIDestroy();
            mTexture = nullptr;
        }
    }

    // ShaderModule

    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
        return InitializeBase(parseResult);
    }

    // OldSwapChain

    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
        : OldSwapChainBase(device, descriptor) {
        const auto& im = GetImplementation();
        im.Init(im.userData, nullptr);
    }

    OldSwapChain::~OldSwapChain() {
    }

    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
        return GetDevice()->APICreateTexture(descriptor);
    }

    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
        return {};
    }

    // NativeSwapChainImpl

    void NativeSwapChainImpl::Init(WSIContext* context) {
    }

    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
                                                      WGPUTextureUsage,
                                                      uint32_t width,
                                                      uint32_t height) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    DawnSwapChainError NativeSwapChainImpl::Present() {
        return DAWN_SWAP_CHAIN_NO_ERROR;
    }

    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
        return wgpu::TextureFormat::RGBA8Unorm;
    }

    // StagingBuffer

    StagingBuffer::StagingBuffer(size_t size, Device* device)
        : StagingBufferBase(size), mDevice(device) {
    }

    StagingBuffer::~StagingBuffer() {
        if (mBuffer) {
            mDevice->DecrementMemoryUsage(GetSize());
        }
    }

    MaybeError StagingBuffer::Initialize() {
        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
        mBuffer = std::make_unique<uint8_t[]>(GetSize());
        mMappedPointer = mBuffer.get();
        return {};
    }

    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
        return 1;
    }

    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
        return 1;
    }

    float Device::GetTimestampPeriodInNS() const {
        return 1.0f;
    }

}}  // namespace dawn_native::null
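// A sketch of how this backend is reached end-to-end (illustrative; the instance setup is
// assumed and not shown in this file): Connect() registers the Backend with the instance,
// DiscoverDefaultAdapters() always yields exactly one CPU adapter, and devices created from
// it turn all GPU work into no-ops or plain memcpys:
//
//     InstanceBase* instance = ...;         // assumed to already exist
//     instance->DiscoverDefaultAdapters();  // includes the single null adapter
//     // Select the adapter with wgpu::BackendType::Null and create a Device from it.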