Vulkan: Start buffers, hack SetSubData and MapRead

Corentin Wallez 2017-11-21 19:04:51 -05:00 committed by Corentin Wallez
parent b8387a62a6
commit 315e9268bb
9 changed files with 479 additions and 34 deletions

View File

@ -289,6 +289,10 @@ if (NXT_ENABLE_VULKAN)
list(APPEND BACKEND_SOURCES
${VULKAN_DIR}/vulkan_platform.h
${VULKAN_DIR}/BufferVk.cpp
${VULKAN_DIR}/BufferVk.h
${VULKAN_DIR}/MemoryAllocator.cpp
${VULKAN_DIR}/MemoryAllocator.h
${VULKAN_DIR}/VulkanBackend.cpp
${VULKAN_DIR}/VulkanBackend.h
${VULKAN_DIR}/VulkanFunctions.cpp

View File

@ -172,7 +172,6 @@ namespace d3d12 {
}
}
BufferView::BufferView(BufferViewBuilder* builder)
: BufferViewBase(builder) {

View File backend/vulkan/BufferVk.cpp

@ -0,0 +1,148 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "backend/vulkan/BufferVk.h"
#include "backend/vulkan/VulkanBackend.h"
#include <cstring>
namespace backend {
namespace vulkan {
namespace {
VkBufferUsageFlags VulkanBufferUsage(nxt::BufferUsageBit usage) {
VkBufferUsageFlags flags = 0;
if (usage & nxt::BufferUsageBit::TransferSrc) {
flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
}
if (usage & nxt::BufferUsageBit::TransferDst) {
flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
if (usage & nxt::BufferUsageBit::Index) {
flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
}
if (usage & nxt::BufferUsageBit::Vertex) {
flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
}
if (usage & nxt::BufferUsageBit::Uniform) {
flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
}
if (usage & nxt::BufferUsageBit::Storage) {
flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
}
return flags;
}
}
Buffer::Buffer(BufferBuilder* builder)
: BufferBase(builder) {
Device* device = ToBackend(GetDevice());
VkBufferCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.size = GetSize();
createInfo.usage = VulkanBufferUsage(GetAllowedUsage());
createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.queueFamilyIndexCount = 0;
createInfo.pQueueFamilyIndices = nullptr;
if (device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &handle) != VK_SUCCESS) {
ASSERT(false);
}
VkMemoryRequirements requirements;
device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), handle, &requirements);
bool requestMappable = (GetAllowedUsage() & (nxt::BufferUsageBit::MapRead | nxt::BufferUsageBit::MapWrite)) != 0;
if (!device->GetMemoryAllocator()->Allocate(requirements, requestMappable, &memoryAllocation)) {
ASSERT(false);
}
if (device->fn.BindBufferMemory(device->GetVkDevice(), handle, memoryAllocation.GetMemory(),
memoryAllocation.GetMemoryOffset()) != VK_SUCCESS) {
ASSERT(false);
}
}
Buffer::~Buffer() {
Device* device = ToBackend(GetDevice());
device->GetMemoryAllocator()->Free(&memoryAllocation);
if (handle != VK_NULL_HANDLE) {
device->fn.DestroyBuffer(device->GetVkDevice(), handle, nullptr);
handle = VK_NULL_HANDLE;
}
}
void Buffer::OnMapReadCommandSerialFinished(uint32_t mapSerial, const void* data) {
CallMapReadCallback(mapSerial, NXT_BUFFER_MAP_READ_STATUS_SUCCESS, data);
}
void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) {
// TODO(cwallez@chromium.org): Make a proper resource uploader. Not all resources
// can be directly mapped.
uint8_t* memory = memoryAllocation.GetMappedPointer();
ASSERT(memory != nullptr);
memcpy(memory + start * sizeof(uint32_t), data, count * sizeof(uint32_t));
}
void Buffer::MapReadAsyncImpl(uint32_t serial, uint32_t start, uint32_t /*count*/) {
const uint8_t* memory = memoryAllocation.GetMappedPointer();
ASSERT(memory != nullptr);
MapReadRequestTracker* tracker = ToBackend(GetDevice())->GetMapReadRequestTracker();
tracker->Track(this, serial, memory + start);
}
void Buffer::UnmapImpl() {
// No need to do anything, we keep CPU-visible memory mapped at all times.
}
void Buffer::TransitionUsageImpl(nxt::BufferUsageBit, nxt::BufferUsageBit) {
}
MapReadRequestTracker::MapReadRequestTracker(Device* device)
: device(device) {
}
MapReadRequestTracker::~MapReadRequestTracker() {
ASSERT(inflightRequests.Empty());
}
void MapReadRequestTracker::Track(Buffer* buffer, uint32_t mapSerial, const void* data) {
Request request;
request.buffer = buffer;
request.mapSerial = mapSerial;
request.data = data;
inflightRequests.Enqueue(std::move(request), device->GetSerial());
}
void MapReadRequestTracker::Tick(Serial finishedSerial) {
for (auto& request : inflightRequests.IterateUpTo(finishedSerial)) {
request.buffer->OnMapReadCommandSerialFinished(request.mapSerial, request.data);
}
inflightRequests.ClearUpTo(finishedSerial);
}
}
}
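
SetSubDataImpl above can be a plain memcpy because mappable allocations stay persistently mapped in host-visible memory. The following is a minimal standalone sketch of the same idea in raw Vulkan, outside the NXT wrappers; WriteToMappableBuffer is illustrative only, and an already-created VkDevice plus a HOST_VISIBLE (ideally HOST_COHERENT, so no explicit flush is needed) memory type index are assumed.

#include <vulkan/vulkan.h>
#include <cstdint>
#include <cstring>

// Creates a host-visible buffer, keeps it mapped, and copies data into it.
// Error handling is minimal; memoryTypeIndex must select HOST_VISIBLE memory.
bool WriteToMappableBuffer(VkDevice device, uint32_t memoryTypeIndex,
                           const void* data, VkDeviceSize size) {
    VkBufferCreateInfo bufferInfo = {};
    bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferInfo.size = size;
    bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VkBuffer buffer = VK_NULL_HANDLE;
    if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
        return false;
    }

    VkMemoryRequirements requirements;
    vkGetBufferMemoryRequirements(device, buffer, &requirements);

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize = requirements.size;
    allocInfo.memoryTypeIndex = memoryTypeIndex;

    VkDeviceMemory memory = VK_NULL_HANDLE;
    if (vkAllocateMemory(device, &allocInfo, nullptr, &memory) != VK_SUCCESS) {
        vkDestroyBuffer(device, buffer, nullptr);
        return false;
    }
    vkBindBufferMemory(device, buffer, memory, 0);

    // Map once and keep writing through the pointer; this is exactly the
    // memcpy-into-mapped-memory that SetSubDataImpl relies on.
    void* mapped = nullptr;
    if (vkMapMemory(device, memory, 0, size, 0, &mapped) != VK_SUCCESS) {
        vkFreeMemory(device, memory, nullptr);
        vkDestroyBuffer(device, buffer, nullptr);
        return false;
    }
    std::memcpy(mapped, data, static_cast<size_t>(size));

    // Immediate cleanup shown for completeness; the backend instead keeps the
    // mapping alive and defers destruction via serials.
    vkUnmapMemory(device, memory);
    vkDestroyBuffer(device, buffer, nullptr);
    vkFreeMemory(device, memory, nullptr);
    return true;
}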

View File backend/vulkan/BufferVk.h

@ -0,0 +1,68 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_VULKAN_BUFFERVK_H_
#define BACKEND_VULKAN_BUFFERVK_H_
#include "backend/Buffer.h"
#include "backend/vulkan/vulkan_platform.h"
#include "backend/vulkan/MemoryAllocator.h"
#include "common/SerialQueue.h"
namespace backend {
namespace vulkan {
class Device;
class Buffer : public BufferBase {
public:
Buffer(BufferBuilder* builder);
~Buffer();
void OnMapReadCommandSerialFinished(uint32_t mapSerial, const void* data);
private:
void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) override;
void MapReadAsyncImpl(uint32_t serial, uint32_t start, uint32_t count) override;
void UnmapImpl() override;
void TransitionUsageImpl(nxt::BufferUsageBit currentUsage, nxt::BufferUsageBit targetUsage) override;
VkBuffer handle = VK_NULL_HANDLE;
DeviceMemoryAllocation memoryAllocation;
};
class MapReadRequestTracker {
public:
MapReadRequestTracker(Device* device);
~MapReadRequestTracker();
void Track(Buffer* buffer, uint32_t mapSerial, const void* data);
void Tick(Serial finishedSerial);
private:
Device* device;
struct Request {
Ref<Buffer> buffer;
uint32_t mapSerial;
const void* data;
};
SerialQueue<Request> inflightRequests;
};
}
}
#endif // BACKEND_VULKAN_BUFFERVK_H_

View File

@ -12,4 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "backend/vulkan/BufferVk.h"
#include "backend/vulkan/VulkanBackend.h"

View File backend/vulkan/MemoryAllocator.cpp

@ -0,0 +1,131 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "backend/vulkan/MemoryAllocator.h"
#include "backend/vulkan/VulkanBackend.h"
namespace backend {
namespace vulkan {
DeviceMemoryAllocation::~DeviceMemoryAllocation() {
ASSERT(memory == VK_NULL_HANDLE);
}
VkDeviceMemory DeviceMemoryAllocation::GetMemory() const {
return memory;
}
size_t DeviceMemoryAllocation::GetMemoryOffset() const {
return offset;
}
uint8_t* DeviceMemoryAllocation::GetMappedPointer() const {
return mappedPointer;
}
MemoryAllocator::MemoryAllocator(Device* device)
: device(device) {
}
MemoryAllocator::~MemoryAllocator() {
ASSERT(releasedMemory.Empty());
}
bool MemoryAllocator::Allocate(VkMemoryRequirements requirements, bool mappable, DeviceMemoryAllocation* allocation) {
const VulkanDeviceInfo& info = device->GetDeviceInfo();
// Find a suitable memory type for this allocation
int bestType = -1;
for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
// Resource must support this memory type
if ((requirements.memoryTypeBits & (1 << i)) == 0) {
continue;
}
// Mappable resources must be host visible
if (mappable && (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
continue;
}
// Found the first candidate memory type
if (bestType == -1) {
bestType = static_cast<int>(i);
continue;
}
// For non-mappable resources, favor device local memory.
if (!mappable) {
if ((info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0 &&
(info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) {
bestType = static_cast<int>(i);
continue;
}
}
// All else being equal, favor the memory type in the biggest heap
VkDeviceSize bestTypeHeapSize = info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
if (candidateHeapSize > bestTypeHeapSize) {
bestType = static_cast<int>(i);
continue;
}
}
// TODO(cwallez@chromium.org): I think the Vulkan spec guarantees this should never happen
if (bestType == -1) {
ASSERT(false);
return false;
}
VkMemoryAllocateInfo allocateInfo;
allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocateInfo.pNext = nullptr;
allocateInfo.allocationSize = requirements.size;
allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
if (device->fn.AllocateMemory(device->GetVkDevice(), &allocateInfo, nullptr, &allocatedMemory) != VK_SUCCESS) {
return false;
}
void* mappedPointer = nullptr;
if (mappable) {
if (device->fn.MapMemory(device->GetVkDevice(), allocatedMemory, 0, requirements.size, 0,
&mappedPointer) != VK_SUCCESS) {
return false;
}
}
allocation->memory = allocatedMemory;
allocation->offset = 0;
allocation->mappedPointer = reinterpret_cast<uint8_t*>(mappedPointer);
return true;
}
void MemoryAllocator::Free(DeviceMemoryAllocation* allocation) {
releasedMemory.Enqueue(allocation->memory, device->GetSerial());
allocation->memory = VK_NULL_HANDLE;
allocation->offset = 0;
allocation->mappedPointer = nullptr;
}
void MemoryAllocator::Tick(Serial finishedSerial) {
for (auto memory : releasedMemory.IterateUpTo(finishedSerial)) {
device->fn.FreeMemory(device->GetVkDevice(), memory, nullptr);
}
releasedMemory.ClearUpTo(finishedSerial);
}
}
}
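
The memory-type search in Allocate only reads the cached VkPhysicalDeviceMemoryProperties, so the heuristic can be exercised without a live device. A sketch of the same selection as a free function follows; FindMemoryType is illustrative and not part of the backend.

#include <vulkan/vulkan.h>
#include <cstdint>

// Returns the index of a memory type compatible with typeBits, requiring
// HOST_VISIBLE for mappable allocations, preferring DEVICE_LOCAL otherwise,
// and breaking ties by heap size. Returns -1 if nothing matches.
int FindMemoryType(const VkPhysicalDeviceMemoryProperties& props,
                   uint32_t typeBits, bool mappable) {
    int best = -1;
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        if ((typeBits & (1u << i)) == 0) {
            continue;  // the resource cannot live in this memory type
        }
        VkMemoryPropertyFlags flags = props.memoryTypes[i].propertyFlags;
        if (mappable && (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            continue;  // mappable allocations must be host visible
        }
        if (best == -1) {
            best = static_cast<int>(i);
            continue;
        }
        // For non-mappable allocations, prefer device-local memory.
        VkMemoryPropertyFlags bestFlags = props.memoryTypes[best].propertyFlags;
        if (!mappable && (bestFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0 &&
            (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) {
            best = static_cast<int>(i);
            continue;
        }
        // All else being equal, prefer the memory type backed by the bigger heap.
        VkDeviceSize bestHeap = props.memoryHeaps[props.memoryTypes[best].heapIndex].size;
        VkDeviceSize heap = props.memoryHeaps[props.memoryTypes[i].heapIndex].size;
        if (heap > bestHeap) {
            best = static_cast<int>(i);
        }
    }
    return best;
}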

View File backend/vulkan/MemoryAllocator.h

@ -0,0 +1,59 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_VULKAN_MEMORYALLOCATOR_H_
#define BACKEND_VULKAN_MEMORYALLOCATOR_H_
#include "backend/vulkan/vulkan_platform.h"
#include "common/SerialQueue.h"
namespace backend {
namespace vulkan {
class Device;
class MemoryAllocator;
class DeviceMemoryAllocation {
public:
~DeviceMemoryAllocation();
VkDeviceMemory GetMemory() const;
size_t GetMemoryOffset() const;
uint8_t* GetMappedPointer() const;
private:
friend class MemoryAllocator;
VkDeviceMemory memory = VK_NULL_HANDLE;
size_t offset = 0;
uint8_t* mappedPointer = nullptr;
};
class MemoryAllocator {
public:
MemoryAllocator(Device* device);
~MemoryAllocator();
bool Allocate(VkMemoryRequirements requirements, bool mappable, DeviceMemoryAllocation* allocation);
void Free(DeviceMemoryAllocation* allocation);
void Tick(Serial finishedSerial);
private:
Device* device = nullptr;
SerialQueue<VkDeviceMemory> releasedMemory;
};
}
}
#endif // BACKEND_VULKAN_MEMORYALLOCATOR_H_

View File backend/vulkan/VulkanBackend.cpp

@ -15,6 +15,7 @@
#include "backend/vulkan/VulkanBackend.h"
#include "backend/Commands.h"
#include "backend/vulkan/BufferVk.h"
#include "common/Platform.h"
#include <spirv-cross/spirv_cross.hpp>
@ -103,9 +104,28 @@ namespace vulkan {
ASSERT(false);
return;
}
GatherQueueFromDevice();
mapReadRequestTracker = new MapReadRequestTracker(this);
memoryAllocator = new MemoryAllocator(this);
}
Device::~Device() {
// TODO(cwallez@chromium.org): properly wait on everything to be finished
Tick();
if (memoryAllocator) {
delete memoryAllocator;
memoryAllocator = nullptr;
}
if (mapReadRequestTracker) {
delete mapReadRequestTracker;
mapReadRequestTracker = nullptr;
}
// VkQueues are destroyed when the VkDevice is destroyed
if (vkDevice != VK_NULL_HANDLE) {
fn.DestroyDevice(vkDevice, nullptr);
vkDevice = VK_NULL_HANDLE;
@ -116,6 +136,7 @@ namespace vulkan {
debugReportCallback = VK_NULL_HANDLE;
}
// VkPhysicalDevices are destroyed when the VkInstance is destroyed
if (instance != VK_NULL_HANDLE) {
fn.DestroyInstance(instance, nullptr);
instance = VK_NULL_HANDLE;
@ -186,12 +207,39 @@ namespace vulkan {
}
void Device::TickImpl() {
// TODO(cwallez@chromium.org): Correctly track the serial with Semaphores
fn.QueueWaitIdle(queue);
completedSerial = nextSerial;
nextSerial++;
mapReadRequestTracker->Tick(completedSerial);
memoryAllocator->Tick(completedSerial);
}
const VulkanDeviceInfo& Device::GetDeviceInfo() const {
return deviceInfo;
}
MapReadRequestTracker* Device::GetMapReadRequestTracker() const {
return mapReadRequestTracker;
}
MemoryAllocator* Device::GetMemoryAllocator() const {
return memoryAllocator;
}
Serial Device::GetSerial() const {
return nextSerial;
}
VkInstance Device::GetInstance() const {
return instance;
}
VkDevice Device::GetVkDevice() const {
return vkDevice;
}
bool Device::CreateInstance(VulkanGlobalKnobs* usedKnobs) {
std::vector<const char*> layersToRequest;
std::vector<const char*> extensionsToRequest;
@ -293,6 +341,10 @@ namespace vulkan {
return true;
}
void Device::GatherQueueFromDevice() {
fn.GetDeviceQueue(vkDevice, queueFamily, 0, &queue);
}
bool Device::RegisterDebugReport() {
VkDebugReportCallbackCreateInfoEXT createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
@ -326,27 +378,6 @@ namespace vulkan {
return const_cast<VulkanFunctions*>(&fn);
}
// Buffer
Buffer::Buffer(BufferBuilder* builder)
: BufferBase(builder) {
}
Buffer::~Buffer() {
}
void Buffer::SetSubDataImpl(uint32_t, uint32_t, const uint32_t*) {
}
void Buffer::MapReadAsyncImpl(uint32_t, uint32_t, uint32_t) {
}
void Buffer::UnmapImpl() {
}
void Buffer::TransitionUsageImpl(nxt::BufferUsageBit, nxt::BufferUsageBit) {
}
// Queue
Queue::Queue(QueueBuilder* builder)
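
TickImpl above advances a coarse serial (one QueueWaitIdle per tick) and then lets the map-read tracker and the memory allocator process everything queued on completed serials. A standalone sketch of that serial-gated pattern follows, with a std::deque standing in for NXT's SerialQueue; DeferredQueue and the code in main are illustrative only.

#include <cstdint>
#include <deque>
#include <functional>
#include <iostream>
#include <utility>

using Serial = uint64_t;

// Holds items until the serial they were enqueued on is known to have finished.
template <typename T>
class DeferredQueue {
  public:
    void Enqueue(T item, Serial serial) {
        items.push_back(Entry{serial, std::move(item)});
    }

    // Hands every item whose serial is <= finishedSerial to fn, then drops it.
    void TickUpTo(Serial finishedSerial, const std::function<void(T&)>& fn) {
        while (!items.empty() && items.front().serial <= finishedSerial) {
            fn(items.front().item);
            items.pop_front();
        }
    }

  private:
    struct Entry {
        Serial serial;
        T item;
    };
    std::deque<Entry> items;
};

int main() {
    DeferredQueue<std::function<void()>> pendingMapReads;

    // "MapReadAsync": key the callback on the serial of the commands in flight.
    Serial pendingSerial = 1;
    pendingMapReads.Enqueue([] { std::cout << "map-read data ready\n"; }, pendingSerial);

    // Ticking with an older completed serial fires nothing.
    pendingMapReads.TickUpTo(0, [](std::function<void()>& cb) { cb(); });

    // Once serial 1 is observed as completed, the callback runs.
    pendingMapReads.TickUpTo(1, [](std::function<void()>& cb) { cb(); });
    return 0;
}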

View File backend/vulkan/VulkanBackend.h

@ -19,7 +19,6 @@
#include "backend/vulkan/VulkanFunctions.h"
#include "backend/vulkan/VulkanInfo.h"
#include "backend/Buffer.h"
#include "backend/BindGroup.h"
#include "backend/BindGroupLayout.h"
#include "backend/BlendState.h"
@ -39,6 +38,7 @@
#include "backend/Texture.h"
#include "backend/ToBackend.h"
#include "common/DynamicLib.h"
#include "common/Serial.h"
namespace backend {
namespace vulkan {
@ -64,6 +64,9 @@ namespace vulkan {
class Texture;
using TextureView = TextureViewBase;
class MapReadRequestTracker;
class MemoryAllocator;
struct VulkanBackendTraits {
using BindGroupType = BindGroup;
using BindGroupLayoutType = BindGroupLayout;
@ -119,14 +122,21 @@ namespace vulkan {
void TickImpl() override;
const VulkanDeviceInfo& GetDeviceInfo() const;
MapReadRequestTracker* GetMapReadRequestTracker() const;
MemoryAllocator* GetMemoryAllocator() const;
Serial GetSerial() const;
// Contains all the Vulkan entry points; vkDoFoo is called via device->fn.DoFoo.
const VulkanFunctions fn;
VkInstance GetInstance() const;
VkDevice GetVkDevice() const;
private:
bool CreateInstance(VulkanGlobalKnobs* usedKnobs);
bool CreateDevice(VulkanDeviceKnobs* usedKnobs);
void GatherQueueFromDevice();
bool RegisterDebugReport();
static VkBool32 OnDebugReportCallback(VkDebugReportFlagsEXT flags,
@ -151,19 +161,13 @@ namespace vulkan {
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice vkDevice = VK_NULL_HANDLE;
uint32_t queueFamily = 0;
VkQueue queue = VK_NULL_HANDLE;
VkDebugReportCallbackEXT debugReportCallback = VK_NULL_HANDLE;
};
class Buffer : public BufferBase {
public:
Buffer(BufferBuilder* builder);
~Buffer();
private:
void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) override;
void MapReadAsyncImpl(uint32_t serial, uint32_t start, uint32_t count) override;
void UnmapImpl() override;
void TransitionUsageImpl(nxt::BufferUsageBit currentUsage, nxt::BufferUsageBit targetUsage) override;
Serial nextSerial = 1;
Serial completedSerial = 0;
MapReadRequestTracker* mapReadRequestTracker = nullptr;
MemoryAllocator* memoryAllocator = nullptr;
};
class Queue : public QueueBase {