Vulkan: Initial implementation of a swapchain.

This is currently hardcoded to work on one specific HW/OS/driver
version. It also assumes that the graphics queue is the same as the
present queue.
Corentin Wallez 2018-01-26 17:20:36 -05:00 committed by Corentin Wallez
parent 672d7f26e1
commit 92baafc7a0
8 changed files with 412 additions and 46 deletions
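Note: as the commit message says, this assumes the graphics queue can also present. A generalized version would have to query per-queue-family present support before choosing the present queue; a minimal sketch of that check against the standard Vulkan API (the helper name and "not found" value are illustrative, not part of this patch):

#include <cstdint>
#include <vulkan/vulkan.h>

// Sketch: find a queue family that can present to `surface`.
// `physicalDevice` and `surface` are assumed to exist already.
uint32_t FindPresentQueueFamily(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface) {
    uint32_t familyCount = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);
    for (uint32_t i = 0; i < familyCount; ++i) {
        VkBool32 supported = VK_FALSE;
        vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, i, surface, &supported);
        if (supported == VK_TRUE) {
            return i;  // may differ from the graphics queue family
        }
    }
    return UINT32_MAX;  // this device cannot present to the surface
}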

View File

@@ -302,6 +302,8 @@ if (NXT_ENABLE_VULKAN)
${VULKAN_DIR}/InputStateVk.h
${VULKAN_DIR}/MemoryAllocator.cpp
${VULKAN_DIR}/MemoryAllocator.h
+ ${VULKAN_DIR}/NativeSwapChainImplVk.cpp
+ ${VULKAN_DIR}/NativeSwapChainImplVk.h
${VULKAN_DIR}/PipelineLayoutVk.cpp
${VULKAN_DIR}/PipelineLayoutVk.h
${VULKAN_DIR}/RenderPassVk.cpp

View File

@@ -0,0 +1,208 @@
// Copyright 2018 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "backend/vulkan/NativeSwapChainImplVk.h"
#include "backend/vulkan/FencedDeleter.h"
#include "backend/vulkan/TextureVk.h"
#include "backend/vulkan/VulkanBackend.h"
namespace backend { namespace vulkan {
namespace {
bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
NativeSwapChainImpl::ChosenConfig* config) {
// TODO(cwallez@chromium.org): For now this is hardcoded to what works with one NVIDIA
// driver. Need to generalize
config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
config->format = nxt::TextureFormat::B8G8R8A8Unorm;
config->minImageCount = 3;
// TODO(cwallez@chromium.org): This is upside down compared to what we want, at least
// on Linux
config->preTransform = info.capabilities.currentTransform;
config->presentMode = VK_PRESENT_MODE_IMMEDIATE_KHR;
config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
return true;
}
} // anonymous namespace
NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
: mSurface(surface), mDevice(device) {
}
NativeSwapChainImpl::~NativeSwapChainImpl() {
if (mSwapChain != VK_NULL_HANDLE) {
mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
mSwapChain = VK_NULL_HANDLE;
}
if (mSurface != VK_NULL_HANDLE) {
mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
mSurface = VK_NULL_HANDLE;
}
}
void NativeSwapChainImpl::Init(nxtWSIContextVulkan* /*context*/) {
if (!GatherSurfaceInfo(*mDevice, mSurface, &mInfo)) {
ASSERT(false);
}
if (!ChooseSurfaceConfig(mInfo, &mConfig)) {
ASSERT(false);
}
}
nxtSwapChainError NativeSwapChainImpl::Configure(nxtTextureFormat format,
nxtTextureUsageBit usage,
uint32_t width,
uint32_t height) {
ASSERT(mInfo.capabilities.minImageExtent.width <= width);
ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
ASSERT(mInfo.capabilities.minImageExtent.height <= height);
ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
ASSERT(format == static_cast<nxtTextureFormat>(GetPreferredFormat()));
// TODO(cwallez@chromium.org): need to check usage works too
// Create the swapchain with the configuration we chose
VkSwapchainCreateInfoKHR createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.surface = mSurface;
createInfo.minImageCount = mConfig.minImageCount;
createInfo.imageFormat = mConfig.nativeFormat;
createInfo.imageColorSpace = mConfig.colorSpace;
createInfo.imageExtent.width = width;
createInfo.imageExtent.height = height;
createInfo.imageArrayLayers = 1;
createInfo.imageUsage =
VulkanImageUsage(static_cast<nxt::TextureUsageBit>(usage), mConfig.format);
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.queueFamilyIndexCount = 0;
createInfo.pQueueFamilyIndices = nullptr;
createInfo.preTransform = mConfig.preTransform;
createInfo.compositeAlpha = mConfig.compositeAlpha;
createInfo.presentMode = mConfig.presentMode;
createInfo.clipped = false;
createInfo.oldSwapchain = VK_NULL_HANDLE;
if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
&mSwapChain) != VK_SUCCESS) {
ASSERT(false);
}
// Gather the swapchain's images. Implementations are allowed to return more images than the
// number we asked for.
uint32_t count = 0;
if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
nullptr) != VK_SUCCESS) {
ASSERT(false);
}
ASSERT(count >= mConfig.minImageCount);
mSwapChainImages.resize(count);
if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
mSwapChainImages.data()) != VK_SUCCESS) {
ASSERT(false);
}
// Do the initial layout transition for all these images from an undefined layout to
// present so that it matches the "present" usage after the first GetNextTexture.
VkCommandBuffer commands = mDevice->GetPendingCommandBuffer();
for (VkImage image : mSwapChainImages) {
VkImageMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = 0;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.srcQueueFamilyIndex = 0;
barrier.dstQueueFamilyIndex = 0;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
mDevice->fn.CmdPipelineBarrier(commands, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
}
return NXT_SWAP_CHAIN_NO_ERROR;
}
nxtSwapChainError NativeSwapChainImpl::GetNextTexture(nxtSwapChainNextTexture* nextTexture) {
// Transiently create a semaphore that will be signaled when the presentation engine is done
// with the swapchain image. Further operations on the image will wait for this semaphore.
VkSemaphore semaphore = VK_NULL_HANDLE;
{
VkSemaphoreCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
&semaphore) != VK_SUCCESS) {
ASSERT(false);
}
}
if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
std::numeric_limits<uint64_t>::max(), semaphore,
VK_NULL_HANDLE, &mLastImageIndex) != VK_SUCCESS) {
ASSERT(false);
}
nextTexture->texture.u64 = mSwapChainImages[mLastImageIndex].GetHandle();
mDevice->AddWaitSemaphore(semaphore);
return NXT_SWAP_CHAIN_NO_ERROR;
}
nxtSwapChainError NativeSwapChainImpl::Present() {
// Since we're going to do queue operations, we need to flush pending commands such as
// layout transitions of the swapchain images to the PRESENT layout.
mDevice->SubmitPendingCommands();
// Assuming that the present queue is the same as the graphics queue, the proper
// synchronization has already been done by the usage transition to present so we don't
// need to wait on any semaphores.
VkPresentInfoKHR presentInfo;
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.pNext = nullptr;
presentInfo.waitSemaphoreCount = 0;
presentInfo.pWaitSemaphores = nullptr;
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = &mSwapChain;
presentInfo.pImageIndices = &mLastImageIndex;
presentInfo.pResults = nullptr;
VkQueue queue = mDevice->GetQueue();
if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
ASSERT(false);
}
return NXT_SWAP_CHAIN_NO_ERROR;
}
nxt::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
return mConfig.format;
}
}} // namespace backend::vulkan
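A note on the ChooseSurfaceConfig TODO above: a generalized picker would select from the formats and present modes gathered from the surface instead of hardcoding them. A rough sketch of that selection, assuming the gathered surface info exposes std::vector<VkSurfaceFormatKHR> and std::vector<VkPresentModeKHR> (the helper below is illustrative, not the committed code):

#include <vector>
#include <vulkan/vulkan.h>

// Sketch: prefer BGRA8/sRGB and MAILBOX, falling back to whatever the surface offers.
bool PickSurfaceFormatAndPresentMode(const std::vector<VkSurfaceFormatKHR>& formats,
                                     const std::vector<VkPresentModeKHR>& presentModes,
                                     VkSurfaceFormatKHR* outFormat,
                                     VkPresentModeKHR* outPresentMode) {
    if (formats.empty()) {
        return false;
    }
    *outFormat = formats[0];
    for (const VkSurfaceFormatKHR& f : formats) {
        if (f.format == VK_FORMAT_B8G8R8A8_UNORM &&
            f.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
            *outFormat = f;
        }
    }
    // FIFO is the only present mode the spec guarantees; MAILBOX avoids tearing without blocking.
    *outPresentMode = VK_PRESENT_MODE_FIFO_KHR;
    for (VkPresentModeKHR mode : presentModes) {
        if (mode == VK_PRESENT_MODE_MAILBOX_KHR) {
            *outPresentMode = mode;
        }
    }
    return true;
}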

View File

@@ -0,0 +1,69 @@
// Copyright 2018 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_VULKAN_NATIVESWAPCHAINIMPLVK_H_
#define BACKEND_VULKAN_NATIVESWAPCHAINIMPLVK_H_
#include "backend/vulkan/VulkanInfo.h"
#include "nxt/nxt_wsi.h"
#include "nxt/nxtcpp.h"
namespace backend { namespace vulkan {
class Device;
class NativeSwapChainImpl {
public:
using WSIContext = nxtWSIContextVulkan;
NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
~NativeSwapChainImpl();
void Init(nxtWSIContextVulkan* context);
nxtSwapChainError Configure(nxtTextureFormat format,
nxtTextureUsageBit,
uint32_t width,
uint32_t height);
nxtSwapChainError GetNextTexture(nxtSwapChainNextTexture* nextTexture);
nxtSwapChainError Present();
nxt::TextureFormat GetPreferredFormat() const;
struct ChosenConfig {
VkFormat nativeFormat;
nxt::TextureFormat format;
VkColorSpaceKHR colorSpace;
VkSurfaceTransformFlagBitsKHR preTransform;
uint32_t minImageCount;
VkPresentModeKHR presentMode;
VkCompositeAlphaFlagBitsKHR compositeAlpha;
};
private:
VkSurfaceKHR mSurface = VK_NULL_HANDLE;
VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
std::vector<VkImage> mSwapChainImages;
uint32_t mLastImageIndex = 0;
VulkanSurfaceInfo mInfo;
ChosenConfig mConfig;
Device* mDevice = nullptr;
};
}} // namespace backend::vulkan
#endif // BACKEND_VULKAN_NATIVESWAPCHAINIMPLVK_H_

View File

@@ -43,34 +43,6 @@ namespace backend { namespace vulkan {
}
}
// Converts the NXT usage flags to Vulkan usage flags. Also needs the format to choose
// between color and depth attachment usages.
VkImageUsageFlags VulkanImageUsage(nxt::TextureUsageBit usage, nxt::TextureFormat format) {
VkImageUsageFlags flags = 0;
if (usage & nxt::TextureUsageBit::TransferSrc) {
flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
if (usage & nxt::TextureUsageBit::TransferDst) {
flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
}
if (usage & nxt::TextureUsageBit::Sampled) {
flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
}
if (usage & nxt::TextureUsageBit::Storage) {
flags |= VK_IMAGE_USAGE_STORAGE_BIT;
}
if (usage & nxt::TextureUsageBit::OutputAttachment) {
if (TextureFormatHasDepthOrStencil(format)) {
flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
} else {
flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
}
return flags;
}
// Computes which vulkan access type could be required for the given NXT usage.
VkAccessFlags VulkanAccessFlags(nxt::TextureUsageBit usage, nxt::TextureFormat format) {
VkAccessFlags flags = 0;
@@ -227,6 +199,34 @@ namespace backend { namespace vulkan {
}
}
// Converts the NXT usage flags to Vulkan usage flags. Also needs the format to choose
// between color and depth attachment usages.
VkImageUsageFlags VulkanImageUsage(nxt::TextureUsageBit usage, nxt::TextureFormat format) {
VkImageUsageFlags flags = 0;
if (usage & nxt::TextureUsageBit::TransferSrc) {
flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
if (usage & nxt::TextureUsageBit::TransferDst) {
flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
}
if (usage & nxt::TextureUsageBit::Sampled) {
flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
}
if (usage & nxt::TextureUsageBit::Storage) {
flags |= VK_IMAGE_USAGE_STORAGE_BIT;
}
if (usage & nxt::TextureUsageBit::OutputAttachment) {
if (TextureFormatHasDepthOrStencil(format)) {
flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
} else {
flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
}
return flags;
}
Texture::Texture(TextureBuilder* builder) : TextureBase(builder) {
Device* device = ToBackend(GetDevice());
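The comment on VulkanImageUsage above is the key detail: OutputAttachment maps to a color or a depth/stencil attachment bit depending on the format. A small illustration of the mapping, assuming the NXT usage enum has the usual bitmask operators (the surrounding code already relies on operator&); the expected values in the comments follow directly from the branches of the function:

// For a color format, OutputAttachment becomes a color attachment usage:
VkImageUsageFlags colorFlags = VulkanImageUsage(nxt::TextureUsageBit::OutputAttachment,
                                                nxt::TextureFormat::B8G8R8A8Unorm);
// colorFlags == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT

// Combined usages simply OR their Vulkan equivalents together:
VkImageUsageFlags copySrcSampled = VulkanImageUsage(
    nxt::TextureUsageBit::TransferSrc | nxt::TextureUsageBit::Sampled,
    nxt::TextureFormat::B8G8R8A8Unorm);
// copySrcSampled == (VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT)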

View File

@@ -23,6 +23,7 @@
namespace backend { namespace vulkan {
VkFormat VulkanImageFormat(nxt::TextureFormat format);
+ VkImageUsageFlags VulkanImageUsage(nxt::TextureUsageBit usage, nxt::TextureFormat format);
class Texture : public TextureBase {
public:

View File

@@ -21,6 +21,7 @@
#include "backend/vulkan/FencedDeleter.h"
#include "backend/vulkan/FramebufferVk.h"
#include "backend/vulkan/InputStateVk.h"
+ #include "backend/vulkan/NativeSwapChainImplVk.h"
#include "backend/vulkan/PipelineLayoutVk.h"
#include "backend/vulkan/RenderPassVk.h"
#include "backend/vulkan/RenderPipelineVk.h"
@@ -28,6 +29,7 @@
#include "backend/vulkan/SwapChainVk.h"
#include "backend/vulkan/TextureVk.h"
#include "common/Platform.h"
+ #include "common/SwapChainUtils.h"
#include <spirv-cross/spirv_cross.hpp>
@@ -46,14 +48,31 @@ namespace backend { namespace vulkan {
nxtProcTable GetNonValidatingProcs();
nxtProcTable GetValidatingProcs();
- void Init(nxtProcTable* procs, nxtDevice* device) {
+ void Init(nxtProcTable* procs,
+ nxtDevice* device,
+ const std::vector<const char*>& requiredInstanceExtensions) {
*procs = GetValidatingProcs();
- *device = reinterpret_cast<nxtDevice>(new Device);
+ *device = reinterpret_cast<nxtDevice>(new Device(requiredInstanceExtensions));
+ }
+ VkInstance GetInstance(nxtDevice device) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ return backendDevice->GetInstance();
+ }
+ nxtSwapChainImplementation CreateNativeSwapChainImpl(nxtDevice device, VkSurfaceKHR surface) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ return CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+ }
+ nxtTextureFormat GetNativeSwapChainPreferredFormat(
+ const nxtSwapChainImplementation* swapChain) {
+ NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+ return static_cast<nxtTextureFormat>(impl->GetPreferredFormat());
}
// Device
- Device::Device() {
+ Device::Device(const std::vector<const char*>& requiredInstanceExtensions) {
if (!mVulkanLib.Open(kVulkanLibName)) {
ASSERT(false);
return;
@@ -72,7 +91,7 @@ namespace backend { namespace vulkan {
}
VulkanGlobalKnobs usedGlobalKnobs = {};
- if (!CreateInstance(&usedGlobalKnobs)) {
+ if (!CreateInstance(&usedGlobalKnobs, requiredInstanceExtensions)) {
ASSERT(false);
return;
}
@@ -145,6 +164,8 @@ namespace backend { namespace vulkan {
}
mUnusedCommands.clear();
+ ASSERT(mWaitSemaphores.empty());
for (VkFence fence : mUnusedFences) {
fn.DestroyFence(mVkDevice, fence, nullptr);
}
@@ -274,6 +295,14 @@ namespace backend { namespace vulkan {
return mVkDevice;
}
+ uint32_t Device::GetGraphicsQueueFamily() const {
+ return mQueueFamily;
+ }
+ VkQueue Device::GetQueue() const {
+ return mQueue;
+ }
MapReadRequestTracker* Device::GetMapReadRequestTracker() const {
return mMapReadRequestTracker;
}
@@ -321,12 +350,15 @@ namespace backend { namespace vulkan {
ASSERT(false);
}
+ std::vector<VkPipelineStageFlags> dstStageMasks(mWaitSemaphores.size(),
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
VkSubmitInfo submitInfo;
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.pNext = nullptr;
- submitInfo.waitSemaphoreCount = 0;
+ submitInfo.waitSemaphoreCount = static_cast<uint32_t>(mWaitSemaphores.size());
- submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.pWaitSemaphores = mWaitSemaphores.data();
- submitInfo.pWaitDstStageMask = 0;
+ submitInfo.pWaitDstStageMask = dstStageMasks.data();
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &mPendingCommands.commandBuffer;
submitInfo.signalSemaphoreCount = 0;
@@ -340,12 +372,33 @@ namespace backend { namespace vulkan {
mCommandsInFlight.Enqueue(mPendingCommands, mNextSerial);
mPendingCommands = CommandPoolAndBuffer();
mFencesInFlight.emplace(fence, mNextSerial);
+ for (VkSemaphore semaphore : mWaitSemaphores) {
+ mDeleter->DeleteWhenUnused(semaphore);
+ }
+ mWaitSemaphores.clear();
mNextSerial++;
}
- bool Device::CreateInstance(VulkanGlobalKnobs* usedKnobs) {
+ void Device::AddWaitSemaphore(VkSemaphore semaphore) {
+ mWaitSemaphores.push_back(semaphore);
+ }
+ bool Device::CreateInstance(VulkanGlobalKnobs* usedKnobs,
+ const std::vector<const char*>& requiredExtensions) {
std::vector<const char*> layersToRequest;
- std::vector<const char*> extensionsToRequest;
+ std::vector<const char*> extensionsToRequest = requiredExtensions;
+ auto AddExtensionIfNotPresent = [](std::vector<const char*>* extensions,
+ const char* extension) {
+ for (const char* present : *extensions) {
+ if (strcmp(present, extension) == 0) {
+ return;
+ }
+ }
+ extensions->push_back(extension);
+ };
// vktrace works by instering a layer, so we need to explicitly enable it if it is present.
// Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
@@ -368,12 +421,12 @@ namespace backend { namespace vulkan {
usedKnobs->standardValidation = true;
}
if (mGlobalInfo.debugReport) {
- extensionsToRequest.push_back(kExtensionNameExtDebugReport);
+ AddExtensionIfNotPresent(&extensionsToRequest, kExtensionNameExtDebugReport);
usedKnobs->debugReport = true;
}
#endif
if (mGlobalInfo.surface) {
- extensionsToRequest.push_back(kExtensionNameKhrSurface);
+ AddExtensionIfNotPresent(&extensionsToRequest, kExtensionNameKhrSurface);
usedKnobs->surface = true;
}
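Related to the assumption called out in the commit message: Present() passes no wait semaphores because the graphics queue is also the present queue. If the present queue ever differs, the graphics submit would have to signal a semaphore that the present waits on (and the image would also need a queue family ownership transfer). A sketch of only the present side, reusing the NativeSwapChainImpl members; `presentQueue` and `renderDoneSemaphore` are assumptions, not part of this patch:

// Sketch only: presenting on a separate queue needs an explicit wait.
VkPresentInfoKHR presentInfo;
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.pNext = nullptr;
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = &renderDoneSemaphore;
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = &mSwapChain;
presentInfo.pImageIndices = &mLastImageIndex;
presentInfo.pResults = nullptr;
if (mDevice->fn.QueuePresentKHR(presentQueue, &presentInfo) != VK_SUCCESS) {
    ASSERT(false);
}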

View File

@@ -92,7 +92,7 @@ namespace backend { namespace vulkan {
class Device : public DeviceBase {
public:
- Device();
+ Device(const std::vector<const char*>& requiredInstanceExtensions);
~Device();
// Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
@@ -102,6 +102,8 @@ namespace backend { namespace vulkan {
VkInstance GetInstance() const;
VkPhysicalDevice GetPhysicalDevice() const;
VkDevice GetVkDevice() const;
+ uint32_t GetGraphicsQueueFamily() const;
+ VkQueue GetQueue() const;
BufferUploader* GetBufferUploader() const;
FencedDeleter* GetFencedDeleter() const;
@@ -112,6 +114,7 @@ namespace backend { namespace vulkan {
VkCommandBuffer GetPendingCommandBuffer();
void SubmitPendingCommands();
+ void AddWaitSemaphore(VkSemaphore semaphore);
// NXT API
BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) override;
@@ -137,7 +140,8 @@ namespace backend { namespace vulkan {
void TickImpl() override;
private:
- bool CreateInstance(VulkanGlobalKnobs* usedKnobs);
+ bool CreateInstance(VulkanGlobalKnobs* usedKnobs,
+ const std::vector<const char*>& requiredExtensions);
bool CreateDevice(VulkanDeviceKnobs* usedKnobs);
void GatherQueueFromDevice();
@@ -197,6 +201,7 @@ namespace backend { namespace vulkan {
SerialQueue<CommandPoolAndBuffer> mCommandsInFlight;
std::vector<CommandPoolAndBuffer> mUnusedCommands;
CommandPoolAndBuffer mPendingCommands;
+ std::vector<VkSemaphore> mWaitSemaphores;
};
class Queue : public QueueBase { class Queue : public QueueBase {

View File

@@ -14,11 +14,23 @@
#include "utils/BackendBinding.h"
- #include "common/SwapChainUtils.h"
+ #include "common/Assert.h"
+ #include "common/vulkan_platform.h"
#include "nxt/nxt_wsi.h"
+ #include "GLFW/glfw3.h"
+ #include <vector>
namespace backend { namespace vulkan {
- void Init(nxtProcTable* procs, nxtDevice* device);
+ void Init(nxtProcTable* procs,
+ nxtDevice* device,
+ const std::vector<const char*>& requiredInstanceExtensions);
+ VkInstance GetInstance(nxtDevice device);
+ nxtSwapChainImplementation CreateNativeSwapChainImpl(nxtDevice device, VkSurfaceKHR surface);
+ nxtTextureFormat GetNativeSwapChainPreferredFormat(const nxtSwapChainImplementation* swapChain);
}} // namespace backend::vulkan
namespace utils {
@@ -52,21 +64,37 @@ namespace utils {
class VulkanBinding : public BackendBinding {
public:
void SetupGLFWWindowHints() override {
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
}
void GetProcAndDevice(nxtProcTable* procs, nxtDevice* device) override {
- backend::vulkan::Init(procs, device);
+ uint32_t extensionCount = 0;
+ const char** glfwInstanceExtensions =
+ glfwGetRequiredInstanceExtensions(&extensionCount);
+ std::vector<const char*> requiredExtensions(glfwInstanceExtensions,
+ glfwInstanceExtensions + extensionCount);
+ backend::vulkan::Init(procs, device, requiredExtensions);
+ mDevice = *device;
}
uint64_t GetSwapChainImplementation() override {
if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = CreateSwapChainImplementation(new SwapChainImplVulkan(mWindow));
+ VkSurfaceKHR surface = VK_NULL_HANDLE;
+ if (glfwCreateWindowSurface(backend::vulkan::GetInstance(mDevice), mWindow, nullptr,
+ &surface) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+ mSwapchainImpl = backend::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
}
return reinterpret_cast<uint64_t>(&mSwapchainImpl);
}
nxtTextureFormat GetPreferredSwapChainTextureFormat() override {
- return NXT_TEXTURE_FORMAT_R8_G8_B8_A8_UNORM;
+ ASSERT(mSwapchainImpl.userData != nullptr);
+ return backend::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
}
private:
+ nxtDevice mDevice;
nxtSwapChainImplementation mSwapchainImpl = {};
};
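One caveat for the binding change above: glfwGetRequiredInstanceExtensions returns nullptr (with a count of 0) when Vulkan is not available on the system, so constructing the extension vector from that pointer would read a null range. A minimal guard, sketched in the same style as the committed code (the early return mirrors the existing ASSERT-on-failure pattern, it is not part of this patch):

uint32_t extensionCount = 0;
const char** glfwInstanceExtensions = glfwGetRequiredInstanceExtensions(&extensionCount);
// GLFW reports "no Vulkan" by returning nullptr; fail loudly instead of building
// the vector from a null pointer.
if (glfwInstanceExtensions == nullptr) {
    ASSERT(false);
    return;
}
std::vector<const char*> requiredExtensions(glfwInstanceExtensions,
                                            glfwInstanceExtensions + extensionCount);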