Add support for cubemap textures in GL and Vulkan

Jack Andersen 2019-05-31 17:39:55 -10:00
parent 22cb5bb0a8
commit 18faf55750
7 changed files with 756 additions and 122 deletions

View File

@ -39,6 +39,7 @@ public:
ObjToken<ITextureD> newDynamicTexture(size_t width, size_t height, TextureFormat fmt, TextureClampMode clampMode);
ObjToken<ITextureR> newRenderTexture(size_t width, size_t height, TextureClampMode clampMode,
size_t colorBindingCount, size_t depthBindingCount);
ObjToken<ITextureCubeR> newCubeRenderTexture(size_t width, size_t mips);
ObjToken<IShaderStage> newShaderStage(const uint8_t* data, size_t size, PipelineStage stage);

View File

@ -15,10 +15,13 @@ struct IGraphicsCommandQueue {
virtual void setShaderDataBinding(const ObjToken<IShaderDataBinding>& binding) = 0;
virtual void setRenderTarget(const ObjToken<ITextureR>& target) = 0;
virtual void setRenderTarget(const ObjToken<ITextureCubeR>& target, int face) = 0;
virtual void setViewport(const SWindowRect& rect, float znear = 0.f, float zfar = 1.f) = 0;
virtual void setScissor(const SWindowRect& rect) = 0;
virtual void resizeRenderTexture(const ObjToken<ITextureR>& tex, size_t width, size_t height) = 0;
virtual void resizeRenderTexture(const ObjToken<ITextureCubeR>& tex, size_t width, size_t mips) = 0;
virtual void generateMipmaps(const ObjToken<ITextureCubeR>& tex) = 0;
virtual void schedulePostFrameHandler(std::function<void(void)>&& func) = 0;
virtual void setClearColor(const float rgba[4]) = 0;
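A minimal sketch of how a caller might drive these new cube entry points each frame, assuming the boo graphicsdev headers are included; renderEnvironmentMap, cubeTex, and faceSize are illustrative names, not part of this commit:

// Hypothetical per-frame helper: draw into each of the six faces, then rebuild the
// mip chain so the cube texture can be sampled with LOD afterwards.
void renderEnvironmentMap(boo::IGraphicsCommandQueue* q,
                          const boo::ObjToken<boo::ITextureCubeR>& cubeTex,
                          size_t faceSize) {
  for (int face = 0; face < 6; ++face) {
    q->setRenderTarget(cubeTex, face);  // per-face overload added above
    q->setViewport(boo::SWindowRect(0, 0, int(faceSize), int(faceSize)));
    // ... bind shader data and issue draw calls for this face ...
  }
  q->generateMipmaps(cubeTex);          // new virtual added above
}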

View File

@ -44,7 +44,7 @@ protected:
};
/** Texture access types */
enum class TextureType { Static, StaticArray, Dynamic, Render, CubeRender };
/** Supported texture formats */
enum class TextureFormat { RGBA8, I8, I16, DXT1, DXT3, PVRTC4 };
@ -92,6 +92,12 @@ protected:
ITextureR() : ITexture(TextureType::Render) {}
};
/** Resource buffer for cube render-target textures */
struct ITextureCubeR : ITexture {
protected:
ITextureCubeR() : ITexture(TextureType::CubeRender) {}
};
/** Types of vertex attributes */
enum class VertexSemantic {
None = 0,
@ -223,6 +229,7 @@ struct IGraphicsDataFactory {
TextureClampMode clampMode) = 0;
virtual ObjToken<ITextureR> newRenderTexture(size_t width, size_t height, TextureClampMode clampMode,
size_t colorBindingCount, size_t depthBindingCount) = 0;
virtual ObjToken<ITextureCubeR> newCubeRenderTexture(size_t width, size_t mips) = 0;
virtual ObjToken<IShaderStage> newShaderStage(const uint8_t* data, size_t size, PipelineStage stage) = 0;
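Creating one of these textures would go through the factory's transaction context, following the same commitTransaction pattern the GL backend uses later in this commit. A rough sketch, assuming a factory pointer and boo's BooTrace call-site macro; the 512/10 sizing is only an example:

boo::ObjToken<boo::ITextureCubeR> cubeTex;
factory->commitTransaction([&](boo::IGraphicsDataFactory::Context& ctx) {
  // 512x512 faces with a 10-level mip chain (512 down to 1); both values are caller-chosen
  cubeTex = ctx.newCubeRenderTexture(512, 10);
  return true;
} BooTrace);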

View File

@ -24,6 +24,10 @@ struct VulkanContext {
std::vector<VkExtensionProperties> extensions;
};
#ifndef NDEBUG
PFN_vkDestroyDebugReportCallbackEXT m_destroyDebugReportCallback = nullptr;
VkDebugReportCallbackEXT m_debugReportCallback = VK_NULL_HANDLE;
#endif
std::vector<LayerProperties> m_instanceLayerProperties;
std::vector<const char*> m_layerNames;
std::vector<const char*> m_instanceExtensionNames;
@ -43,6 +47,7 @@ struct VulkanContext {
VkDescriptorSetLayout m_descSetLayout = VK_NULL_HANDLE;
VkPipelineLayout m_pipelinelayout = VK_NULL_HANDLE;
VkRenderPass m_pass = VK_NULL_HANDLE;
VkRenderPass m_passOneSample = VK_NULL_HANDLE;
VkRenderPass m_passColorOnly = VK_NULL_HANDLE;
VkCommandPool m_loadPool = VK_NULL_HANDLE;
VkCommandBuffer m_loadCmdBuf = VK_NULL_HANDLE;
@ -143,6 +148,7 @@ public:
TextureClampMode clampMode);
boo::ObjToken<ITextureR> newRenderTexture(size_t width, size_t height, TextureClampMode clampMode,
size_t colorBindCount, size_t depthBindCount);
ObjToken<ITextureCubeR> newCubeRenderTexture(size_t width, size_t mips);
ObjToken<IShaderStage> newShaderStage(const uint8_t* data, size_t size, PipelineStage stage);

View File

@ -41,7 +41,7 @@ struct BaseGraphicsData : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*> {
__BooTraceFields
GraphicsDataNode<IShaderStage, BaseGraphicsData>* m_Ss = nullptr;
GraphicsDataNode<IShaderPipeline, BaseGraphicsData>* m_SPs = nullptr;
GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>* m_SBinds = nullptr;
GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>* m_SBufs = nullptr;
@ -50,6 +50,7 @@ struct BaseGraphicsData : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*> {
GraphicsDataNode<ITextureSA, BaseGraphicsData>* m_SATexs = nullptr;
GraphicsDataNode<ITextureD, BaseGraphicsData>* m_DTexs = nullptr;
GraphicsDataNode<ITextureR, BaseGraphicsData>* m_RTexs = nullptr;
GraphicsDataNode<ITextureCubeR, BaseGraphicsData>* m_CubeRTexs = nullptr;
template <class T>
GraphicsDataNode<T, BaseGraphicsData>*& getHead();
template <class T>
@ -101,6 +102,10 @@ template <>
inline GraphicsDataNode<ITextureR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureR>() {
return m_RTexs;
}
template <>
inline GraphicsDataNode<ITextureCubeR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureCubeR>() {
return m_CubeRTexs;
}
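The new specialization follows the same compile-time head-selection idiom the other resource types use: one intrusive list head per node type, picked by getHead<T>() with no runtime dispatch. A self-contained toy version of the idiom, with illustrative names rather than boo's:

struct NodeA;
struct NodeB;
struct Registry {
  NodeA* m_aHead = nullptr;
  NodeB* m_bHead = nullptr;
  template <class T>
  T*& getHead();
};
template <>
inline NodeA*& Registry::getHead<NodeA>() { return m_aHead; }
template <>
inline NodeB*& Registry::getHead<NodeB>() { return m_bHead; }
struct NodeA { NodeA* m_next = nullptr; };
struct NodeB { NodeB* m_next = nullptr; };
// Linking a node of either type picks the matching head at compile time.
template <class T>
void link(Registry& r, T* node) {
  node->m_next = r.getHead<T>();
  r.getHead<T>() = node;
}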
/** Private generalized pool container class.
* Keeps head pointer to exactly one dynamic buffer while otherwise conforming to BaseGraphicsData
@ -113,7 +118,7 @@ struct BaseGraphicsPool : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*> {
__BooTraceFields
GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>* m_DBufs = nullptr;
template <class T>
GraphicsDataNode<T, BaseGraphicsPool>*& getHead();
template <class T>

View File

@ -85,6 +85,8 @@ class GLDataFactoryImpl : public GLDataFactory, public GraphicsDataFactoryHead {
m_maxPatchSize = uint32_t(maxPVerts);
}
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
commitTransaction([this](IGraphicsDataFactory::Context& ctx) {
auto vertex = ctx.newShaderStage((uint8_t*)GammaVS, 0, PipelineStage::Vertex);
auto fragment = ctx.newShaderStage((uint8_t*)GammaFS, 0, PipelineStage::Fragment);
@ -601,6 +603,62 @@ public:
}
};
class GLTextureCubeR : public GraphicsDataNode<ITextureCubeR> {
friend class GLDataFactory;
friend struct GLCommandQueue;
struct GLCommandQueue* m_q;
GLuint m_texs[2] = {};
GLuint m_fbos[6] = {};
size_t m_width = 0;
size_t m_mipCount = 0;
GLenum m_colorFormat;
GLTextureCubeR(const ObjToken<BaseGraphicsData>& parent, GLCommandQueue* q, size_t width, size_t mips, GLenum colorFormat);
public:
~GLTextureCubeR() {
glDeleteTextures(2, m_texs);
glDeleteFramebuffers(6, m_fbos);
}
void setClampMode(TextureClampMode mode) {}
void bind(size_t idx) const {
glActiveTexture(GL_TEXTURE0 + idx);
glBindTexture(GL_TEXTURE_CUBE_MAP, m_texs[0]);
}
void _allocateTextures() {
GLenum compType = m_colorFormat == GL_RGBA16 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
glBindTexture(GL_TEXTURE_CUBE_MAP, m_texs[0]);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAX_LEVEL, m_mipCount - 1);
for (int f = 0; f < 6; ++f) {
size_t tmpWidth = m_width;
for (int m = 0; m < m_mipCount; ++m) {
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + f, m, m_colorFormat, tmpWidth, tmpWidth,
0, GL_RGBA, compType, nullptr);
tmpWidth >>= 1;
}
}
glBindTexture(GL_TEXTURE_CUBE_MAP, m_texs[1]);
for (int f = 0; f < 6; ++f)
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + f, 0, GL_DEPTH_COMPONENT32F, m_width, m_width, 0, GL_DEPTH_COMPONENT,
GL_UNSIGNED_INT, nullptr);
}
void resize(size_t width, size_t mips) {
m_width = width;
m_mipCount = mips;
_allocateTextures();
for (int f = 0; f < 6; ++f) {
glBindFramebuffer(GL_FRAMEBUFFER, m_fbos[f]);
glDepthMask(GL_TRUE);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
}
};
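Note that newCubeRenderTexture takes the mip count explicitly and _allocateTextures() simply halves the edge length per level. A hypothetical helper (not part of this commit) showing how a caller could size a full chain:

#include <cstddef>
// Number of mip levels in a full chain for a square face of the given edge length,
// matching the halving loop in _allocateTextures() above.
static size_t FullMipCount(size_t width) {
  size_t mips = 0;
  while (width > 0) {
    ++mips;
    width >>= 1;
  }
  return mips;
}
// e.g. FullMipCount(512) == 10, so newCubeRenderTexture(512, 10) allocates the whole chain.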
ObjToken<ITextureS> GLDataFactory::Context::newStaticTexture(size_t width, size_t height, size_t mips,
TextureFormat fmt, TextureClampMode clampMode,
const void* data, size_t sz) {
@ -899,7 +957,8 @@ public:
} else
glDisable(GL_CULL_FACE);
if (m_patchSize)
glPatchParameteri(GL_PATCH_VERTICES, m_patchSize);
return m_prog;
}
@ -1008,6 +1067,9 @@ struct GLShaderDataBinding : GraphicsDataNode<IShaderDataBinding> {
case TextureType::Render:
tex.tex.cast<GLTextureR>()->bind(i, tex.idx, tex.depth);
break;
case TextureType::CubeRender:
tex.tex.cast<GLTextureCubeR>()->bind(i);
break;
default:
break;
}
@ -1062,6 +1124,7 @@ struct GLCommandQueue : IGraphicsCommandQueue {
enum class Op {
SetShaderDataBinding,
SetRenderTarget,
SetCubeRenderTarget,
SetViewport,
SetScissor,
SetClearColor,
@ -1071,6 +1134,7 @@ struct GLCommandQueue : IGraphicsCommandQueue {
DrawInstances,
DrawInstancesIndexed,
ResolveBindTexture,
GenerateMips,
Present
} m_op;
union {
@ -1088,7 +1152,7 @@ struct GLCommandQueue : IGraphicsCommandQueue {
};
};
ObjToken<IShaderDataBinding> binding;
ObjToken<ITexture> target;
ObjToken<ITextureR> source;
ObjToken<ITextureR> resolveTex;
int bindIdx;
@ -1113,13 +1177,20 @@ struct GLCommandQueue : IGraphicsCommandQueue {
size_t height;
};
struct CubeRenderTextureResize {
ObjToken<ITextureCubeR> tex;
size_t width, mips;
};
/* These members are locked for multithreaded access */
std::vector<RenderTextureResize> m_pendingResizes;
std::vector<CubeRenderTextureResize> m_pendingCubeResizes;
std::vector<std::function<void(void)>> m_pendingPosts1;
std::vector<std::function<void(void)>> m_pendingPosts2;
std::vector<ObjToken<IShaderDataBinding>> m_pendingFmtAdds;
std::vector<std::array<GLuint, 3>> m_pendingFmtDels;
std::vector<ObjToken<ITextureR>> m_pendingFboAdds;
std::vector<ObjToken<ITextureCubeR>> m_pendingCubeFboAdds;
static void ConfigureVertexFormat(GLShaderDataBinding* fmt) {
glGenVertexArrays(3, fmt->m_vao.data());
@ -1202,6 +1273,15 @@ struct GLCommandQueue : IGraphicsCommandQueue {
}
}
static void ConfigureFBO(GLTextureCubeR* tex) {
glGenFramebuffers(6, tex->m_fbos);
for (int i = 0; i < 6; ++i) {
glBindFramebuffer(GL_FRAMEBUFFER, tex->m_fbos[i]);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, tex->m_texs[0], 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, tex->m_texs[1], 0);
}
}
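ConfigureFBO attaches one color face and one depth face per framebuffer but never checks completeness. A small optional validation pass a caller could add while bringing this path up, assuming the GL headers already used in this file; ValidateCubeFBOs is a hypothetical name:

static bool ValidateCubeFBOs(const GLuint (&fbos)[6]) {
  for (int i = 0; i < 6; ++i) {
    glBindFramebuffer(GL_FRAMEBUFFER, fbos[i]);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
      return false;  // attachment combination rejected by the driver
  }
  glBindFramebuffer(GL_FRAMEBUFFER, 0);
  return true;
}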
static void RenderingWorker(GLCommandQueue* self) {
BOO_MSAN_NO_INTERCEPT
#if _WIN32
@ -1252,12 +1332,24 @@ struct GLCommandQueue : IGraphicsCommandQueue {
self->m_pendingFboAdds.clear();
}
if (self->m_pendingCubeFboAdds.size()) {
for (ObjToken<ITextureCubeR>& tex : self->m_pendingCubeFboAdds)
ConfigureFBO(tex.cast<GLTextureCubeR>());
self->m_pendingCubeFboAdds.clear();
}
if (self->m_pendingResizes.size()) {
for (const RenderTextureResize& resize : self->m_pendingResizes)
resize.tex.cast<GLTextureR>()->resize(resize.width, resize.height);
self->m_pendingResizes.clear();
}
if (self->m_pendingCubeResizes.size()) {
for (const CubeRenderTextureResize& resize : self->m_pendingCubeResizes)
resize.tex.cast<GLTextureCubeR>()->resize(resize.width, resize.mips);
self->m_pendingCubeResizes.clear();
}
{
std::lock_guard<std::recursive_mutex> fmtLk(self->m_fmtMt);
@ -1279,20 +1371,24 @@ struct GLCommandQueue : IGraphicsCommandQueue {
}
std::vector<Command>& cmds = self->m_cmdBufs[self->m_drawBuf];
GLenum currentPrim = GL_TRIANGLES;
const GLShaderDataBinding* curBinding = nullptr;
GLuint curFBO = 0;
for (const Command& cmd : cmds) {
switch (cmd.m_op) {
case Command::Op::SetShaderDataBinding: {
const GLShaderDataBinding* binding = cmd.binding.cast<GLShaderDataBinding>();
binding->bind(self->m_drawBuf);
curBinding = binding;
currentPrim = binding->m_pipeline.cast<GLShaderPipeline>()->m_drawPrim;
break;
}
case Command::Op::SetRenderTarget: {
const GLTextureR* tex = cmd.target.cast<GLTextureR>();
curFBO = tex ? tex->m_fbo : 0;
glBindFramebuffer(GL_FRAMEBUFFER, curFBO);
break;
}
case Command::Op::SetCubeRenderTarget: {
const GLTextureCubeR* tex = cmd.target.cast<GLTextureCubeR>();
curFBO = tex ? tex->m_fbos[cmd.bindIdx] : 0;
glBindFramebuffer(GL_FRAMEBUFFER, curFBO);
break;
}
@ -1380,6 +1476,13 @@ struct GLCommandQueue : IGraphicsCommandQueue {
glBindFramebuffer(GL_FRAMEBUFFER, curFBO);
break;
}
case Command::Op::GenerateMips: {
if (const GLTextureCubeR* tex = cmd.target.cast<GLTextureCubeR>()) {
glBindTexture(GL_TEXTURE_CUBE_MAP, tex->m_texs[0]);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
}
break;
}
case Command::Op::Present: {
if (const GLTextureR* tex = cmd.source.cast<GLTextureR>()) {
#ifndef NDEBUG
@ -1406,6 +1509,27 @@ struct GLCommandQueue : IGraphicsCommandQueue {
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, tex->m_width, tex->m_height, 0, 0, tex->m_width, tex->m_height,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
#if 1
/* First cubemap dump */
int offset = 0;
int voffset = 0;
for (BaseGraphicsData& data : *dataFactory->m_dataHead) {
if (GLTextureCubeR* cube = static_cast<GLTextureCubeR*>(data.getHead<ITextureCubeR>())) {
for (int i = 0; i < 6; ++i) {
glBindFramebuffer(GL_READ_FRAMEBUFFER, cube->m_fbos[i]);
glBlitFramebuffer(0, 0, cube->m_width, cube->m_width, offset, voffset,
cube->m_width + offset, cube->m_width + voffset,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
offset += cube->m_width;
if (i == 2) {
offset = 0;
voffset += cube->m_width;
}
}
break;
}
}
#endif
}
}
self->m_parent->present();
@ -1458,7 +1582,14 @@ struct GLCommandQueue : IGraphicsCommandQueue {
void setRenderTarget(const ObjToken<ITextureR>& target) {
std::vector<Command>& cmds = m_cmdBufs[m_fillBuf];
cmds.emplace_back(Command::Op::SetRenderTarget);
cmds.back().target = target.get();
}
void setRenderTarget(const ObjToken<ITextureCubeR>& target, int face) {
std::vector<Command>& cmds = m_cmdBufs[m_fillBuf];
cmds.emplace_back(Command::Op::SetCubeRenderTarget);
cmds.back().target = target.get();
cmds.back().bindIdx = face;
}
void setViewport(const SWindowRect& rect, float znear, float zfar) {
@ -1481,6 +1612,18 @@ struct GLCommandQueue : IGraphicsCommandQueue {
m_pendingResizes.push_back({texgl, width, height});
}
void resizeRenderTexture(const ObjToken<ITextureCubeR>& tex, size_t width, size_t mips) {
std::unique_lock<std::mutex> lk(m_mt);
GLTextureCubeR* texgl = tex.cast<GLTextureCubeR>();
m_pendingCubeResizes.push_back({texgl, width, mips});
}
void generateMipmaps(const ObjToken<ITextureCubeR>& tex) {
std::vector<Command>& cmds = m_cmdBufs[m_fillBuf];
cmds.emplace_back(Command::Op::GenerateMips);
cmds.back().target = tex.get();
}
void schedulePostFrameHandler(std::function<void(void)>&& func) { m_pendingPosts1.push_back(std::move(func)); }
void setClearColor(const float rgba[4]) {
@ -1576,6 +1719,11 @@ struct GLCommandQueue : IGraphicsCommandQueue {
m_pendingFboAdds.push_back(tex);
}
void addFBO(const ObjToken<ITextureCubeR>& tex) {
std::unique_lock<std::mutex> lk(m_mt);
m_pendingCubeFboAdds.push_back(tex);
}
void execute() {
BOO_MSAN_NO_INTERCEPT
std::unique_lock<std::mutex> lk(m_mt);
@ -1703,6 +1851,32 @@ ObjToken<ITextureR> GLDataFactory::Context::newRenderTexture(size_t width, size_
return retval;
}
GLTextureCubeR::GLTextureCubeR(const ObjToken<BaseGraphicsData>& parent, GLCommandQueue* q, size_t width, size_t mips, GLenum colorFormat)
: GraphicsDataNode<ITextureCubeR>(parent)
, m_q(q)
, m_width(width)
, m_mipCount(mips)
, m_colorFormat(colorFormat) {
glGenTextures(2, m_texs);
_allocateTextures();
glBindTexture(GL_TEXTURE_CUBE_MAP, m_texs[0]);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
m_q->addFBO(this);
}
ObjToken<ITextureCubeR> GLDataFactory::Context::newCubeRenderTexture(size_t width, size_t mips) {
GLDataFactoryImpl& factory = static_cast<GLDataFactoryImpl&>(m_parent);
GLCommandQueue* q = static_cast<GLCommandQueue*>(factory.m_parent->getCommandQueue());
BOO_MSAN_NO_INTERCEPT
ObjToken<ITextureCubeR> retval(new GLTextureCubeR(m_data, q, width, mips,
factory.m_glCtx->m_deepColor ? GL_RGBA16 : GL_RGBA8));
return retval;
}
ObjToken<IShaderDataBinding> GLDataFactory::Context::newShaderDataBinding(
const ObjToken<IShaderPipeline>& pipeline, const ObjToken<IGraphicsBuffer>& vbo,
const ObjToken<IGraphicsBuffer>& instVbo, const ObjToken<IGraphicsBuffer>& ibo, size_t ubufCount,

View File

@ -174,7 +174,7 @@ static VKAPI_ATTR VkBool32 VKAPI_CALL dbgFunc(VkDebugReportFlagsEXT msgFlags, Vk
static void SetImageLayout(VkCommandBuffer cmd, VkImage image, VkImageAspectFlags aspectMask,
VkImageLayout old_image_layout, VkImageLayout new_image_layout, uint32_t mipCount,
uint32_t layerCount, uint32_t baseMipLevel = 0) {
VkImageMemoryBarrier imageMemoryBarrier = {};
imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrier.pNext = NULL;
@ -184,7 +184,7 @@ static void SetImageLayout(VkCommandBuffer cmd, VkImage image, VkImageAspectFlag
imageMemoryBarrier.newLayout = new_image_layout;
imageMemoryBarrier.image = image;
imageMemoryBarrier.subresourceRange.aspectMask = aspectMask;
imageMemoryBarrier.subresourceRange.baseMipLevel = baseMipLevel;
imageMemoryBarrier.subresourceRange.levelCount = mipCount;
imageMemoryBarrier.subresourceRange.layerCount = layerCount;
imageMemoryBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@ -413,20 +413,23 @@ bool VulkanContext::initVulkan(std::string_view appName, PFN_vkGetInstanceProcAd
}
#ifndef NDEBUG
PFN_vkCreateDebugReportCallbackEXT createDebugReportCallback =
(PFN_vkCreateDebugReportCallbackEXT)vk::GetInstanceProcAddr(m_instance, "vkCreateDebugReportCallbackEXT");
if (!createDebugReportCallback)
Log.report(logvisor::Fatal, "GetInstanceProcAddr: Unable to find vkCreateDebugReportCallbackEXT function.");
m_destroyDebugReportCallback =
(PFN_vkDestroyDebugReportCallbackEXT)vk::GetInstanceProcAddr(m_instance, "vkDestroyDebugReportCallbackEXT");
if (!m_destroyDebugReportCallback)
Log.report(logvisor::Fatal, "GetInstanceProcAddr: Unable to find vkDestroyDebugReportCallbackEXT function.");
VkDebugReportCallbackCreateInfoEXT debugCreateInfo = {};
debugCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
debugCreateInfo.pNext = nullptr;
debugCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
debugCreateInfo.pfnCallback = dbgFunc;
debugCreateInfo.pUserData = nullptr;
ThrowIfFailed(createDebugReportCallback(m_instance, &debugCreateInfo, nullptr, &m_debugReportCallback));
#endif
vk::init_dispatch_table_middle(m_instance, false);
@ -626,11 +629,20 @@ void VulkanContext::initDevice() {
}
void VulkanContext::destroyDevice() {
for (auto& s : m_samplers)
vk::DestroySampler(m_dev, s.second, nullptr);
m_samplers.clear();
if (m_passColorOnly) {
vk::DestroyRenderPass(m_dev, m_passColorOnly, nullptr);
m_passColorOnly = VK_NULL_HANDLE;
}
if (m_passOneSample) {
vk::DestroyRenderPass(m_dev, m_passOneSample, nullptr);
m_passOneSample = VK_NULL_HANDLE;
}
if (m_pass) {
vk::DestroyRenderPass(m_dev, m_pass, nullptr);
m_pass = VK_NULL_HANDLE;
@ -656,6 +668,13 @@ void VulkanContext::destroyDevice() {
m_allocator = VK_NULL_HANDLE;
}
#ifndef NDEBUG
if (m_debugReportCallback) {
m_destroyDebugReportCallback(m_instance, m_debugReportCallback, nullptr);
m_debugReportCallback = VK_NULL_HANDLE;
}
#endif
if (m_dev) {
vk::DestroyDevice(m_dev, nullptr);
m_dev = VK_NULL_HANDLE;
@ -772,9 +791,13 @@ void VulkanContext::initSwapChain(VulkanContext::Window& windowCtx, VkSurfaceKHR
renderPass.pSubpasses = &subpass;
ThrowIfFailed(vk::CreateRenderPass(m_dev, &renderPass, nullptr, &m_pass));
/* render pass one sample */
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;
ThrowIfFailed(vk::CreateRenderPass(m_dev, &renderPass, nullptr, &m_passOneSample));
/* render pass color only */
attachments[0].format = m_displayFormat;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
renderPass.attachmentCount = 1;
subpass.pDepthStencilAttachment = nullptr;
ThrowIfFailed(vk::CreateRenderPass(m_dev, &renderPass, nullptr, &m_passColorOnly));
@ -1072,6 +1095,8 @@ struct AllocatedBuffer {
struct AllocatedImage {
VkImage m_image = VK_NULL_HANDLE;
VmaAllocation m_allocation;
VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageLayout m_committedLayout = VK_IMAGE_LAYOUT_UNDEFINED;
void _create(VulkanContext* ctx, const VkImageCreateInfo* pImageCreateInfo, VmaAllocationCreateFlags flags) {
assert(m_image == VK_NULL_HANDLE && "create may only be called once");
@ -1091,8 +1116,21 @@ struct AllocatedImage {
if (m_image) {
vmaDestroyImage(ctx->m_allocator, m_image, m_allocation);
m_image = VK_NULL_HANDLE;
m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
m_committedLayout = VK_IMAGE_LAYOUT_UNDEFINED;
}
}
void toLayout(VkCommandBuffer cmdBuf, VkImageAspectFlags aspect, VkImageLayout layout,
uint32_t mipCount = 1, uint32_t layerCount = 1, uint32_t baseMipLevel = 0) {
if (layout != m_layout) {
SetImageLayout(cmdBuf, m_image, aspect, m_layout, layout, mipCount, layerCount, baseMipLevel);
m_layout = layout;
}
}
void commitLayout() { m_committedLayout = m_layout; }
void rollbackLayout() { m_layout = m_committedLayout; }
};
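The new m_layout / m_committedLayout pair turns AllocatedImage into a small state machine: toLayout() records barriers as they are written into a command buffer, commitLayout() runs once that buffer is actually submitted, and rollbackLayout() undoes the bookkeeping when a recorded buffer is abandoned (see _commitImageLayouts / _rollbackImageLayouts further down). A standalone model of just the bookkeeping, with the Vulkan details stripped out:

enum class Layout { Undefined, TransferDst, TransferSrc, ShaderRead, ColorAttachment };
struct TrackedImage {
  Layout current = Layout::Undefined;    // what the recorded-but-unsubmitted commands assume
  Layout committed = Layout::Undefined;  // what the GPU last actually saw
  void toLayout(Layout l) {
    if (l != current) {
      // the real code emits a VkImageMemoryBarrier here (SetImageLayout)
      current = l;
    }
  }
  void commitLayout() { committed = current; }   // command buffer was submitted
  void rollbackLayout() { current = committed; } // command buffer was thrown away
};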
struct VulkanData : BaseGraphicsData {
@ -1398,8 +1436,8 @@ public:
/* Since we're going to blit to the texture image, set its layout to
* DESTINATION_OPTIMAL */
m_gpuTex.toLayout(ctx->m_loadCmdBuf, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, m_mips);
VkBufferImageCopy copyRegions[16] = {};
size_t width = m_width;
@ -1431,8 +1469,8 @@ public:
/* Set the layout for the texture image from DESTINATION_OPTIMAL to
* SHADER_READ_ONLY */
m_gpuTex.toLayout(ctx->m_loadCmdBuf, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, m_mips);
}
TextureFormat format() const { return m_fmt; }
@ -1557,8 +1595,8 @@ public:
/* Since we're going to blit to the texture image, set its layout to
* DESTINATION_OPTIMAL */
m_gpuTex.toLayout(ctx->m_loadCmdBuf, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, m_mips, m_layers);
VkBufferImageCopy copyRegions[16] = {};
size_t width = m_width;
@ -1590,8 +1628,8 @@ public:
/* Set the layout for the texture image from DESTINATION_OPTIMAL to
* SHADER_READ_ONLY */
m_gpuTex.toLayout(ctx->m_loadCmdBuf, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, m_mips, m_layers);
}
TextureFormat format() const { return m_fmt; }
@ -1737,7 +1775,6 @@ class VulkanTextureR : public GraphicsDataNode<ITextureR> {
void Setup(VulkanContext* ctx) {
/* no-ops on first call */
doDestroy();
/* color target */
VkImageCreateInfo texCreateInfo = {};
@ -1769,7 +1806,6 @@ class VulkanTextureR : public GraphicsDataNode<ITextureR> {
texCreateInfo.samples = VkSampleCountFlagBits(1);
for (size_t i = 0; i < m_colorBindCount; ++i) {
texCreateInfo.format = ctx->m_internalFormat;
texCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
m_colorBindTex[i].createFB(ctx, &texCreateInfo);
@ -1779,7 +1815,6 @@ class VulkanTextureR : public GraphicsDataNode<ITextureR> {
}
for (size_t i = 0; i < m_depthBindCount; ++i) {
texCreateInfo.format = VK_FORMAT_D32_SFLOAT;
texCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
m_depthBindTex[i].createFB(ctx, &texCreateInfo);
@ -1873,10 +1908,6 @@ public:
VkFramebuffer m_framebuffer = VK_NULL_HANDLE;
VkRenderPassBeginInfo m_passBeginInfo = {};
VkSampler m_sampler = VK_NULL_HANDLE;
void setClampMode(TextureClampMode mode);
@ -1894,17 +1925,244 @@ public:
}
void initializeBindLayouts(VulkanContext* ctx) {
for (size_t i = 0; i < m_colorBindCount; ++i)
m_colorBindTex[i].toLayout(ctx->m_loadCmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
for (size_t i = 0; i < m_depthBindCount; ++i)
m_depthBindTex[i].toLayout(ctx->m_loadCmdBuf, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
void toColorTransferSrcLayout(VkCommandBuffer cmdBuf) {
m_colorTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
}
void toDepthTransferSrcLayout(VkCommandBuffer cmdBuf) {
m_depthTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
}
void toColorAttachmentLayout(VkCommandBuffer cmdBuf) {
m_colorTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
void toDepthAttachmentLayout(VkCommandBuffer cmdBuf) {
m_depthTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
}
void toAttachmentLayout(VkCommandBuffer cmdBuf) {
toColorAttachmentLayout(cmdBuf);
toDepthAttachmentLayout(cmdBuf);
}
void toColorBindTransferDstLayout(VkCommandBuffer cmdBuf, int bindIdx) {
m_colorBindTex[bindIdx].toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
void toDepthBindTransferDstLayout(VkCommandBuffer cmdBuf, int bindIdx) {
m_depthBindTex[bindIdx].toLayout(cmdBuf, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
void toColorBindShaderReadLayout(VkCommandBuffer cmdBuf, int bindIdx) {
m_colorBindTex[bindIdx].toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
void toDepthBindShaderReadLayout(VkCommandBuffer cmdBuf, int bindIdx) {
m_depthBindTex[bindIdx].toLayout(cmdBuf, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
void commitLayouts() {
m_colorTex.commitLayout();
m_depthTex.commitLayout();
for (int i = 0; i < m_colorBindCount; ++i)
m_colorBindTex[i].commitLayout();
for (int i = 0; i < m_depthBindCount; ++i)
m_depthBindTex[i].commitLayout();
}
void rollbackLayouts() {
m_colorTex.rollbackLayout();
m_depthTex.rollbackLayout();
for (int i = 0; i < m_colorBindCount; ++i)
m_colorBindTex[i].rollbackLayout();
for (int i = 0; i < m_depthBindCount; ++i)
m_depthBindTex[i].rollbackLayout();
}
};
class VulkanTextureCubeR : public GraphicsDataNode<ITextureCubeR> {
friend class VulkanDataFactory;
friend struct VulkanCommandQueue;
VulkanCommandQueue* m_q;
size_t m_width;
size_t m_mipCount = 0;
void Setup(VulkanContext* ctx) {
/* no-ops on first call */
doDestroy();
setClampMode(TextureClampMode::Repeat);
/* color target */
VkImageCreateInfo texCreateInfo = {};
texCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
texCreateInfo.pNext = nullptr;
texCreateInfo.imageType = VK_IMAGE_TYPE_2D;
texCreateInfo.format = ctx->m_internalFormat;
texCreateInfo.extent.width = m_width;
texCreateInfo.extent.height = m_width;
texCreateInfo.extent.depth = 1;
texCreateInfo.mipLevels = 1;
texCreateInfo.arrayLayers = 6;
texCreateInfo.samples = VkSampleCountFlagBits(1);
texCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
texCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
texCreateInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
texCreateInfo.queueFamilyIndexCount = 0;
texCreateInfo.pQueueFamilyIndices = nullptr;
texCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
texCreateInfo.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
m_colorTex.createFB(ctx, &texCreateInfo);
/* depth target */
texCreateInfo.mipLevels = 1;
texCreateInfo.samples = VkSampleCountFlagBits(1);
texCreateInfo.format = VK_FORMAT_D32_SFLOAT;
texCreateInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
m_depthTex.createFB(ctx, &texCreateInfo);
/* color bind target */
texCreateInfo.format = ctx->m_internalFormat;
texCreateInfo.mipLevels = m_mipCount;
texCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
m_colorBindTex.createFB(ctx, &texCreateInfo);
m_colorBindDescInfo.sampler = m_sampler;
m_colorBindDescInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
/* Create resource views */
VkImageViewCreateInfo viewCreateInfo = {};
viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewCreateInfo.pNext = nullptr;
viewCreateInfo.image = m_colorBindTex.m_image;
viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
viewCreateInfo.format = ctx->m_internalFormat;
viewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_R;
viewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_G;
viewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_B;
viewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_A;
viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewCreateInfo.subresourceRange.baseMipLevel = 0;
viewCreateInfo.subresourceRange.levelCount = m_mipCount;
viewCreateInfo.subresourceRange.baseArrayLayer = 0;
viewCreateInfo.subresourceRange.layerCount = 6;
ThrowIfFailed(vk::CreateImageView(ctx->m_dev, &viewCreateInfo, nullptr, &m_colorBindView));
m_colorBindDescInfo.imageView = m_colorBindView;
viewCreateInfo.image = m_colorTex.m_image;
viewCreateInfo.subresourceRange.levelCount = 1;
viewCreateInfo.subresourceRange.layerCount = 1;
for (int i = 0; i < 6; ++i) {
viewCreateInfo.subresourceRange.baseArrayLayer = i;
ThrowIfFailed(vk::CreateImageView(ctx->m_dev, &viewCreateInfo, nullptr, &m_colorView[i]));
}
viewCreateInfo.image = m_depthTex.m_image;
viewCreateInfo.format = VK_FORMAT_D32_SFLOAT;
viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
for (int i = 0; i < 6; ++i) {
viewCreateInfo.subresourceRange.baseArrayLayer = i;
ThrowIfFailed(vk::CreateImageView(ctx->m_dev, &viewCreateInfo, nullptr, &m_depthView[i]));
}
/* framebuffer */
VkFramebufferCreateInfo fbCreateInfo = {};
fbCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fbCreateInfo.pNext = nullptr;
fbCreateInfo.renderPass = ctx->m_passOneSample;
fbCreateInfo.attachmentCount = 2;
fbCreateInfo.width = m_width;
fbCreateInfo.height = m_width;
fbCreateInfo.layers = 1;
VkImageView attachments[2] = {};
fbCreateInfo.pAttachments = attachments;
for (int i = 0; i < 6; ++i) {
attachments[0] = m_colorView[i];
attachments[1] = m_depthView[i];
ThrowIfFailed(vk::CreateFramebuffer(ctx->m_dev, &fbCreateInfo, nullptr, &m_framebuffer[i]));
auto& pbInfo = m_passBeginInfo[i];
pbInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
pbInfo.pNext = nullptr;
pbInfo.renderPass = ctx->m_passOneSample;
pbInfo.framebuffer = m_framebuffer[i];
pbInfo.renderArea.offset.x = 0;
pbInfo.renderArea.offset.y = 0;
pbInfo.renderArea.extent.width = m_width;
pbInfo.renderArea.extent.height = m_width;
pbInfo.clearValueCount = 0;
pbInfo.pClearValues = nullptr;
}
}
VulkanTextureCubeR(const boo::ObjToken<BaseGraphicsData>& parent, VulkanCommandQueue* q, size_t width, size_t mips);
public:
AllocatedImage m_colorTex;
VkImageView m_colorView[6] = {};
AllocatedImage m_depthTex;
VkImageView m_depthView[6] = {};
AllocatedImage m_colorBindTex;
VkImageView m_colorBindView = VK_NULL_HANDLE;
VkDescriptorImageInfo m_colorBindDescInfo = {};
VkFramebuffer m_framebuffer[6] = {};
VkRenderPassBeginInfo m_passBeginInfo[6] = {};
VkSampler m_sampler = VK_NULL_HANDLE;
void setClampMode(TextureClampMode mode);
void doDestroy();
~VulkanTextureCubeR();
void resize(VulkanContext* ctx, size_t width, size_t mips) {
if (width < 1)
width = 1;
m_width = width;
m_mipCount = mips;
Setup(ctx);
}
void toColorTransferSrcLayout(VkCommandBuffer cmdBuf) {
m_colorTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 1, 6);
}
void toColorAttachmentLayout(VkCommandBuffer cmdBuf) {
m_colorTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1, 6);
}
void toDepthAttachmentLayout(VkCommandBuffer cmdBuf) {
m_depthTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, 1, 6);
}
void toAttachmentLayout(VkCommandBuffer cmdBuf) {
toColorAttachmentLayout(cmdBuf);
toDepthAttachmentLayout(cmdBuf);
}
void toColorBindShaderReadLayout(VkCommandBuffer cmdBuf) {
m_colorBindTex.toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, m_mipCount, 6);
}
void commitLayouts() {
m_colorTex.commitLayout();
m_depthTex.commitLayout();
m_colorBindTex.commitLayout();
}
void rollbackLayouts() {
m_colorTex.rollbackLayout();
m_depthTex.rollbackLayout();
m_colorBindTex.rollbackLayout();
}
};
@ -2309,6 +2567,10 @@ static const VkDescriptorImageInfo* GetTextureGPUResource(const ITexture* tex, i
const VulkanTextureR* ctex = static_cast<const VulkanTextureR*>(tex);
return depth ? &ctex->m_depthBindDescInfo[bindIdx] : &ctex->m_colorBindDescInfo[bindIdx];
}
case TextureType::CubeRender: {
const VulkanTextureCubeR* ctex = static_cast<const VulkanTextureCubeR*>(tex);
return &ctex->m_colorBindDescInfo;
}
default:
break;
}
@ -2655,39 +2917,51 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
m_drawResTokens[m_fillBuf].push_back(binding.get());
}
boo::ObjToken<ITexture> m_boundTarget;
void setRenderTarget(const boo::ObjToken<ITextureR>& target) {
VulkanTextureR* ctarget = target.cast<VulkanTextureR>();
VkCommandBuffer cmdBuf = m_cmdBufs[m_fillBuf];
if (m_boundTarget.get() != ctarget) {
if (m_boundTarget)
vk::CmdEndRenderPass(cmdBuf);
ctarget->toAttachmentLayout(cmdBuf);
m_boundTarget = target.get();
m_drawResTokens[m_fillBuf].push_back(target.get());
vk::CmdBeginRenderPass(cmdBuf, &ctarget->m_passBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
}
}
int m_boundFace = 0;
void setRenderTarget(const ObjToken<ITextureCubeR>& target, int face) {
VulkanTextureCubeR* ctarget = target.cast<VulkanTextureCubeR>();
VkCommandBuffer cmdBuf = m_cmdBufs[m_fillBuf];
if (m_boundTarget.get() != ctarget || m_boundFace != face) {
if (m_boundTarget)
vk::CmdEndRenderPass(cmdBuf);
ctarget->toAttachmentLayout(cmdBuf);
m_boundTarget = target.get();
m_boundFace = face;
m_drawResTokens[m_fillBuf].push_back(target.get());
vk::CmdBeginRenderPass(cmdBuf, &ctarget->m_passBeginInfo[face], VK_SUBPASS_CONTENTS_INLINE);
}
}
void setViewport(const SWindowRect& rect, float znear, float zfar) {
if (m_boundTarget) {
size_t texHeight = 0;
switch (m_boundTarget->type()) {
case TextureType::Render: texHeight = m_boundTarget.cast<VulkanTextureR>()->m_height; break;
case TextureType::CubeRender: texHeight = m_boundTarget.cast<VulkanTextureCubeR>()->m_width; break;
default: break;
}
VkViewport vp = {float(rect.location[0]),
float(std::max(0, int(texHeight) - rect.location[1] - rect.size[1])),
float(rect.size[0]),
float(rect.size[1]),
znear,
@ -2698,9 +2972,14 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
void setScissor(const SWindowRect& rect) {
if (m_boundTarget) {
size_t texHeight = 0;
switch (m_boundTarget->type()) {
case TextureType::Render: texHeight = m_boundTarget.cast<VulkanTextureR>()->m_height; break;
case TextureType::CubeRender: texHeight = m_boundTarget.cast<VulkanTextureCubeR>()->m_width; break;
default: break;
}
VkRect2D vkrect = {
{int32_t(rect.location[0]), int32_t(std::max(0, int(texHeight) - rect.location[1] - rect.size[1]))},
{uint32_t(rect.size[0]), uint32_t(rect.size[1])}};
vk::CmdSetScissor(m_cmdBufs[m_fillBuf], 0, 1, &vkrect);
}
@ -2713,6 +2992,86 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
m_drawResTokens[m_fillBuf].push_back(tex.get());
}
std::unordered_map<VulkanTextureCubeR*, std::pair<size_t, size_t>> m_cubeTexResizes;
void resizeRenderTexture(const boo::ObjToken<ITextureCubeR>& tex, size_t width, size_t mips) {
VulkanTextureCubeR* ctex = tex.cast<VulkanTextureCubeR>();
m_cubeTexResizes[ctex] = std::make_pair(width, mips);
m_drawResTokens[m_fillBuf].push_back(tex.get());
}
void generateMipmaps(const ObjToken<ITextureCubeR>& tex) {
VulkanTextureCubeR* ctex = tex.cast<VulkanTextureCubeR>();
VkCommandBuffer cmdBuf = m_cmdBufs[m_fillBuf];
if (m_boundTarget) {
vk::CmdEndRenderPass(cmdBuf);
m_boundTarget.reset();
}
ctex->toColorTransferSrcLayout(cmdBuf);
{
/* First blit performs y-inversion (can't easily invert the cube sampler or geometry) */
VkImageBlit blit = {};
blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.srcSubresource.layerCount = 6;
blit.srcSubresource.mipLevel = 0;
blit.srcOffsets[1].x = int32_t(ctex->m_width);
blit.srcOffsets[1].y = int32_t(ctex->m_width);
blit.srcOffsets[1].z = 1;
blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.dstSubresource.layerCount = 6;
blit.dstSubresource.mipLevel = 0;
blit.dstOffsets[0].y = int32_t(ctex->m_width);
blit.dstOffsets[1].x = int32_t(ctex->m_width);
blit.dstOffsets[1].z = 1;
SetImageLayout(cmdBuf, ctex->m_colorBindTex.m_image, VK_IMAGE_ASPECT_COLOR_BIT,
ctex->m_colorBindTex.m_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, 6, 0);
vk::CmdBlitImage(cmdBuf, ctex->m_colorTex.m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
ctex->m_colorBindTex.m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_LINEAR);
SetImageLayout(cmdBuf, ctex->m_colorBindTex.m_image, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 1, 6, 0);
}
size_t tmpWidth = ctex->m_width;
for (int32_t i = 1; i < ctex->m_mipCount; i++) {
VkImageBlit blit = {};
blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.srcSubresource.layerCount = 6;
blit.srcSubresource.mipLevel = i-1;
blit.srcOffsets[1].x = int32_t(tmpWidth);
blit.srcOffsets[1].y = int32_t(tmpWidth);
blit.srcOffsets[1].z = 1;
tmpWidth >>= 1;
blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.dstSubresource.layerCount = 6;
blit.dstSubresource.mipLevel = i;
blit.dstOffsets[1].x = int32_t(tmpWidth);
blit.dstOffsets[1].y = int32_t(tmpWidth);
blit.dstOffsets[1].z = 1;
SetImageLayout(cmdBuf, ctex->m_colorBindTex.m_image, VK_IMAGE_ASPECT_COLOR_BIT,
ctex->m_colorBindTex.m_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, 6, i);
vk::CmdBlitImage(cmdBuf, ctex->m_colorBindTex.m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
ctex->m_colorBindTex.m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, VK_FILTER_LINEAR);
SetImageLayout(cmdBuf, ctex->m_colorBindTex.m_image, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 1, 6, i);
}
ctex->m_colorBindTex.m_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
ctex->toColorBindShaderReadLayout(cmdBuf);
}
void schedulePostFrameHandler(std::function<void(void)>&& func) { func(); }
float m_clearColor[4] = {0.0, 0.0, 0.0, 0.0};
@ -2726,12 +3085,24 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
void clearTarget(bool render = true, bool depth = true) {
if (!m_boundTarget)
return;
VkClearAttachment clr[2] = {};
VkClearRect rect = {};
rect.layerCount = 1;
switch (m_boundTarget->type()) {
case TextureType::Render: {
VulkanTextureR* ctex = m_boundTarget.cast<VulkanTextureR>();
rect.rect.extent.width = ctex->m_width;
rect.rect.extent.height = ctex->m_height;
break;
}
case TextureType::CubeRender: {
VulkanTextureCubeR* ctex = m_boundTarget.cast<VulkanTextureCubeR>();
rect.rect.extent.width = ctex->m_width;
rect.rect.extent.height = ctex->m_width;
break;
}
default: break;
}
if (render && depth) {
clr[0].clearValue.color.float32[0] = m_clearColor[0];
@ -2806,6 +3177,7 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
gammaBinding->m_texs[0].tex.reset();
vk::CmdEndRenderPass(cmdBuf);
m_boundTarget.reset();
SetImageLayout(cmdBuf, dest.m_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, 1, 1);
@ -2813,9 +3185,7 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
SetImageLayout(cmdBuf, dest.m_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, 1);
csource->toColorTransferSrcLayout(cmdBuf);
if (csource->m_samplesColor > 1) {
VkImageResolve resolveInfo = {};
@ -2851,10 +3221,6 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
SetImageLayout(cmdBuf, dest.m_image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, 1, 1);
}
m_resolveDispSource.reset();
@ -2881,12 +3247,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
copyInfo.srcSubresource.baseArrayLayer = 0;
copyInfo.srcSubresource.layerCount = 1;
ctexture->toColorTransferSrcLayout(cmdBuf);
ctexture->toColorBindTransferDstLayout(cmdBuf, bindIdx);
copyInfo.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyInfo.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
@ -2895,12 +3258,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
ctexture->m_colorBindTex[bindIdx].m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyInfo);
if (ctexture == m_boundTarget.get())
ctexture->toColorAttachmentLayout(cmdBuf);
ctexture->toColorBindShaderReadLayout(cmdBuf, bindIdx);
} else {
VkImageResolve resolveInfo = {};
SWindowRect intersectRect = rect.intersect(SWindowRect(0, 0, ctexture->m_width, ctexture->m_height));
@ -2918,12 +3278,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
resolveInfo.srcSubresource.baseArrayLayer = 0;
resolveInfo.srcSubresource.layerCount = 1;
ctexture->toColorTransferSrcLayout(cmdBuf);
ctexture->toColorBindTransferDstLayout(cmdBuf, bindIdx);
resolveInfo.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveInfo.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
@ -2933,12 +3290,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
&resolveInfo);
if (ctexture == m_boundTarget.get())
ctexture->toColorAttachmentLayout(cmdBuf);
ctexture->toColorBindShaderReadLayout(cmdBuf, bindIdx);
}
}
@ -2960,12 +3314,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
copyInfo.srcSubresource.baseArrayLayer = 0;
copyInfo.srcSubresource.layerCount = 1;
ctexture->toDepthTransferSrcLayout(cmdBuf);
ctexture->toDepthBindTransferDstLayout(cmdBuf, bindIdx);
copyInfo.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyInfo.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
@ -2974,12 +3325,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
ctexture->m_depthBindTex[bindIdx].m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyInfo);
if (ctexture == m_boundTarget.get())
ctexture->toDepthAttachmentLayout(cmdBuf);
ctexture->toDepthBindShaderReadLayout(cmdBuf, bindIdx);
} else {
VkImageResolve resolveInfo = {};
SWindowRect intersectRect = rect.intersect(SWindowRect(0, 0, ctexture->m_width, ctexture->m_height));
@ -2997,12 +3345,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
resolveInfo.srcSubresource.baseArrayLayer = 0;
resolveInfo.srcSubresource.layerCount = 1;
ctexture->toDepthTransferSrcLayout(cmdBuf);
ctexture->toDepthBindTransferDstLayout(cmdBuf, bindIdx);
resolveInfo.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
resolveInfo.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
@ -3012,12 +3357,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
&resolveInfo);
if (ctexture == m_boundTarget.get())
ctexture->toDepthAttachmentLayout(cmdBuf);
ctexture->toDepthBindShaderReadLayout(cmdBuf, bindIdx);
}
}
}
@ -3029,7 +3371,13 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
vk::CmdEndRenderPass(cmdBuf);
_resolveBindTexture(cmdBuf, ctexture, rect, tlOrigin, bindIdx, color, depth);
VkRenderPassBeginInfo* pbInfo = nullptr;
switch (m_boundTarget->type()) {
case TextureType::Render: pbInfo = &m_boundTarget.cast<VulkanTextureR>()->m_passBeginInfo; break;
case TextureType::CubeRender: pbInfo = &m_boundTarget.cast<VulkanTextureCubeR>()->m_passBeginInfo[m_boundFace]; break;
default: break;
}
vk::CmdBeginRenderPass(cmdBuf, pbInfo, VK_SUBPASS_CONTENTS_INLINE);
if (clearDepth) {
VkClearAttachment clr = {};
@ -3044,6 +3392,9 @@ struct VulkanCommandQueue : IGraphicsCommandQueue {
}
}
void _commitImageLayouts();
void _rollbackImageLayouts();
void execute();
};
@ -3078,6 +3429,14 @@ void VulkanTextureR::doDestroy() {
m_depthBindTex[i].destroy(m_q->m_ctx);
}
void VulkanTextureR::setClampMode(TextureClampMode mode) {
MakeSampler(m_q->m_ctx, m_sampler, mode, 1);
for (size_t i = 0; i < m_colorBindCount; ++i)
m_colorBindDescInfo[i].sampler = m_sampler;
for (size_t i = 0; i < m_depthBindCount; ++i)
m_depthBindDescInfo[i].sampler = m_sampler;
}
VulkanTextureR::VulkanTextureR(const boo::ObjToken<BaseGraphicsData>& parent, VulkanCommandQueue* q, size_t width,
size_t height, TextureClampMode clampMode, size_t colorBindCount, size_t depthBindCount)
: GraphicsDataNode<ITextureR>(parent)
@ -3119,12 +3478,56 @@ VulkanTextureR::~VulkanTextureR() {
m_depthBindTex[i].destroy(m_q->m_ctx);
}
void VulkanTextureCubeR::doDestroy() {
if (m_framebuffer[0]) {
for (int i = 0; i < 6; ++i)
vk::DestroyFramebuffer(m_q->m_ctx->m_dev, m_framebuffer[i], nullptr);
m_framebuffer[0] = VK_NULL_HANDLE;
}
if (m_colorBindView) {
vk::DestroyImageView(m_q->m_ctx->m_dev, m_colorBindView, nullptr);
m_colorBindView = VK_NULL_HANDLE;
}
m_colorBindTex.destroy(m_q->m_ctx);
if (m_colorView[0]) {
for (int i = 0; i < 6; ++i)
vk::DestroyImageView(m_q->m_ctx->m_dev, m_colorView[i], nullptr);
m_colorView[0] = VK_NULL_HANDLE;
}
m_colorTex.destroy(m_q->m_ctx);
if (m_depthView[0]) {
for (int i = 0; i < 6; ++i)
vk::DestroyImageView(m_q->m_ctx->m_dev, m_depthView[i], nullptr);
m_depthView[0] = VK_NULL_HANDLE;
}
m_depthTex.destroy(m_q->m_ctx);
}
void VulkanTextureCubeR::setClampMode(TextureClampMode mode) {
MakeSampler(m_q->m_ctx, m_sampler, mode, m_mipCount);
m_colorBindDescInfo.sampler = m_sampler;
}
VulkanTextureCubeR::VulkanTextureCubeR(const boo::ObjToken<BaseGraphicsData>& parent,
VulkanCommandQueue* q, size_t width, size_t mips)
: GraphicsDataNode<ITextureCubeR>(parent)
, m_q(q)
, m_width(width)
, m_mipCount(mips) {
Setup(q->m_ctx);
}
VulkanTextureCubeR::~VulkanTextureCubeR() {
for (int i = 0; i < 6; ++i)
vk::DestroyFramebuffer(m_q->m_ctx->m_dev, m_framebuffer[i], nullptr);
vk::DestroyImageView(m_q->m_ctx->m_dev, m_colorBindView, nullptr);
m_colorBindTex.destroy(m_q->m_ctx);
for (int i = 0; i < 6; ++i)
vk::DestroyImageView(m_q->m_ctx->m_dev, m_colorView[i], nullptr);
m_colorTex.destroy(m_q->m_ctx);
for (int i = 0; i < 6; ++i)
vk::DestroyImageView(m_q->m_ctx->m_dev, m_depthView[i], nullptr);
m_depthTex.destroy(m_q->m_ctx);
}
template <class DataCls>
@ -3169,8 +3572,8 @@ void VulkanTextureD::update(int b) {
/* copy staging data */
memmove(m_cpuBufPtrs[b], m_stagingBuf.get(), m_cpuSz);
m_gpuTex[b].toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
/* Put the copy command into the command buffer */
VkBufferImageCopy copyRegion = {};
@ -3188,8 +3591,8 @@ void VulkanTextureD::update(int b) {
/* Set the layout for the texture image from DESTINATION_OPTIMAL to
* SHADER_READ_ONLY */
m_gpuTex[b].toLayout(cmdBuf, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
m_validSlots |= slot;
}
@ -3266,6 +3669,12 @@ boo::ObjToken<ITextureR> VulkanDataFactory::Context::newRenderTexture(size_t wid
return {new VulkanTextureR(m_data, q, width, height, clampMode, colorBindCount, depthBindCount)};
}
ObjToken<ITextureCubeR> VulkanDataFactory::Context::newCubeRenderTexture(size_t width, size_t mips) {
VulkanDataFactoryImpl& factory = static_cast<VulkanDataFactoryImpl&>(m_parent);
VulkanCommandQueue* q = static_cast<VulkanCommandQueue*>(factory.m_parent->getCommandQueue());
return {new VulkanTextureCubeR(m_data, q, width, mips)};
}
ObjToken<IShaderStage> VulkanDataFactory::Context::newShaderStage(const uint8_t* data, size_t size,
PipelineStage stage) {
VulkanDataFactoryImpl& factory = static_cast<VulkanDataFactoryImpl&>(m_parent);
@ -3466,6 +3875,34 @@ boo::ObjToken<IGraphicsBufferD> VulkanDataFactoryImpl::newPoolBuffer(BufferUse u
return {retval};
}
void VulkanCommandQueue::_commitImageLayouts() {
VulkanDataFactoryImpl* gfxF = static_cast<VulkanDataFactoryImpl*>(m_parent->getDataFactory());
if (gfxF->m_dataHead) {
for (BaseGraphicsData& d : *gfxF->m_dataHead) {
if (d.m_RTexs)
for (ITextureR& t : *d.m_RTexs)
static_cast<VulkanTextureR&>(t).commitLayouts();
if (d.m_CubeRTexs)
for (ITextureCubeR& t : *d.m_CubeRTexs)
static_cast<VulkanTextureCubeR&>(t).commitLayouts();
}
}
}
void VulkanCommandQueue::_rollbackImageLayouts() {
VulkanDataFactoryImpl* gfxF = static_cast<VulkanDataFactoryImpl*>(m_parent->getDataFactory());
if (gfxF->m_dataHead) {
for (BaseGraphicsData& d : *gfxF->m_dataHead) {
if (d.m_RTexs)
for (ITextureR& t : *d.m_RTexs)
static_cast<VulkanTextureR&>(t).rollbackLayouts();
if (d.m_CubeRTexs)
for (ITextureCubeR& t : *d.m_CubeRTexs)
static_cast<VulkanTextureCubeR&>(t).rollbackLayouts();
}
}
}
void VulkanCommandQueue::execute() {
if (!m_running)
return;
@ -3514,11 +3951,13 @@ void VulkanCommandQueue::execute() {
}
vk::CmdEndRenderPass(m_cmdBufs[m_fillBuf]);
m_boundTarget.reset();
/* Check on fence */
if (m_submitted && vk::GetFenceStatus(m_ctx->m_dev, m_drawCompleteFence) == VK_NOT_READY) {
/* Abandon this list (renderer too slow) */
resetCommandBuffer();
_rollbackImageLayouts();
m_dynamicNeedsReset = true;
m_resolveDispSource = nullptr;
@ -3532,13 +3971,11 @@ void VulkanCommandQueue::execute() {
/* Perform texture and swap-chain resizes */
if (m_ctx->_resizeSwapChains() || m_texResizes.size()) {
for (const auto& resize : m_texResizes)
resize.first->resize(m_ctx, resize.second.first, resize.second.second);
m_texResizes.clear();
resetCommandBuffer();
_rollbackImageLayouts();
m_dynamicNeedsReset = true;
m_resolveDispSource = nullptr;
return;
@ -3590,6 +4027,7 @@ void VulkanCommandQueue::execute() {
resetCommandBuffer();
resetDynamicCommandBuffer();
_commitImageLayouts();
}
std::unique_ptr<IGraphicsCommandQueue> _NewVulkanCommandQueue(VulkanContext* ctx, VulkanContext::Window* windowCtx,