#ifndef BOO_GRAPHICSDEV_COMMON_HPP
#define BOO_GRAPHICSDEV_COMMON_HPP

/* Private header for managing shader data
 * binding lifetimes through the rendering cycle */

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <mutex>

#include "boo/graphicsdev/IGraphicsDataFactory.hpp"
#include "../Common.hpp"

namespace boo {

struct BaseGraphicsData;
struct BaseGraphicsPool;

template <class NodeCls, class DataCls = BaseGraphicsData>
struct GraphicsDataNode;

/** Inherited by data factory implementations to track the head data and pool nodes */
struct GraphicsDataFactoryHead {
  std::recursive_mutex m_dataMutex;
  BaseGraphicsData* m_dataHead = nullptr;
  BaseGraphicsPool* m_poolHead = nullptr;
  ~GraphicsDataFactoryHead() {
    assert(m_dataHead == nullptr && "Dangling graphics data pools detected");
    assert(m_poolHead == nullptr && "Dangling graphics data pools detected");
  }
};

/** Private generalized data container class.
 *  Keeps head pointers to all graphics objects by type */
struct BaseGraphicsData : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*> {
  static BaseGraphicsData*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_dataHead; }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields

  GraphicsDataNode<IShaderPipeline>* m_SPs = nullptr;
  GraphicsDataNode<IShaderDataBinding>* m_SBinds = nullptr;
  GraphicsDataNode<IGraphicsBufferS>* m_SBufs = nullptr;
  GraphicsDataNode<IGraphicsBufferD>* m_DBufs = nullptr;
  GraphicsDataNode<ITextureS>* m_STexs = nullptr;
  GraphicsDataNode<ITextureSA>* m_SATexs = nullptr;
  GraphicsDataNode<ITextureD>* m_DTexs = nullptr;
  GraphicsDataNode<ITextureR>* m_RTexs = nullptr;
  GraphicsDataNode<IVertexFormat>* m_VFmts = nullptr;

  template <class T>
  GraphicsDataNode<T>*& getHead();
  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  std::unique_lock<std::recursive_mutex> destructorLock() override {
    return std::unique_lock<std::recursive_mutex>{m_head->m_dataMutex};
  }

  explicit BaseGraphicsData(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};

template <>
inline GraphicsDataNode<IShaderPipeline>*& BaseGraphicsData::getHead<IShaderPipeline>() {
  return m_SPs;
}
template <>
inline GraphicsDataNode<IShaderDataBinding>*& BaseGraphicsData::getHead<IShaderDataBinding>() {
  return m_SBinds;
}
template <>
inline GraphicsDataNode<IGraphicsBufferS>*& BaseGraphicsData::getHead<IGraphicsBufferS>() {
  return m_SBufs;
}
template <>
inline GraphicsDataNode<IGraphicsBufferD>*& BaseGraphicsData::getHead<IGraphicsBufferD>() {
  return m_DBufs;
}
template <>
inline GraphicsDataNode<ITextureS>*& BaseGraphicsData::getHead<ITextureS>() {
  return m_STexs;
}
template <>
inline GraphicsDataNode<ITextureSA>*& BaseGraphicsData::getHead<ITextureSA>() {
  return m_SATexs;
}
template <>
inline GraphicsDataNode<ITextureD>*& BaseGraphicsData::getHead<ITextureD>() {
  return m_DTexs;
}
template <>
inline GraphicsDataNode<ITextureR>*& BaseGraphicsData::getHead<ITextureR>() {
  return m_RTexs;
}
template <>
inline GraphicsDataNode<IVertexFormat>*& BaseGraphicsData::getHead<IVertexFormat>() {
  return m_VFmts;
}

/** Private generalized pool container class.
 *  Keeps a head pointer to exactly one dynamic buffer while otherwise conforming to BaseGraphicsData */
struct BaseGraphicsPool : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*> {
  static BaseGraphicsPool*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_poolHead; }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields

  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>* m_DBufs = nullptr;

  template <class T>
  GraphicsDataNode<T, BaseGraphicsPool>*& getHead();
  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  std::unique_lock<std::recursive_mutex> destructorLock() override {
    return std::unique_lock<std::recursive_mutex>{m_head->m_dataMutex};
  }

  explicit BaseGraphicsPool(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};

template <>
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>*& BaseGraphicsPool::getHead<IGraphicsBufferD>() {
  return m_DBufs;
}
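/* A minimal usage sketch of the containers above, assuming a hypothetical backend
 * factory type named MyDataFactoryImpl (only GraphicsDataFactoryHead, BaseGraphicsData,
 * ObjToken and countForward<>() come from boo itself; the method shown is illustrative,
 * not part of any real backend):
 *
 *   struct MyDataFactoryImpl : IGraphicsDataFactory, GraphicsDataFactoryHead {
 *     // List traversal is guarded by the shared recursive mutex; the intrusive
 *     // linking/unlinking itself happens in the ListNode constructor/destructor.
 *     size_t countStaticTextures(const ObjToken<BaseGraphicsData>& data) {
 *       std::unique_lock<std::recursive_mutex> lk(m_dataMutex);
 *       return data->countForward<ITextureS>();
 *     }
 *   };
 */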
/** Private generalized graphics object node.
 *  Keeps a strong reference to the data pool that it's a member of,
 *  as well as doubly-linked pointers to same-type sibling objects */
template <class NodeCls, class DataCls>
struct GraphicsDataNode : ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls> {
  using base = ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls>;
  static GraphicsDataNode<NodeCls, DataCls>*& _getHeadPtr(ObjToken<DataCls>& head) {
    return head->template getHead<NodeCls>();
  }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(ObjToken<DataCls>& head) {
    return std::unique_lock<std::recursive_mutex>{head->m_head->m_dataMutex};
  }
  std::unique_lock<std::recursive_mutex> destructorLock() override {
    return std::unique_lock<std::recursive_mutex>{base::m_head->m_head->m_dataMutex};
  }

  explicit GraphicsDataNode(const ObjToken<DataCls>& data)
  : ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls>(data) {}

  class iterator {
    GraphicsDataNode<NodeCls, DataCls>* m_node;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = NodeCls;
    using difference_type = std::ptrdiff_t;
    using pointer = NodeCls*;
    using reference = NodeCls&;

    explicit iterator(GraphicsDataNode<NodeCls, DataCls>* node) : m_node(node) {}
    NodeCls& operator*() const { return *m_node; }
    bool operator!=(const iterator& other) const { return m_node != other.m_node; }
    iterator& operator++() {
      m_node = m_node->m_next;
      return *this;
    }
    iterator& operator--() {
      m_node = m_node->m_prev;
      return *this;
    }
  };

  iterator begin() { return iterator(this); }
  iterator end() { return iterator(nullptr); }

  size_t countForward() {
    size_t ret = 0;
    for (auto& n : *this)
      ++ret;
    return ret;
  }
};

/** Hash table entry for owning sharable shader objects */
template <class FactoryImpl, class ShaderImpl>
class IShareableShader {
  std::atomic_int m_refCount = {0};
  FactoryImpl& m_factory;
  uint64_t m_srckey, m_binKey;

public:
  IShareableShader(FactoryImpl& factory, uint64_t srcKey, uint64_t binKey)
  : m_factory(factory), m_srckey(srcKey), m_binKey(binKey) {}
  void increment() { m_refCount++; }
  void decrement() {
    if (m_refCount.fetch_sub(1) == 1)
      m_factory._unregisterShareableShader(m_srckey, m_binKey);
  }

  class Token {
    IShareableShader<FactoryImpl, ShaderImpl>* m_parent = nullptr;

  public:
    Token() = default;
    Token(IShareableShader* p) : m_parent(p) { m_parent->increment(); }
    Token& operator=(const Token&) = delete;
    Token(const Token&) = delete;
    Token& operator=(Token&& other) {
      reset();  // release any reference currently held before taking other's
      m_parent = other.m_parent;
      other.m_parent = nullptr;
      return *this;
    }
    Token(Token&& other) {
      m_parent = other.m_parent;
      other.m_parent = nullptr;
    }
    void reset() {
      if (m_parent)
        m_parent->decrement();
      m_parent = nullptr;
    }
    ~Token() {
      if (m_parent)
        m_parent->decrement();
    }
    operator bool() const { return m_parent != nullptr; }
    ShaderImpl& get() const { return static_cast<ShaderImpl&>(*m_parent); }
  };

  Token lock() { return Token(this); }
};

void UpdateGammaLUT(ITextureD* tex, float gamma);

} // namespace boo

#endif // BOO_GRAPHICSDEV_COMMON_HPP
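/* Usage sketch for IShareableShader (hypothetical names: MyFactory, MyShader and
 * lookupOrCompile are not part of boo).  A backend keeps shader entries in a hash table
 * keyed by (srcKey, binKey); pipeline objects hold Tokens so an entry is unregistered
 * only when the last pipeline releases it:
 *
 *   class MyFactory;  // must provide _unregisterShareableShader(uint64_t, uint64_t)
 *   class MyShader : public IShareableShader<MyFactory, MyShader> { ... };
 *
 *   MyShader& entry = lookupOrCompile(srcKey, binKey);                // hypothetical lookup
 *   IShareableShader<MyFactory, MyShader>::Token tok = entry.lock();  // refcount += 1
 *   // ... hand tok.get() to a pipeline object; the Token travels with it (move-only) ...
 *   tok.reset();  // refcount -= 1; the entry unregisters itself when it reaches zero
 */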