#pragma once

/* Private header for managing shader data
 * binding lifetimes through the rendering cycle */

#include <array>
#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

#include "boo/graphicsdev/IGraphicsDataFactory.hpp"
#include "boo/graphicsdev/IGraphicsCommandQueue.hpp"
#include "lib/Common.hpp"

namespace boo {

struct BaseGraphicsData;
struct BaseGraphicsPool;

template <class NodeCls, class DataCls = BaseGraphicsData>
struct GraphicsDataNode;

/** Inherited by data factory implementations to track the head data and pool nodes */
struct GraphicsDataFactoryHead {
  std::recursive_mutex m_dataMutex;
  BaseGraphicsData* m_dataHead = nullptr;
  BaseGraphicsPool* m_poolHead = nullptr;

  ~GraphicsDataFactoryHead() {
    assert(m_dataHead == nullptr && "Dangling graphics data detected");
    assert(m_poolHead == nullptr && "Dangling graphics data pools detected");
  }
};

/** Private generalized data container class.
 * Keeps head pointers to all graphics objects by type
 */
struct BaseGraphicsData : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*> {
  static BaseGraphicsData*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_dataHead; }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields

  GraphicsDataNode<IShaderStage, BaseGraphicsData>* m_Ss = nullptr;
  GraphicsDataNode<IShaderPipeline, BaseGraphicsData>* m_SPs = nullptr;
  GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>* m_SBinds = nullptr;
  GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>* m_SBufs = nullptr;
  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsData>* m_DBufs = nullptr;
  GraphicsDataNode<ITextureS, BaseGraphicsData>* m_STexs = nullptr;
  GraphicsDataNode<ITextureSA, BaseGraphicsData>* m_SATexs = nullptr;
  GraphicsDataNode<ITextureD, BaseGraphicsData>* m_DTexs = nullptr;
  GraphicsDataNode<ITextureR, BaseGraphicsData>* m_RTexs = nullptr;
  GraphicsDataNode<ITextureCubeR, BaseGraphicsData>* m_CubeRTexs = nullptr;

  template <class T>
  GraphicsDataNode<T, BaseGraphicsData>*& getHead();
  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  explicit BaseGraphicsData(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};

template <>
inline GraphicsDataNode<IShaderStage, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderStage>() {
  return m_Ss;
}
template <>
inline GraphicsDataNode<IShaderPipeline, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderPipeline>() {
  return m_SPs;
}
template <>
inline GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderDataBinding>() {
  return m_SBinds;
}
template <>
inline GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>*& BaseGraphicsData::getHead<IGraphicsBufferS>() {
  return m_SBufs;
}
template <>
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsData>*& BaseGraphicsData::getHead<IGraphicsBufferD>() {
  return m_DBufs;
}
template <>
inline GraphicsDataNode<ITextureS, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureS>() {
  return m_STexs;
}
template <>
inline GraphicsDataNode<ITextureSA, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureSA>() {
  return m_SATexs;
}
template <>
inline GraphicsDataNode<ITextureD, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureD>() {
  return m_DTexs;
}
template <>
inline GraphicsDataNode<ITextureR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureR>() {
  return m_RTexs;
}
template <>
inline GraphicsDataNode<ITextureCubeR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureCubeR>() {
  return m_CubeRTexs;
}

/** Private generalized pool container class.
 * Keeps a head pointer to a single object type (dynamic buffers) while otherwise
 * conforming to BaseGraphicsData
 */
struct BaseGraphicsPool : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*> {
  static BaseGraphicsPool*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_poolHead; }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields

  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>* m_DBufs = nullptr;

  template <class T>
  GraphicsDataNode<T, BaseGraphicsPool>*& getHead();
  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  explicit BaseGraphicsPool(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};

template <>
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>*& BaseGraphicsPool::getHead<IGraphicsBufferD>() {
  return m_DBufs;
}
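/* Illustrative sketch (not part of this header): `data` below is an assumed
 * ObjToken<BaseGraphicsData> held by a backend factory. Because every object
 * type hangs off its own typed head pointer, a teardown-time leak report can
 * count survivors per type:
 *
 *   size_t liveSTexs = data->countForward<ITextureS>();           // walks the m_STexs chain
 *   size_t liveBinds = data->countForward<IShaderDataBinding>();  // walks the m_SBinds chain
 */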
/** Private generalized graphics object node.
 * Keeps a strong reference to the data pool that it's a member of,
 * as well as doubly-linked pointers to same-type sibling objects
 */
template <class NodeCls, class DataCls>
struct GraphicsDataNode : ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls> {
  using base = ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls>;
  static GraphicsDataNode<NodeCls, DataCls>*& _getHeadPtr(ObjToken<DataCls>& head) {
    return head->template getHead<NodeCls>();
  }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(ObjToken<DataCls>& head) {
    return std::unique_lock<std::recursive_mutex>{head->m_head->m_dataMutex};
  }

  explicit GraphicsDataNode(const ObjToken<DataCls>& data) : base(data) {}

  class iterator {
    GraphicsDataNode<NodeCls, DataCls>* m_node;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = NodeCls;
    using difference_type = std::ptrdiff_t;
    using pointer = NodeCls*;
    using reference = NodeCls&;

    explicit iterator(GraphicsDataNode<NodeCls, DataCls>* node) : m_node(node) {}
    NodeCls& operator*() const { return *m_node; }
    bool operator!=(const iterator& other) const { return m_node != other.m_node; }
    iterator& operator++() {
      m_node = m_node->m_next;
      return *this;
    }
    iterator& operator--() {
      m_node = m_node->m_prev;
      return *this;
    }
  };

  iterator begin() { return iterator(this); }
  iterator end() { return iterator(nullptr); }

  size_t countForward() {
    size_t ret = 0;
    for ([[maybe_unused]] auto& n : *this)
      ++ret;
    return ret;
  }
};

void UpdateGammaLUT(ITextureD* tex, float gamma);

/** Generic work queue for asynchronously building shader pipelines on supported backends */
template <class ShaderPipelineType>
class PipelineCompileQueue {
  struct Task {
    ObjToken<IShaderPipeline> m_pipeline;
    explicit Task(ObjToken<IShaderPipeline> pipeline) : m_pipeline(std::move(pipeline)) {}
    void run() { m_pipeline.cast<ShaderPipelineType>()->compile(); }
  };

  std::queue<Task> m_tasks;
  std::atomic_size_t m_outstandingTasks = 0;
  std::vector<std::thread> m_threads;
  std::mutex m_mt;
  std::condition_variable m_cv, m_backcv;
  std::atomic_bool m_running = true;

  void worker() {
    std::unique_lock<std::mutex> lk(m_mt);
    while (m_running) {
      m_cv.wait(lk, [this]() { return !m_tasks.empty() || !m_running; });
      if (!m_running)
        break;
      Task t = std::move(m_tasks.front());
      m_tasks.pop();
      /* Compile outside the lock so other workers can dequeue concurrently */
      lk.unlock();
      t.run();
      lk.lock();
      --m_outstandingTasks;
      m_backcv.notify_all();
    }
  }

public:
  void addPipeline(ObjToken<IShaderPipeline> pipeline) {
    std::lock_guard<std::mutex> lk(m_mt);
    m_tasks.emplace(std::move(pipeline));
    ++m_outstandingTasks;
    m_cv.notify_one();
  }

  void waitUntilReady() {
    std::unique_lock<std::mutex> lk(m_mt);
    m_backcv.wait(lk, [this]() { return m_outstandingTasks == 0 || !m_running; });
  }

  bool isReady() const { return m_outstandingTasks == 0 || !m_running; }

  PipelineCompileQueue() {
    /* Leave one hardware thread free for the calling (render) thread */
    unsigned int numThreads = std::thread::hardware_concurrency();
    if (numThreads > 1)
      --numThreads;
    m_threads.reserve(numThreads);
    for (unsigned int i = 0; i < numThreads; ++i)
      m_threads.emplace_back(&PipelineCompileQueue::worker, this);
  }

  ~PipelineCompileQueue() {
    {
      /* Clear the running flag under the queue lock so a worker cannot evaluate
       * the wait predicate and then block after the final notify (lost wakeup) */
      std::lock_guard<std::mutex> lk(m_mt);
      m_running = false;
    }
    m_cv.notify_all();
    for (auto& t : m_threads)
      t.join();
  }
};

#ifdef BOO_GRAPHICS_DEBUG_GROUPS
template <class CommandQueue>
class GraphicsDebugGroup {
  /* Stack only */
  void* operator new(size_t);
  void operator delete(void*);
  void* operator new[](size_t);
  void operator delete[](void*);
  CommandQueue* m_q;

public:
  explicit GraphicsDebugGroup(CommandQueue* q, const char* name,
                              const std::array<float, 4>& color = {1.f, 1.f, 1.f, 1.f})
  : m_q(q) {
    m_q->pushDebugGroup(name, color);
  }
  ~GraphicsDebugGroup() { m_q->popDebugGroup(); }
};
#define SCOPED_GRAPHICS_DEBUG_GROUP(...) GraphicsDebugGroup _GfxDbg_(__VA_ARGS__);
#else
#define SCOPED_GRAPHICS_DEBUG_GROUP(...)
#endif
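/* Hypothetical usage sketches (the names called out below are assumptions, not
 * boo API guarantees).
 *
 * PipelineCompileQueue: a backend could enqueue freshly-constructed pipelines
 * and block once before the first draw. `VulkanShaderPipeline` and
 * `newPipelines` are assumed names:
 *
 *   PipelineCompileQueue<VulkanShaderPipeline> queue;
 *   for (const ObjToken<IShaderPipeline>& p : newPipelines)
 *     queue.addPipeline(p);
 *   queue.waitUntilReady();  // returns once every queued compile() has run
 *
 * GraphicsDebugGroup: with BOO_GRAPHICS_DEBUG_GROUPS defined, the guard pushes
 * a named group on construction and pops it at scope exit, so group nesting in
 * a frame-capture tool mirrors C++ scope nesting. `q` is assumed to point at a
 * queue type providing pushDebugGroup()/popDebugGroup(); CommandQueue is
 * deduced from the pointer type:
 *
 *   void drawWorld(MyCommandQueue* q) {
 *     SCOPED_GRAPHICS_DEBUG_GROUP(q, "World Pass", std::array<float, 4>{0.f, 1.f, 0.f, 1.f});
 *     // ... issue draw calls for the pass ...
 *   }
 *
 * Without the define, the macro expands to nothing and the group costs nothing.
 */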
class Limiter {
  using delta_clock = std::chrono::steady_clock;
  using nanotime_t = std::chrono::nanoseconds::rep;

public:
  void Sleep(nanotime_t targetFrameTimeNs);

private:
  delta_clock::time_point m_oldTime;
  /* Rolling window of recent sleep-overhead samples used to bias future waits */
  std::array<nanotime_t, 4> m_overheadTimes{};
  size_t m_overheadTimeIdx = 0;
  nanotime_t m_overhead = 0;

  nanotime_t TimeSince(delta_clock::time_point start) {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(delta_clock::now() - start).count();
  }
};

} // namespace boo
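/* Hypothetical frame-loop sketch (the 60 Hz target and renderFrame() are
 * assumptions): Sleep() blocks out the remainder of the frame budget, and the
 * overhead ring buffer above suggests it subtracts its own measured scheduler
 * overshoot from subsequent waits:
 *
 *   const std::chrono::nanoseconds::rep kFrameNs = 1'000'000'000LL / 60;
 *   boo::Limiter limiter;
 *   for (;;) {
 *     renderFrame();            // application draw work
 *     limiter.Sleep(kFrameNs);  // hold the loop to ~16.7 ms per iteration
 *   }
 */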