#pragma once

/* Private header for managing shader data
 * binding lifetimes through the rendering cycle */

#include <array>
#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

#include <optick.h>

#include "boo/graphicsdev/IGraphicsCommandQueue.hpp"
#include "boo/graphicsdev/IGraphicsDataFactory.hpp"
#include "lib/Common.hpp"

namespace boo {

struct BaseGraphicsData;
struct BaseGraphicsPool;

template <class NodeCls, class DataCls = BaseGraphicsData>
struct GraphicsDataNode;

/** Inherited by data factory implementations to track the head data and pool nodes */
struct GraphicsDataFactoryHead {
  std::recursive_mutex m_dataMutex;
  BaseGraphicsData* m_dataHead = nullptr;
  BaseGraphicsPool* m_poolHead = nullptr;

  ~GraphicsDataFactoryHead() {
    assert(m_dataHead == nullptr && "Dangling graphics data detected");
    assert(m_poolHead == nullptr && "Dangling graphics data pools detected");
  }
};
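
/* A minimal sketch of how a backend factory might participate (the class name
 * is hypothetical; real implementations live in the backend sources):
 *
 *   class VulkanDataFactoryImpl : public IGraphicsDataFactory, public GraphicsDataFactoryHead {
 *     // BaseGraphicsData/BaseGraphicsPool nodes link themselves onto
 *     // m_dataHead/m_poolHead under m_dataMutex; the asserts above fire if
 *     // any node outlives the factory.
 *   };
 */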

/** Private generalized data container class.
 * Keeps head pointers to all graphics objects by type.
 */
struct BaseGraphicsData : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*> {
  static BaseGraphicsData*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_dataHead; }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields

  GraphicsDataNode<IShaderStage, BaseGraphicsData>* m_Ss = nullptr;
  GraphicsDataNode<IShaderPipeline, BaseGraphicsData>* m_SPs = nullptr;
  GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>* m_SBinds = nullptr;
  GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>* m_SBufs = nullptr;
  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsData>* m_DBufs = nullptr;
  GraphicsDataNode<ITextureS, BaseGraphicsData>* m_STexs = nullptr;
  GraphicsDataNode<ITextureSA, BaseGraphicsData>* m_SATexs = nullptr;
  GraphicsDataNode<ITextureD, BaseGraphicsData>* m_DTexs = nullptr;
  GraphicsDataNode<ITextureR, BaseGraphicsData>* m_RTexs = nullptr;
  GraphicsDataNode<ITextureCubeR, BaseGraphicsData>* m_CubeRTexs = nullptr;

  template <class T>
  GraphicsDataNode<T, BaseGraphicsData>*& getHead();

  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  explicit BaseGraphicsData(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};
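
/* Usage sketch: the type-indexed heads let callers count live objects of one
 * category without naming the member ('data' is an illustrative instance):
 *
 *   size_t nStaticTexs = data.countForward<ITextureS>();  // walks the m_STexs list
 */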

template <>
inline GraphicsDataNode<IShaderStage, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderStage>() {
  return m_Ss;
}
template <>
inline GraphicsDataNode<IShaderPipeline, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderPipeline>() {
  return m_SPs;
}
template <>
inline GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderDataBinding>() {
  return m_SBinds;
}
template <>
inline GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>*& BaseGraphicsData::getHead<IGraphicsBufferS>() {
  return m_SBufs;
}
template <>
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsData>*& BaseGraphicsData::getHead<IGraphicsBufferD>() {
  return m_DBufs;
}
template <>
inline GraphicsDataNode<ITextureS, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureS>() {
  return m_STexs;
}
template <>
inline GraphicsDataNode<ITextureSA, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureSA>() {
  return m_SATexs;
}
template <>
inline GraphicsDataNode<ITextureD, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureD>() {
  return m_DTexs;
}
template <>
inline GraphicsDataNode<ITextureR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureR>() {
  return m_RTexs;
}
template <>
inline GraphicsDataNode<ITextureCubeR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureCubeR>() {
  return m_CubeRTexs;
}

/** Private generalized pool container class.
 * Keeps a head pointer to exactly one dynamic buffer while otherwise conforming to BaseGraphicsData.
 */
struct BaseGraphicsPool : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*> {
  static BaseGraphicsPool*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_poolHead; }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields

  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>* m_DBufs = nullptr;

  template <class T>
  GraphicsDataNode<T, BaseGraphicsPool>*& getHead();

  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  explicit BaseGraphicsPool(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};

template <>
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>*& BaseGraphicsPool::getHead<IGraphicsBufferD>() {
  return m_DBufs;
}

/** Private generalized graphics object node.
 * Keeps a strong reference to the data pool that it's a member of,
 * as well as doubly-linked pointers to same-type sibling objects.
 */
template <class NodeCls, class DataCls>
struct GraphicsDataNode : ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls> {
  using base = ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls>;

  static GraphicsDataNode<NodeCls, DataCls>*& _getHeadPtr(ObjToken<DataCls>& head) {
    return head->template getHead<NodeCls>();
  }
  static std::unique_lock<std::recursive_mutex> _getHeadLock(ObjToken<DataCls>& head) {
    return std::unique_lock<std::recursive_mutex>{head->m_head->m_dataMutex};
  }

  explicit GraphicsDataNode(const ObjToken<DataCls>& data) : base(data) {}

  class iterator {
    GraphicsDataNode<NodeCls, DataCls>* m_node;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = NodeCls;
    using difference_type = std::ptrdiff_t;
    using pointer = NodeCls*;
    using reference = NodeCls&;

    explicit iterator(GraphicsDataNode<NodeCls, DataCls>* node) : m_node(node) {}
    NodeCls& operator*() const { return *m_node; }
    bool operator==(const iterator& other) const { return m_node == other.m_node; }
    bool operator!=(const iterator& other) const { return m_node != other.m_node; }
    iterator& operator++() {
      m_node = m_node->m_next;
      return *this;
    }
    iterator& operator--() {
      m_node = m_node->m_prev;
      return *this;
    }
  };

  iterator begin() { return iterator(this); }
  iterator end() { return iterator(nullptr); }

  size_t countForward() {
    size_t ret = 0;
    for ([[maybe_unused]] auto& n : *this)
      ++ret;
    return ret;
  }
};
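
/* Iteration sketch: each node doubles as the head of a forward list, so a
 * non-null head can be walked with range-for; dereferencing yields the
 * NodeCls base ('data' and 'visit' are illustrative):
 *
 *   if (auto* head = data.getHead<ITextureS>())
 *     for (ITextureS& tex : *head)
 *       visit(tex);
 */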

void UpdateGammaLUT(ITextureD* tex, float gamma);

/** Generic work queue for asynchronously building shader pipelines on supported backends */
template <class ShaderPipelineType>
class PipelineCompileQueue {
  struct Task {
    ObjToken<IShaderPipeline> m_pipeline;
    explicit Task(ObjToken<IShaderPipeline> pipeline) : m_pipeline(std::move(pipeline)) {}
    void run() { m_pipeline.cast<ShaderPipelineType>()->compile(); }
  };

  std::queue<Task> m_tasks;
  std::atomic_size_t m_outstandingTasks = 0;
  std::vector<std::thread> m_threads;
  std::mutex m_mt;
  std::condition_variable m_cv, m_backcv;
  std::atomic_bool m_running = true;

  void worker() {
    std::unique_lock<std::mutex> lk(m_mt);
    while (m_running) {
      // Sleep until a task arrives or shutdown is requested
      m_cv.wait(lk, [this]() { return !m_tasks.empty() || !m_running; });
      if (!m_running)
        break;
      Task t = std::move(m_tasks.front());
      m_tasks.pop();
      // Compile with the lock released so other workers can dequeue concurrently
      lk.unlock();
      t.run();
      lk.lock();
      // Decrement under the lock and wake any waitUntilReady() callers
      --m_outstandingTasks;
      m_backcv.notify_all();
    }
  }

public:
  void addPipeline(ObjToken<IShaderPipeline> pipeline) {
    std::lock_guard<std::mutex> lk(m_mt);
    m_tasks.emplace(std::move(pipeline));
    ++m_outstandingTasks;
    m_cv.notify_one();
  }

  void waitUntilReady() {
    std::unique_lock<std::mutex> lk(m_mt);
    m_backcv.wait(lk, [this]() { return m_outstandingTasks == 0 || !m_running; });
  }

  bool isReady() const { return m_outstandingTasks == 0 || !m_running; }

  PipelineCompileQueue() {
    // Use every hardware thread but one, keeping a core free (presumably for
    // the render thread)
    unsigned int numThreads = std::thread::hardware_concurrency();
    if (numThreads > 1)
      --numThreads;
    m_threads.reserve(numThreads);
    for (unsigned int i = 0; i < numThreads; ++i)
      m_threads.emplace_back(&PipelineCompileQueue::worker, this);
  }

  ~PipelineCompileQueue() {
    {
      // Flip m_running while holding the lock so a worker cannot re-check its
      // wait predicate and block between our store and the notify below
      // (missed-wakeup race)
      std::lock_guard<std::mutex> lk(m_mt);
      m_running = false;
    }
    m_cv.notify_all();
    for (auto& t : m_threads)
      t.join();
  }
};
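
/* Usage sketch ('MyShaderPipeline' is a hypothetical backend pipeline type
 * exposing compile(); 'pipelineTok' an existing ObjToken<IShaderPipeline>):
 *
 *   PipelineCompileQueue<MyShaderPipeline> queue;  // spawns worker threads
 *   queue.addPipeline(pipelineTok);                // enqueues an async compile
 *   queue.waitUntilReady();                        // blocks until the queue drains
 */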

#ifdef BOO_GRAPHICS_DEBUG_GROUPS
template <typename CommandQueue>
class GraphicsDebugGroup {
  /* Stack only: heap allocation is deleted so the group cannot outlive its scope */
  void* operator new(size_t) = delete;
  void operator delete(void*) = delete;
  void* operator new[](size_t) = delete;
  void operator delete[](void*) = delete;
  CommandQueue* m_q;

public:
  explicit GraphicsDebugGroup(CommandQueue* q, const char* name,
                              const std::array<float, 4>& color = {1.f, 1.f, 1.f, 1.f})
  : m_q(q) {
    m_q->pushDebugGroup(name, color);
  }
  ~GraphicsDebugGroup() { m_q->popDebugGroup(); }
};
#define SCOPED_GRAPHICS_DEBUG_GROUP(...) GraphicsDebugGroup _GfxDbg_(__VA_ARGS__);
#else
#define SCOPED_GRAPHICS_DEBUG_GROUP(_, name, ...) OPTICK_EVENT(name)
#endif
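
/* Usage sketch ('q' is a hypothetical CommandQueue*). With
 * BOO_GRAPHICS_DEBUG_GROUPS defined this pushes an RAII debug group on the
 * queue and pops it at scope exit; otherwise it degrades to an Optick event:
 *
 *   SCOPED_GRAPHICS_DEBUG_GROUP(q, "Render sky pass");
 */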

class Limiter {
  using delta_clock = std::chrono::steady_clock;
  using nanotime_t = std::chrono::nanoseconds::rep;

public:
  void Sleep(nanotime_t targetFrameTimeNs);

private:
  delta_clock::time_point m_oldTime;
  std::array<nanotime_t, 4> m_overheadTimes{};
  size_t m_overheadTimeIdx = 0;
  nanotime_t m_overhead = 0;

  nanotime_t TimeSince(delta_clock::time_point start) {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(delta_clock::now() - start).count();
  }
};
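
/* Sleep() is defined out-of-line; judging from the members above it sleeps off
 * the remaining frame budget and averages recent oversleep through the
 * four-entry ring m_overheadTimes into m_overhead, compensating the next
 * frame. A minimal sketch under those assumptions (not the actual definition):
 *
 *   void Limiter::Sleep(nanotime_t targetFrameTimeNs) {
 *     nanotime_t sleepTime = targetFrameTimeNs - TimeSince(m_oldTime) - m_overhead;
 *     auto start = delta_clock::now();
 *     if (sleepTime > 0)
 *       std::this_thread::sleep_for(std::chrono::nanoseconds(sleepTime));
 *     // Record how far past the request we actually slept, then re-average
 *     m_overheadTimes[m_overheadTimeIdx] = TimeSince(start) - std::max<nanotime_t>(sleepTime, 0);
 *     m_overheadTimeIdx = (m_overheadTimeIdx + 1) % m_overheadTimes.size();
 *     nanotime_t sum = 0;
 *     for (nanotime_t t : m_overheadTimes)
 *       sum += t;
 *     m_overhead = sum / nanotime_t(m_overheadTimes.size());
 *     m_oldTime = delta_clock::now();
 *   }
 */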

} // namespace boo