2018-10-07 03:36:44 +00:00
|
|
|
#pragma once
|
2017-01-20 03:52:40 +00:00
|
|
|
|
|
|
|
/* Private header for managing shader data
 * binding lifetimes through the rendering cycle */
|
|
|
|
|
2017-01-21 00:19:18 +00:00
|
|
|
#include <atomic>
|
2017-03-14 07:02:53 +00:00
|
|
|
#include <vector>
|
2017-11-03 09:39:26 +00:00
|
|
|
#include <mutex>
|
2017-12-03 06:05:16 +00:00
|
|
|
#include <cassert>
|
2017-01-20 03:52:40 +00:00
|
|
|
#include "boo/graphicsdev/IGraphicsDataFactory.hpp"
|
2017-12-03 06:05:16 +00:00
|
|
|
#include "../Common.hpp"
|
2017-01-20 03:52:40 +00:00
|
|
|
|
2018-12-08 05:17:51 +00:00
|
|
|
namespace boo {
|
2017-01-20 03:52:40 +00:00
|
|
|
|
2017-11-03 09:39:26 +00:00
|
|
|
struct BaseGraphicsData;
|
|
|
|
struct BaseGraphicsPool;
|
|
|
|
|
2018-12-08 05:17:51 +00:00
|
|
|
template <class NodeCls, class DataCls = BaseGraphicsData>
|
2017-11-03 09:39:26 +00:00
|
|
|
struct GraphicsDataNode;
|
|
|
|
|
|
|
|
/** Inherited by data factory implementations to track the head data and pool nodes */
|
2018-12-08 05:17:51 +00:00
|
|
|
struct GraphicsDataFactoryHead {
|
|
|
|
std::recursive_mutex m_dataMutex;
|
|
|
|
BaseGraphicsData* m_dataHead = nullptr;
|
|
|
|
BaseGraphicsPool* m_poolHead = nullptr;
|
|
|
|
|
|
|
|
~GraphicsDataFactoryHead() {
|
|
|
|
assert(m_dataHead == nullptr && "Dangling graphics data pools detected");
|
|
|
|
assert(m_poolHead == nullptr && "Dangling graphics data pools detected");
|
|
|
|
}
|
2017-11-05 06:12:49 +00:00
|
|
|
};
|
|
|
|
|
2017-11-03 09:39:26 +00:00
|
|
|
/** Private generalized data container class.
|
|
|
|
* Keeps head pointers to all graphics objects by type
|
|
|
|
*/
|
2018-12-08 05:17:51 +00:00
|
|
|
/** Tracks every graphics object created against one factory, grouped per concrete type.
 *  Lives on the factory head's intrusive data list via the ListNode CRTP base. */
struct BaseGraphicsData : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*> {
  /* ListNode hook: the factory-wide head pointer this node links into. */
  static BaseGraphicsData*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_dataHead; }
  /* ListNode hook: lock guarding the factory-wide list during insert/remove. */
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields  // trace-field macro from Common.hpp; presumably debug bookkeeping -- TODO confirm

  /* Head pointers of the per-type object lists owned by this container. */
  GraphicsDataNode<IShaderStage, BaseGraphicsData>* m_Ss = nullptr;            // shader stages
  GraphicsDataNode<IShaderPipeline, BaseGraphicsData>* m_SPs = nullptr;        // shader pipelines
  GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>* m_SBinds = nullptr;  // shader data bindings
  GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>* m_SBufs = nullptr;     // static buffers
  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsData>* m_DBufs = nullptr;     // dynamic buffers
  GraphicsDataNode<ITextureS, BaseGraphicsData>* m_STexs = nullptr;            // static textures
  GraphicsDataNode<ITextureSA, BaseGraphicsData>* m_SATexs = nullptr;          // static array textures
  GraphicsDataNode<ITextureD, BaseGraphicsData>* m_DTexs = nullptr;            // dynamic textures
  GraphicsDataNode<ITextureR, BaseGraphicsData>* m_RTexs = nullptr;            // render textures

  /* Returns the list head for object type T; specialized below for each supported type. */
  template <class T>
  GraphicsDataNode<T, BaseGraphicsData>*& getHead();
  /* Number of live objects of type T in this container (0 when the list is empty). */
  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  /* ListNode override: lock held while this node unlinks itself on destruction. */
  std::unique_lock<std::recursive_mutex> destructorLock() override {
    return std::unique_lock<std::recursive_mutex>{m_head->m_dataMutex};
  }

  /* Links this container onto the factory head's data list on construction. */
  explicit BaseGraphicsData(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsData, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};
|
|
|
|
|
2018-12-08 05:17:51 +00:00
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<IShaderStage, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderStage>() {
|
|
|
|
return m_Ss;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<IShaderPipeline, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderPipeline>() {
|
|
|
|
return m_SPs;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<IShaderDataBinding, BaseGraphicsData>*& BaseGraphicsData::getHead<IShaderDataBinding>() {
|
|
|
|
return m_SBinds;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<IGraphicsBufferS, BaseGraphicsData>*& BaseGraphicsData::getHead<IGraphicsBufferS>() {
|
|
|
|
return m_SBufs;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsData>*& BaseGraphicsData::getHead<IGraphicsBufferD>() {
|
|
|
|
return m_DBufs;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<ITextureS, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureS>() {
|
|
|
|
return m_STexs;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<ITextureSA, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureSA>() {
|
|
|
|
return m_SATexs;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<ITextureD, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureD>() {
|
|
|
|
return m_DTexs;
|
|
|
|
}
|
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<ITextureR, BaseGraphicsData>*& BaseGraphicsData::getHead<ITextureR>() {
|
|
|
|
return m_RTexs;
|
|
|
|
}
|
2017-11-03 09:39:26 +00:00
|
|
|
|
|
|
|
/** Private generalized pool container class.
|
2017-11-05 06:12:49 +00:00
|
|
|
* Keeps head pointer to exactly one dynamic buffer while otherwise conforming to BaseGraphicsData
|
2017-11-03 09:39:26 +00:00
|
|
|
*/
|
2018-12-08 05:17:51 +00:00
|
|
|
/** Pool counterpart of BaseGraphicsData: keeps exactly one dynamic-buffer list
 *  while otherwise following the same ListNode/getHead structure. */
struct BaseGraphicsPool : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*> {
  /* ListNode hook: the factory-wide pool head pointer this node links into. */
  static BaseGraphicsPool*& _getHeadPtr(GraphicsDataFactoryHead* head) { return head->m_poolHead; }
  /* ListNode hook: lock guarding the factory-wide list during insert/remove
   * (shares m_dataMutex with the data list). */
  static std::unique_lock<std::recursive_mutex> _getHeadLock(GraphicsDataFactoryHead* head) {
    return std::unique_lock<std::recursive_mutex>{head->m_dataMutex};
  }

  __BooTraceFields  // trace-field macro from Common.hpp; presumably debug bookkeeping -- TODO confirm

  /* Sole per-type list: dynamic buffers owned by this pool. */
  GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>* m_DBufs = nullptr;

  /* Returns the list head for object type T; specialized below (dynamic buffers only). */
  template <class T>
  GraphicsDataNode<T, BaseGraphicsPool>*& getHead();
  /* Number of live objects of type T in this pool (0 when the list is empty). */
  template <class T>
  size_t countForward() {
    auto* head = getHead<T>();
    return head ? head->countForward() : 0;
  }

  /* ListNode override: lock held while this node unlinks itself on destruction. */
  std::unique_lock<std::recursive_mutex> destructorLock() override {
    return std::unique_lock<std::recursive_mutex>{m_head->m_dataMutex};
  }

  /* Links this pool onto the factory head's pool list on construction. */
  explicit BaseGraphicsPool(GraphicsDataFactoryHead& head __BooTraceArgs)
  : ListNode<BaseGraphicsPool, GraphicsDataFactoryHead*>(&head) __BooTraceInitializer {}
};
|
|
|
|
|
2018-12-08 05:17:51 +00:00
|
|
|
template <>
|
|
|
|
inline GraphicsDataNode<IGraphicsBufferD, BaseGraphicsPool>*& BaseGraphicsPool::getHead<IGraphicsBufferD>() {
|
|
|
|
return m_DBufs;
|
|
|
|
}
|
2017-11-03 09:39:26 +00:00
|
|
|
|
|
|
|
/** Private generalised graphics object node.
|
|
|
|
* Keeps a strong reference to the data pool that it's a member of;
|
|
|
|
* as well as doubly-linked pointers to same-type sibling objects
|
|
|
|
*/
|
2018-12-08 05:17:51 +00:00
|
|
|
/** Private generalized graphics object node.
 *  Keeps a strong reference (ObjToken) to the data/pool container it belongs to,
 *  plus doubly-linked pointers to same-type sibling objects via ListNode. */
template <class NodeCls, class DataCls>
struct GraphicsDataNode : ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls> {
  using base = ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls>;
  /* ListNode hook: the per-type head pointer inside the owning container. */
  static GraphicsDataNode<NodeCls, DataCls>*& _getHeadPtr(ObjToken<DataCls>& head) {
    return head->template getHead<NodeCls>();
  }
  /* ListNode hook: container -> factory head -> shared data mutex. */
  static std::unique_lock<std::recursive_mutex> _getHeadLock(ObjToken<DataCls>& head) {
    return std::unique_lock<std::recursive_mutex>{head->m_head->m_dataMutex};
  }

  /* ListNode override: lock held while this node unlinks itself on destruction. */
  std::unique_lock<std::recursive_mutex> destructorLock() override {
    return std::unique_lock<std::recursive_mutex>{base::m_head->m_head->m_dataMutex};
  }

  /* Links this node onto its container's per-type list on construction. */
  explicit GraphicsDataNode(const ObjToken<DataCls>& data)
  : ListNode<GraphicsDataNode<NodeCls, DataCls>, ObjToken<DataCls>, NodeCls>(data) {}

  /* Bidirectional iterator over same-type siblings, starting at a given node.
   * NOTE(review): only operator!= is provided -- enough for range-for, but not a
   * fully conforming bidirectional iterator (no operator==); confirm before using
   * with standard algorithms. */
  class iterator {
    GraphicsDataNode<NodeCls, DataCls>* m_node;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = NodeCls;
    using difference_type = std::ptrdiff_t;
    using pointer = NodeCls*;
    using reference = NodeCls&;

    explicit iterator(GraphicsDataNode<NodeCls, DataCls>* node) : m_node(node) {}
    NodeCls& operator*() const { return *m_node; }
    bool operator!=(const iterator& other) const { return m_node != other.m_node; }
    iterator& operator++() {
      m_node = m_node->m_next;
      return *this;
    }
    iterator& operator--() {
      m_node = m_node->m_prev;
      return *this;
    }
  };

  /* Range covers this node through the tail of the sibling list (forward from here only). */
  iterator begin() { return iterator(this); }
  iterator end() { return iterator(nullptr); }

  /* Counts nodes from this one to the list tail; the loop variable is intentionally unused. */
  size_t countForward() {
    size_t ret = 0;
    for (auto& n : *this)
      ++ret;
    return ret;
  }
};
|
|
|
|
|
2018-01-20 03:02:29 +00:00
|
|
|
void UpdateGammaLUT(ITextureD* tex, float gamma);
|
|
|
|
|
2018-12-08 05:17:51 +00:00
|
|
|
} // namespace boo
|