mirror of https://github.com/AxioDL/boo.git
IObj destructor race condition fix
parent d04c19a258
commit 1a2fc1d2a3
@@ -2,6 +2,7 @@
 #define BOOOBJECT_HPP
 
 #include <atomic>
+#include <mutex>
 
 namespace boo
 {
@@ -9,13 +10,26 @@ namespace boo
 class IObj
 {
     std::atomic_int m_refCount = {0};
+protected:
+    std::recursive_mutex* m_mutex = nullptr;
 public:
     virtual ~IObj() = default;
     void increment() { m_refCount++; }
     void decrement()
     {
         if (m_refCount.fetch_sub(1) == 1)
-            delete this;
+        {
+            if (std::recursive_mutex* mutex = m_mutex)
+            {
+                mutex->lock();
+                delete this;
+                mutex->unlock();
+            }
+            else
+            {
+                delete this;
+            }
+        }
     }
 };
 
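This hunk is the core of the fix. Previously the final decrement() deleted the object with no lock held, so the destructor's unlinking from the intrusive factory-head list (see the destructor hunks below) could interleave with another thread walking that same list. Now the delete happens while holding the mutex registered by the owning head. The following is a rough, self-contained sketch of the pattern; the Obj/ListHead names and the forEach helper are simplified stand-ins, not boo's actual IObj/ObjToken/GraphicsDataFactoryHead API:

#include <atomic>
#include <mutex>

// Simplified stand-ins for IObj and the factory head.
struct Obj
{
    std::atomic_int m_refCount{0};
    std::recursive_mutex* m_mutex = nullptr; // points at the owning head's m_dataMutex
    Obj* m_next = nullptr;

    virtual ~Obj() = default; // real subclasses unlink themselves from the list here

    void increment() { m_refCount++; }
    void decrement()
    {
        if (m_refCount.fetch_sub(1) == 1)
        {
            if (std::recursive_mutex* mutex = m_mutex)
            {
                mutex->lock();
                delete this;     // destructor unlinks while the list is locked
                mutex->unlock(); // fine: the mutex is owned by the head, not by this object
            }
            else
            {
                delete this;
            }
        }
    }
};

struct ListHead
{
    std::recursive_mutex m_dataMutex;
    Obj* m_head = nullptr;

    // The traversal a command queue performs; it takes the same mutex,
    // so it can never observe a node that is mid-destruction.
    template <class F> void forEach(F&& f)
    {
        std::unique_lock<std::recursive_mutex> lk(m_dataMutex);
        for (Obj* o = m_head; o; o = o->m_next)
            f(*o);
    }
};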
@@ -21,7 +21,7 @@ struct GraphicsDataNode;
 /** Inherited by data factory implementations to track the head data and pool nodes */
 struct GraphicsDataFactoryHead
 {
-    std::mutex m_dataMutex;
+    std::recursive_mutex m_dataMutex;
     BaseGraphicsData* m_dataHead = nullptr;
     BaseGraphicsPool* m_poolHead = nullptr;
 };
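Switching the head's mutex to std::recursive_mutex appears necessary because the delete path above can run while the same thread already holds m_dataMutex: the command-queue hunks further down lock it for the whole dynamic-upload walk, and dropping the last reference to a node during that walk re-enters the lock from decrement(). A minimal illustration of that re-entrant locking, with hypothetical names:

#include <mutex>

std::recursive_mutex dataMutex;

void dropLastReferenceDuringWalk()
{
    std::unique_lock<std::recursive_mutex> datalk(dataMutex); // held while iterating the list
    // ... iteration decides a node is no longer referenced ...
    {
        // decrement()'s delete path locks the same mutex on the same thread;
        // a plain std::mutex would deadlock here, a recursive_mutex just
        // increments its ownership count.
        std::lock_guard<std::recursive_mutex> lk(dataMutex);
        // the node's destructor unlinks it here
    }
}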
@@ -68,7 +68,8 @@ struct BaseGraphicsData : IObj
     explicit BaseGraphicsData(GraphicsDataFactoryHead& head)
     : m_head(head)
     {
-        std::lock_guard<std::mutex> lk(m_head.m_dataMutex);
+        IObj::m_mutex = &m_head.m_dataMutex;
+        std::lock_guard<std::recursive_mutex> lk(m_head.m_dataMutex);
         m_next = head.m_dataHead;
         if (m_next)
             m_next->m_prev = this;
@@ -76,7 +77,6 @@ struct BaseGraphicsData : IObj
     }
     ~BaseGraphicsData()
     {
-        std::lock_guard<std::mutex> lk(m_head.m_dataMutex);
         if (m_prev)
         {
             if (m_next)
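The constructor/destructor hunks for BaseGraphicsData (and, below, the analogous ones for BaseGraphicsPool and GraphicsDataNode) follow a single pattern: the constructor records the head's mutex in IObj::m_mutex and links the node under that lock, while the destructor no longer locks at all, because decrement() already owns the mutex when it deletes. A condensed sketch of that split responsibility, using simplified types rather than the exact boo classes:

#include <mutex>

struct Head
{
    std::recursive_mutex m_dataMutex;
    struct Data* m_dataHead = nullptr;
};

struct Data
{
    Head& m_head;
    Data* m_prev = nullptr;
    Data* m_next = nullptr;
    std::recursive_mutex* m_mutex = nullptr; // stands in for IObj::m_mutex

    explicit Data(Head& head) : m_head(head)
    {
        m_mutex = &head.m_dataMutex; // register first so decrement() can find the lock
        std::lock_guard<std::recursive_mutex> lk(head.m_dataMutex);
        m_next = head.m_dataHead;
        if (m_next)
            m_next->m_prev = this;
        head.m_dataHead = this;
    }

    ~Data()
    {
        // No lock_guard here anymore: the thread calling delete
        // (IObj::decrement in the real code) already holds m_dataMutex.
        if (m_prev)
            m_prev->m_next = m_next;
        else
            m_head.m_dataHead = m_next;
        if (m_next)
            m_next->m_prev = m_prev;
    }
};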
@@ -131,7 +131,8 @@ struct BaseGraphicsPool : IObj
     explicit BaseGraphicsPool(GraphicsDataFactoryHead& head)
     : m_head(head)
    {
-        std::lock_guard<std::mutex> lk(m_head.m_dataMutex);
+        IObj::m_mutex = &m_head.m_dataMutex;
+        std::lock_guard<std::recursive_mutex> lk(m_head.m_dataMutex);
         m_next = head.m_poolHead;
         if (m_next)
             m_next->m_prev = this;
@@ -139,7 +140,6 @@ struct BaseGraphicsPool : IObj
     }
     ~BaseGraphicsPool()
     {
-        std::lock_guard<std::mutex> lk(m_head.m_dataMutex);
         if (m_prev)
         {
             if (m_next)
@@ -176,7 +176,8 @@ struct GraphicsDataNode : NodeCls
     explicit GraphicsDataNode(const ObjToken<DataCls>& data)
     : m_data(data)
     {
-        std::lock_guard<std::mutex> lk(m_data->m_head.m_dataMutex);
+        IObj::m_mutex = &m_data->m_head.m_dataMutex;
+        std::lock_guard<std::recursive_mutex> lk(m_data->m_head.m_dataMutex);
         m_next = data->template getHead<NodeCls>();
         if (m_next)
             m_next->m_prev = this;
@@ -184,7 +185,6 @@ struct GraphicsDataNode : NodeCls
     }
     ~GraphicsDataNode()
    {
-        std::lock_guard<std::mutex> lk(m_data->m_head.m_dataMutex);
         if (m_prev)
         {
             if (m_next)
@@ -1509,7 +1509,7 @@ void D3D11CommandQueue::ProcessDynamicLoads(ID3D11DeviceContext* ctx)
 {
     D3D11DataFactory* gfxF = static_cast<D3D11DataFactory*>(m_parent->getDataFactory());
     std::unique_lock<std::recursive_mutex> lk(m_dynamicLock);
-    std::unique_lock<std::mutex> datalk(gfxF->m_dataMutex);
+    std::unique_lock<std::recursive_mutex> datalk(gfxF->m_dataMutex);
 
     if (gfxF->m_dataHead)
     {
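The remaining graphics-backend hunks (D3D11 above, then D3D12, GL, Metal and Vulkan) are the mechanical follow-up to the mutex type change: std::unique_lock is templated on the mutex type, so std::unique_lock<std::mutex> no longer compiles against the now-recursive m_dataMutex and every dynamic-upload walk must spell the new type. Roughly, each walk now looks like the sketch below; Factory, processDynamicLoads and the loop body are placeholders, not the backend classes themselves:

#include <mutex>

struct Factory
{
    std::recursive_mutex m_dataMutex; // was std::mutex before this commit
    struct Data* m_dataHead = nullptr;
};

void processDynamicLoads(Factory* gfxF)
{
    // The lock's template argument must match the member's type exactly.
    std::unique_lock<std::recursive_mutex> datalk(gfxF->m_dataMutex);
    if (gfxF->m_dataHead)
    {
        // ... stage dynamic uploads for each node in the intrusive list ...
    }
}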
@@ -2078,7 +2078,7 @@ void D3D12CommandQueue::execute()
 
     /* Stage dynamic uploads */
     D3D12DataFactory* gfxF = static_cast<D3D12DataFactory*>(m_parent->getDataFactory());
-    std::unique_lock<std::mutex> datalk(gfxF->m_dataMutex);
+    std::unique_lock<std::recursive_mutex> datalk(gfxF->m_dataMutex);
     if (gfxF->m_dataHead)
     {
         for (BaseGraphicsData& d : *gfxF->m_dataHead)
@@ -1457,7 +1457,7 @@ struct GLCommandQueue : IGraphicsCommandQueue
 
     /* Update dynamic data here */
     GLDataFactoryImpl* gfxF = static_cast<GLDataFactoryImpl*>(m_parent->getDataFactory());
-    std::unique_lock<std::mutex> datalk(gfxF->m_dataMutex);
+    std::unique_lock<std::recursive_mutex> datalk(gfxF->m_dataMutex);
     if (gfxF->m_dataHead)
     {
         for (BaseGraphicsData& d : *gfxF->m_dataHead)
@@ -1231,7 +1231,7 @@ struct MetalCommandQueue : IGraphicsCommandQueue
 
     /* Update dynamic data here */
     MetalDataFactoryImpl* gfxF = static_cast<MetalDataFactoryImpl*>(m_parent->getDataFactory());
-    std::unique_lock<std::mutex> datalk(gfxF->m_dataMutex);
+    std::unique_lock<std::recursive_mutex> datalk(gfxF->m_dataMutex);
     if (gfxF->m_dataHead)
     {
         for (BaseGraphicsData& d : *gfxF->m_dataHead)
@@ -3558,7 +3558,7 @@ void VulkanCommandQueue::execute()
 
     /* Stage dynamic uploads */
     VulkanDataFactoryImpl* gfxF = static_cast<VulkanDataFactoryImpl*>(m_parent->getDataFactory());
-    std::unique_lock<std::mutex> datalk(gfxF->m_dataMutex);
+    std::unique_lock<std::recursive_mutex> datalk(gfxF->m_dataMutex);
     if (gfxF->m_dataHead)
     {
         for (BaseGraphicsData& d : *gfxF->m_dataHead)
@@ -120,7 +120,8 @@ public:
         /* Spawn client thread */
         m_clientThread = std::thread([&]()
         {
-            logvisor::RegisterThreadName("Boo Client Thread");
+            std::string thrName = getFriendlyName() + " Client Thread";
+            logvisor::RegisterThreadName(thrName.c_str());
 
             /* Run app */
             m_clientReturn = m_callback.appMain(this);
@@ -205,7 +206,8 @@ int ApplicationRun(IApplication::EPlatformType platform,
                    const std::vector<SystemString>& args,
                    bool singleInstance)
 {
-    logvisor::RegisterThreadName("Boo Main Thread");
+    std::string thrName = friendlyName + " Main Thread";
+    logvisor::RegisterThreadName(thrName.c_str());
     @autoreleasepool
     {
         if (!APP)
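These last two hunks look like an unrelated quality-of-life change bundled into the commit: the main and client threads are now registered with logvisor under the application's friendly name instead of a fixed "Boo ..." literal, which keeps log output from multiple boo-based apps distinguishable. A standalone sketch of that pattern, assuming logvisor's usual logvisor/logvisor.hpp header; "MyApp" is a placeholder name:

#include <logvisor/logvisor.hpp>
#include <string>
#include <thread>

int main()
{
    std::string friendlyName = "MyApp";

    // Name the main thread after the application.
    std::string thrName = friendlyName + " Main Thread";
    logvisor::RegisterThreadName(thrName.c_str());

    // The client thread gets a matching, app-specific name.
    std::thread client([&]()
    {
        std::string clientName = friendlyName + " Client Thread";
        logvisor::RegisterThreadName(clientName.c_str());
        // ... appMain would run here ...
    });
    client.join();
    return 0;
}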