mirror of https://github.com/AxioDL/metaforce.git
Merge pull request #11 from lioncash/lock
General: Make use of lock deduction guides
commit 3ef555dba4
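The change is mechanical: every lock declaration drops the explicit std::mutex template argument and lets C++17 class template argument deduction (CTAD) pick it up from the constructor argument. A minimal before/after sketch of the pattern (the names are illustrative, not taken from this diff):

#include <mutex>

std::mutex m;

void before() {
  // Pre-CTAD spelling: the mutex type is written out explicitly.
  std::lock_guard<std::mutex> lk(m);
}

void after() {
  // C++17 CTAD: std::lock_guard, std::unique_lock and std::scoped_lock
  // deduce the mutex type from their constructor argument.
  std::lock_guard lk{m};
}

Both functions produce the same lock type; only the spelling changes.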
@@ -71,7 +71,7 @@ void ClientProcess::BufferTransaction::run(blender::Token& btok) {
 void ClientProcess::CookTransaction::run(blender::Token& btok) {
   m_dataSpec->setThreadProject();
   m_returnResult = m_parent.syncCook(m_path, m_dataSpec, btok, m_force, m_fast);
-  std::unique_lock<std::mutex> lk(m_parent.m_mutex);
+  std::unique_lock lk{m_parent.m_mutex};
   ++m_parent.m_completedCooks;
   m_parent.m_progPrinter->setMainFactor(m_parent.m_completedCooks / float(m_parent.m_addedCooks));
   m_complete = true;
@@ -92,7 +92,7 @@ void ClientProcess::Worker::proc() {
   std::string thrName = fmt::format(fmt("HECL Worker {}"), m_idx);
   logvisor::RegisterThreadName(thrName.c_str());
 
-  std::unique_lock<std::mutex> lk(m_proc.m_mutex);
+  std::unique_lock lk{m_proc.m_mutex};
   while (m_proc.m_running) {
     if (!m_didInit) {
       m_proc.m_initCv.notify_one();
@@ -125,7 +125,7 @@ ClientProcess::ClientProcess(const MultiProgressPrinter* progPrinter) : m_progPr
 #endif
   m_workers.reserve(cpuCount);
   for (int i = 0; i < cpuCount; ++i) {
-    std::unique_lock<std::mutex> lk(m_mutex);
+    std::unique_lock lk{m_mutex};
     m_workers.emplace_back(*this, m_workers.size());
     m_initCv.wait(lk);
   }
@@ -134,7 +134,7 @@ ClientProcess::ClientProcess(const MultiProgressPrinter* progPrinter) : m_progPr
 std::shared_ptr<const ClientProcess::BufferTransaction> ClientProcess::addBufferTransaction(const ProjectPath& path,
                                                                                              void* target, size_t maxLen,
                                                                                              size_t offset) {
-  std::unique_lock<std::mutex> lk(m_mutex);
+  std::unique_lock lk{m_mutex};
   auto ret = std::make_shared<BufferTransaction>(*this, path, target, maxLen, offset);
   m_pendingQueue.emplace_back(ret);
   m_cv.notify_one();
@@ -144,7 +144,7 @@ std::shared_ptr<const ClientProcess::BufferTransaction> ClientProcess::addBuffer
 std::shared_ptr<const ClientProcess::CookTransaction> ClientProcess::addCookTransaction(const hecl::ProjectPath& path,
                                                                                          bool force, bool fast,
                                                                                          Database::IDataSpec* spec) {
-  std::unique_lock<std::mutex> lk(m_mutex);
+  std::unique_lock lk{m_mutex};
   auto ret = std::make_shared<CookTransaction>(*this, path, force, fast, spec);
   m_pendingQueue.emplace_back(ret);
   m_cv.notify_one();
@@ -155,7 +155,7 @@ std::shared_ptr<const ClientProcess::CookTransaction> ClientProcess::addCookTran
 
 std::shared_ptr<const ClientProcess::LambdaTransaction>
 ClientProcess::addLambdaTransaction(std::function<void(blender::Token&)>&& func) {
-  std::unique_lock<std::mutex> lk(m_mutex);
+  std::unique_lock lk{m_mutex};
   auto ret = std::make_shared<LambdaTransaction>(*this, std::move(func));
   m_pendingQueue.emplace_back(ret);
   m_cv.notify_one();
@@ -204,12 +204,12 @@ bool ClientProcess::syncCook(const hecl::ProjectPath& path, Database::IDataSpec*
 }
 
 void ClientProcess::swapCompletedQueue(std::list<std::shared_ptr<Transaction>>& queue) {
-  std::unique_lock<std::mutex> lk(m_mutex);
+  std::unique_lock lk{m_mutex};
   queue.swap(m_completedQueue);
 }
 
 void ClientProcess::waitUntilComplete() {
-  std::unique_lock<std::mutex> lk(m_mutex);
+  std::unique_lock lk{m_mutex};
   while (isBusy())
     m_waitCv.wait(lk);
 }
@@ -217,7 +217,7 @@ void ClientProcess::waitUntilComplete() {
 void ClientProcess::shutdown() {
   if (!m_running)
     return;
-  std::unique_lock<std::mutex> lk(m_mutex);
+  std::unique_lock lk{m_mutex};
   m_pendingQueue.clear();
   m_running = false;
   m_cv.notify_all();
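Note that the ClientProcess call sites above stay on std::unique_lock rather than switching to std::lock_guard: the lock object is handed to condition-variable waits (m_initCv.wait(lk), m_waitCv.wait(lk)), which must be able to unlock and relock it. A rough sketch of that pattern, using free-standing names rather than the repository's members:

#include <condition_variable>
#include <mutex>

std::mutex mtx;
std::condition_variable cv;
bool ready = false;

void waiter() {
  // condition_variable::wait requires a std::unique_lock<std::mutex>;
  // CTAD still deduces the template argument from mtx.
  std::unique_lock lk{mtx};
  cv.wait(lk, [] { return ready; });  // releases mtx while blocked, reacquires on wakeup
}

void signal_ready() {
  {
    std::lock_guard lk{mtx};  // a plain scope-bound lock is enough on this side
    ready = true;
  }
  cv.notify_one();
}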
@@ -242,9 +242,12 @@ void MultiProgressPrinter::DoPrint() {
 void MultiProgressPrinter::LogProc() {
   while (m_running) {
     std::this_thread::sleep_for(std::chrono::milliseconds(100));
-    if (!m_dirty && !m_mainIndeterminate)
+
+    if (!m_dirty && !m_mainIndeterminate) {
       continue;
-    std::lock_guard<std::mutex> lk(m_logLock);
+    }
+
+    std::lock_guard lk{m_logLock};
     DoPrint();
   }
 }
@@ -280,22 +283,30 @@ MultiProgressPrinter::~MultiProgressPrinter() {
 
 void MultiProgressPrinter::print(const hecl::SystemChar* message, const hecl::SystemChar* submessage, float factor,
                                  int threadIdx) const {
-  if (!m_running)
+  if (!m_running) {
     return;
-  std::lock_guard<std::mutex> lk(m_logLock);
-  if (threadIdx < 0)
+  }
+
+  std::lock_guard lk{m_logLock};
+  if (threadIdx < 0) {
     threadIdx = 0;
-  if (threadIdx >= m_threadStats.size())
+  }
+  if (threadIdx >= m_threadStats.size()) {
     m_threadStats.resize(threadIdx + 1);
+  }
+
   ThreadStat& stat = m_threadStats[threadIdx];
-  if (message)
+  if (message) {
     stat.m_message = message;
-  else
+  } else {
     stat.m_message.clear();
-  if (submessage)
+  }
+  if (submessage) {
     stat.m_submessage = submessage;
-  else
+  } else {
     stat.m_submessage.clear();
+  }
+
   stat.m_factor = factor;
   stat.m_active = true;
   m_latestThread = threadIdx;
@@ -303,18 +314,23 @@ void MultiProgressPrinter::print(const hecl::SystemChar* message, const hecl::Sy
 }
 
 void MultiProgressPrinter::setMainFactor(float factor) const {
-  if (!m_running)
+  if (!m_running) {
     return;
-  std::lock_guard<std::mutex> lk(m_logLock);
-  if (!m_mainIndeterminate)
+  }
+
+  std::lock_guard lk{m_logLock};
+  if (!m_mainIndeterminate) {
     m_dirty = true;
+  }
   m_mainFactor = factor;
 }
 
 void MultiProgressPrinter::setMainIndeterminate(bool indeterminate) const {
-  if (!m_running)
+  if (!m_running) {
     return;
-  std::lock_guard<std::mutex> lk(m_logLock);
+  }
+
+  std::lock_guard lk{m_logLock};
   if (m_mainIndeterminate != indeterminate) {
     m_mainIndeterminate = indeterminate;
     m_dirty = true;
@@ -322,9 +338,11 @@ void MultiProgressPrinter::setMainIndeterminate(bool indeterminate) const {
 }
 
 void MultiProgressPrinter::startNewLine() const {
-  if (!m_running)
+  if (!m_running) {
     return;
-  std::lock_guard<std::mutex> lk(m_logLock);
+  }
+
+  std::lock_guard lk{m_logLock};
   const_cast<MultiProgressPrinter&>(*this).DoPrint();
   m_threadStats.clear();
   m_latestThread = -1;
@@ -335,7 +353,7 @@ void MultiProgressPrinter::startNewLine() const {
 }
 
 void MultiProgressPrinter::flush() const {
-  std::lock_guard<std::mutex> lk(m_logLock);
+  std::lock_guard lk{m_logLock};
   const_cast<MultiProgressPrinter&>(*this).DoPrint();
 }
 
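All of the MultiProgressPrinter methods above are const yet lock m_logLock, which implies the mutex (and the stats being updated) are mutable members; that is the usual shape of a logically-const, thread-safe reporting interface. An illustrative sketch of the idiom with hypothetical names, not code from this diff:

#include <mutex>
#include <string>

class StatusPrinter {
public:
  // Logically const: callers just report progress, but the internal
  // mutex still has to be locked, so it is declared mutable.
  void print(const std::string& message) const {
    std::lock_guard lk{m_logLock};  // CTAD deduces std::lock_guard<std::mutex>
    m_lastMessage = message;
  }

private:
  mutable std::mutex m_logLock;
  mutable std::string m_lastMessage;
};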
@@ -118,7 +118,7 @@ static std::mutex PathsMutex;
 static std::unordered_map<std::thread::id, ProjectPath> PathsInProgress;
 
 bool ResourceLock::InProgress(const ProjectPath& path) {
-  std::unique_lock<std::mutex> lk(PathsMutex);
+  std::unique_lock lk{PathsMutex};
   for (const auto& p : PathsInProgress)
     if (p.second == path)
       return true;
@@ -126,7 +126,7 @@ bool ResourceLock::InProgress(const ProjectPath& path) {
 }
 
 bool ResourceLock::SetThreadRes(const ProjectPath& path) {
-  std::unique_lock<std::mutex> lk(PathsMutex);
+  std::unique_lock lk{PathsMutex};
   if (PathsInProgress.find(std::this_thread::get_id()) != PathsInProgress.cend())
     LogModule.report(logvisor::Fatal, fmt("multiple resource locks on thread"));
 
@@ -139,7 +139,7 @@ bool ResourceLock::SetThreadRes(const ProjectPath& path) {
 }
 
 void ResourceLock::ClearThreadRes() {
-  std::unique_lock<std::mutex> lk(PathsMutex);
+  std::unique_lock lk{PathsMutex};
   PathsInProgress.erase(std::this_thread::get_id());
 }
 
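For completeness, the ResourceLock helpers in this last file form a small per-thread ownership registry: a global map keyed by std::thread::id records which path each thread is currently cooking. A self-contained sketch of the same shape, with hecl::ProjectPath replaced by a std::string stand-in:

#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>

using Path = std::string;  // stand-in for hecl::ProjectPath

static std::mutex PathsMutex;
static std::unordered_map<std::thread::id, Path> PathsInProgress;

// True if any thread currently holds 'path' (same shape as ResourceLock::InProgress above).
bool InProgress(const Path& path) {
  std::unique_lock lk{PathsMutex};
  for (const auto& p : PathsInProgress)
    if (p.second == path)
      return true;
  return false;
}

// Removes the calling thread's entry (same shape as ResourceLock::ClearThreadRes above).
void ClearThreadRes() {
  std::unique_lock lk{PathsMutex};
  PathsInProgress.erase(std::this_thread::get_id());
}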