mirror of https://github.com/encounter/aurora.git
gfx: Limit # of synchronous pipelines per frame
Allows OpenGL(+ES) to run smoothly even while building the initial pipeline cache.
commit ea6e49e000
parent ff0b43137c
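The idea behind the change: when no dedicated pipeline thread is available, only a handful of pipelines are compiled synchronously each frame, and the rest stay queued for later frames. Below is a minimal standalone sketch of that throttling pattern, under assumptions; the identifiers (request_pipeline, kBuildsPerFrame, s_pending, etc.) are illustrative and do not mirror aurora's actual API.

// Illustrative sketch only: hypothetical identifiers, not aurora's code.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <unordered_map>
#include <utility>

using PipelineHandle = std::uint64_t;
using BuildFn = std::function<PipelineHandle()>;

#ifdef NDEBUG
constexpr std::size_t kBuildsPerFrame = 5; // release: a few synchronous builds per frame are tolerable
#else
constexpr std::size_t kBuildsPerFrame = 1; // debug: builds are slower, keep frames responsive
#endif

static std::size_t s_buildsThisFrame = 0;
static std::unordered_map<std::uint64_t, PipelineHandle> s_cache;
static std::deque<std::pair<std::uint64_t, BuildFn>> s_pending;

// Refill the per-frame budget; called once at the start of each frame.
void begin_frame_reset() { s_buildsThisFrame = 0; }

// Returns true if the pipeline is usable this frame; otherwise it is (or stays) queued.
bool request_pipeline(std::uint64_t hash, BuildFn build) {
  if (s_cache.contains(hash)) {
    return true; // already built
  }
  // Already queued from an earlier frame? Report not-ready without queueing it again.
  const bool queued = std::any_of(s_pending.begin(), s_pending.end(),
                                  [hash](const auto& e) { return e.first == hash; });
  if (queued) {
    return false;
  }
  if (s_buildsThisFrame < kBuildsPerFrame) {
    s_cache.emplace(hash, build()); // synchronous build, counted against the budget
    ++s_buildsThisFrame;
    return true;
  }
  s_pending.emplace_back(hash, std::move(build)); // over budget: defer to a later frame
  return false;
}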
@@ -109,6 +109,12 @@ namespace aurora::gfx {
using NewPipelineCallback = std::function<wgpu::RenderPipeline()>;
std::mutex g_pipelineMutex;
static bool g_hasPipelineThread = false;
static size_t g_pipelinesPerFrame = 0;
#ifdef NDEBUG
constexpr size_t BuildPipelinesPerFrame = 5;
#else
constexpr size_t BuildPipelinesPerFrame = 1;
#endif
static std::thread g_pipelineThread;
static std::atomic_bool g_pipelineThreadEnd;
static std::condition_variable g_pipelineCv;
@@ -176,20 +182,26 @@ static PipelineRef find_pipeline(ShaderType type, const PipelineConfig& config,
    std::scoped_lock guard{g_pipelineMutex};
    found = g_pipelines.contains(hash);
    if (!found) {
      const auto ref =
          std::find_if(g_queuedPipelines.begin(), g_queuedPipelines.end(), [=](auto v) { return v.first == hash; });
      if (g_hasPipelineThread) {
        if (ref != g_queuedPipelines.end()) {
          found = true;
        }
      } else {
        if (ref != g_queuedPipelines.end()) {
          found = true;
        } else if (g_pipelinesPerFrame < BuildPipelinesPerFrame) {
          g_pipelines.try_emplace(hash, cb());
          if (serialize) {
            serialize_pipeline_config(type, config);
          }
          ++g_pipelinesPerFrame;
          createdPipelines++;
          found = true;
        }
      }
    }
    if (!found) {
      g_queuedPipelines.emplace_back(std::pair{hash, std::move(cb)});
      if (serialize) {
@@ -298,13 +310,17 @@ PipelineRef pipeline_ref(model::PipelineConfig config) {

static void pipeline_worker() {
  bool hasMore = false;
  while (g_hasPipelineThread || g_pipelinesPerFrame < BuildPipelinesPerFrame) {
    std::pair<PipelineRef, NewPipelineCallback> cb;
    {
      std::unique_lock lock{g_pipelineMutex};
      if (g_hasPipelineThread) {
        if (!hasMore) {
          g_pipelineCv.wait(lock, [] { return !g_queuedPipelines.empty() || g_pipelineThreadEnd; });
        }
      } else if (g_queuedPipelines.empty()) {
        return;
      }
      if (g_pipelineThreadEnd) {
        break;
      }
@@ -318,6 +334,9 @@ static void pipeline_worker() {
      g_queuedPipelines.pop_front();
      hasMore = !g_queuedPipelines.empty();
    }
    if (!g_hasPipelineThread) {
      ++g_pipelinesPerFrame;
    }
    createdPipelines++;
    queuedPipelines--;
  }
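Continuing the sketch from above: without a pipeline thread, the pending queue can be drained at frame end until the same budget runs out, which is roughly the role the reworked pipeline_worker() loop plays here (drain_pending is a hypothetical name and reuses the sketch's globals).

// Illustrative continuation of the earlier sketch (uses s_pending, s_cache, s_buildsThisFrame).
void drain_pending() {
  while (!s_pending.empty() && s_buildsThisFrame < kBuildsPerFrame) {
    auto [hash, build] = std::move(s_pending.front());
    s_pending.pop_front();
    s_cache.emplace(hash, build()); // each deferred build still counts against the frame budget
    ++s_buildsThisFrame;
  }
}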
@@ -507,6 +526,10 @@ void begin_frame() {
  g_currentRenderPass = 0;
  // push_command(CommandType::SetViewport, Command::Data{.setViewport = g_cachedViewport});
  // push_command(CommandType::SetScissor, Command::Data{.setScissor = g_cachedScissor});

  if (!g_hasPipelineThread) {
    g_pipelinesPerFrame = 0;
  }
}

void end_frame(const wgpu::CommandEncoder& cmd) {
@@ -545,6 +568,10 @@ void end_frame(const wgpu::CommandEncoder& cmd) {
  currentStagingBuffer = (currentStagingBuffer + 1) % g_stagingBuffers.size();
  map_staging_buffer();
  g_currentRenderPass = UINT32_MAX;

  if (!g_hasPipelineThread) {
    pipeline_worker();
  }
}

void render(wgpu::CommandEncoder& cmd) {
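For orientation, a hypothetical frame driver over the sketch, mirroring where begin_frame() and end_frame() hook into the real code:

// Hypothetical frame loop for the sketch above.
void frame() {
  begin_frame_reset();  // begin_frame(): budget refilled when there is no pipeline thread
  // ... record draws; request_pipeline() may build up to kBuildsPerFrame synchronously ...
  drain_pending();      // end_frame(): spend any leftover budget on queued builds
}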