From f48187449ed4772044da55a8a3745d578c1b8c48 Mon Sep 17 00:00:00 2001 From: David Marcec Date: Thu, 16 Jul 2020 18:38:35 +1000 Subject: Use conditional var --- src/video_core/shader/async_shaders.cpp | 14 +++++++++----- src/video_core/shader/async_shaders.h | 10 ++++++---- 2 files changed, 15 insertions(+), 9 deletions(-) (limited to 'src/video_core/shader') diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index fb94ac2e7..84d86c32f 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -59,7 +59,6 @@ void AsyncShaders::KillWorkers() { } bool AsyncShaders::HasWorkQueued() { - std::shared_lock lock(queue_mutex); return !pending_queue.empty(); } @@ -118,26 +117,31 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, cpu_addr}; std::unique_lock lock(queue_mutex); pending_queue.push_back(std::move(params)); + cv.notify_one(); } void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) { using namespace std::chrono_literals; while (!is_thread_exiting.load(std::memory_order_relaxed)) { + std::unique_lock lock(queue_mutex); + cv.wait(lock, [&] { return HasWorkQueued() || is_thread_exiting; }); + if (is_thread_exiting) { + return; + } + // Partial lock to allow all threads to read at the same time if (!HasWorkQueued()) { continue; } - // Complete lock for pulling workload - queue_mutex.lock(); // Another thread beat us, just unlock and wait for the next load if (pending_queue.empty()) { - queue_mutex.unlock(); continue; } // Pull work from queue WorkerParams work = std::move(pending_queue.front()); pending_queue.pop_front(); - queue_mutex.unlock(); + + lock.unlock(); if (work.backend == AsyncShaders::Backend::OpenGL || work.backend == AsyncShaders::Backend::GLASM) { diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index 26bc38326..2f5ee94ad 100644 --- a/src/video_core/shader/async_shaders.h +++ 
b/src/video_core/shader/async_shaders.h @@ -4,6 +4,7 @@ #pragma once +#include <condition_variable> #include <memory> #include <shared_mutex> #include <thread> @@ -59,9 +60,6 @@ public: // Force end all threads void KillWorkers(); - /// Check our worker queue to see if we have any work queued already - bool HasWorkQueued(); - /// Check to see if any shaders have actually been compiled bool HasCompletedWork(); @@ -81,6 +79,9 @@ public: private: void ShaderCompilerThread(Core::Frontend::GraphicsContext* context); + /// Check our worker queue to see if we have any work queued already + bool HasWorkQueued(); + struct WorkerParams { AsyncShaders::Backend backend; OpenGL::Device device; @@ -94,7 +95,8 @@ private: VAddr cpu_address; }; - std::shared_mutex queue_mutex; + std::condition_variable cv; + std::mutex queue_mutex; std::shared_mutex completed_mutex; std::atomic<bool> is_thread_exiting{}; std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list; -- cgit v1.2.3