author     Fernando S <fsahmkow27@gmail.com>   2022-04-16 00:05:04 +0200
committer  GitHub <noreply@github.com>         2022-04-16 00:05:04 +0200
commit     34710065e84ccc3de4433b7dd0ffb569e14788b8 (patch)
tree       5e96d11546befd9671dff252e8e9e8a693b0bd1a
parent     Merge pull request #8190 from Docteh/palswap (diff)
parent     core: hle: kernel: k_thread: Rework dummy thread waiting. (diff)
-rw-r--r--  src/common/fiber.cpp                             5
-rw-r--r--  src/core/core_timing.h                           6
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h   3
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp              8
-rw-r--r--  src/core/hle/kernel/k_spin_lock.cpp             39
-rw-r--r--  src/core/hle/kernel/k_spin_lock.h                4
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                39
-rw-r--r--  src/core/hle/kernel/k_thread.h                  13
-rw-r--r--  src/core/hle/kernel/physical_core.cpp            3
-rw-r--r--  src/core/hle/kernel/physical_core.h              7
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp                 3
-rw-r--r--  src/core/hle/service/service.h                   5
12 files changed, 46 insertions(+), 89 deletions(-)
diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
index 81b212e4b..177a74deb 100644
--- a/src/common/fiber.cpp
+++ b/src/common/fiber.cpp
@@ -2,9 +2,10 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <mutex>
+
#include "common/assert.h"
#include "common/fiber.h"
-#include "common/spin_lock.h"
#include "common/virtual_buffer.h"
#include <boost/context/detail/fcontext.hpp>
@@ -19,7 +20,7 @@ struct Fiber::FiberImpl {
VirtualBuffer<u8> stack;
VirtualBuffer<u8> rewind_stack;
- SpinLock guard{};
+ std::mutex guard;
std::function<void(void*)> entry_point;
std::function<void(void*)> rewind_point;
void* rewind_parameter{};
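The hunk above swaps the fiber's hand-rolled SpinLock for std::mutex. A minimal sketch of that guard pattern; the struct and function names here are illustrative stand-ins, not yuzu's full FiberImpl:

```cpp
#include <functional>
#include <mutex>

// Illustrative stand-in for Fiber::FiberImpl: the guard serializes fiber
// entry/rewind calls that may race across host threads.
struct FiberStateSketch {
    std::mutex guard;                       // was: SpinLock guard{};
    std::function<void(void*)> entry_point;
    void* start_parameter{};
};

void EnterFiber(FiberStateSketch& state, void* parameter) {
    // std::scoped_lock releases the mutex at end of scope, giving the same
    // RAII behavior the SpinLock-based guard provided.
    std::scoped_lock lock{state.guard};
    state.start_parameter = parameter;
    if (state.entry_point) {
        state.entry_point(parameter);
    }
}
```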
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 888828fd0..28b63be43 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -8,13 +8,13 @@
#include <chrono>
#include <functional>
#include <memory>
+#include <mutex>
#include <optional>
#include <string>
#include <thread>
#include <vector>
#include "common/common_types.h"
-#include "common/spin_lock.h"
#include "common/thread.h"
#include "common/wall_clock.h"
@@ -149,8 +149,8 @@ private:
std::shared_ptr<EventType> ev_lost;
Common::Event event{};
Common::Event pause_event{};
- Common::SpinLock basic_lock{};
- Common::SpinLock advance_lock{};
+ std::mutex basic_lock;
+ std::mutex advance_lock;
std::unique_ptr<std::thread> timer_thread;
std::atomic<bool> paused{};
std::atomic<bool> paused_set{};
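basic_lock and advance_lock become plain std::mutex members. A hedged sketch of how such a lock is typically taken when scheduling an event; the queue shape is an assumption, not CoreTiming's actual internals:

```cpp
#include <chrono>
#include <mutex>
#include <vector>

// Hypothetical event record; yuzu's real CoreTiming event carries more state.
struct PendingEvent {
    std::chrono::nanoseconds when;
};

class CoreTimingSketch {
public:
    void ScheduleEvent(std::chrono::nanoseconds ns_into_future) {
        // basic_lock keeps emulated CPU threads and the host timer thread
        // from mutating the event queue concurrently.
        std::scoped_lock lock{basic_lock};
        events.push_back(PendingEvent{ns_into_future});
    }

private:
    std::mutex basic_lock;            // was: Common::SpinLock basic_lock{};
    std::vector<PendingEvent> events;
};
```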
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 6f44b534f..47425a3a1 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -8,7 +8,6 @@
#include <vector>
#include "common/common_types.h"
-#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
@@ -80,7 +79,7 @@ private:
/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
- Common::SpinLock global_list_guard{};
+ std::mutex global_list_guard;
};
} // namespace Kernel
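global_list_guard protects thread_list the same way. A sketch of a guarded traversal; the callback shape is an assumption:

```cpp
#include <mutex>
#include <vector>

class KThread; // forward declaration, as in the real header

class GlobalListSketch {
public:
    template <typename F>
    void ForEachThread(F&& func) {
        // Hold the mutex for the whole traversal so threads cannot be
        // registered or unregistered out from under the iterator.
        std::scoped_lock lock{global_list_guard};
        for (KThread* thread : thread_list) {
            func(thread);
        }
    }

private:
    std::vector<KThread*> thread_list;
    std::mutex global_list_guard; // was: Common::SpinLock global_list_guard{};
};
```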
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6c0bb1672..526eb4b70 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -705,7 +705,7 @@ void KScheduler::Unload(KThread* thread) {
prev_thread = nullptr;
}
- thread->context_guard.Unlock();
+ thread->context_guard.unlock();
}
void KScheduler::Reload(KThread* thread) {
@@ -794,13 +794,13 @@ void KScheduler::SwitchToCurrent() {
do {
auto next_thread = current_thread.load();
if (next_thread != nullptr) {
- const auto locked = next_thread->context_guard.TryLock();
+ const auto locked = next_thread->context_guard.try_lock();
if (state.needs_scheduling.load()) {
- next_thread->context_guard.Unlock();
+ next_thread->context_guard.unlock();
break;
}
if (next_thread->GetActiveCore() != core_id) {
- next_thread->context_guard.Unlock();
+ next_thread->context_guard.unlock();
break;
}
if (!locked) {
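The scheduler now drives context_guard through the standard Lockable interface (try_lock/unlock), which fits both std::mutex and the Common::SpinLock that k_thread.h switches to below. A reduced, simplified sketch of the try-then-revalidate pattern; unlike the loop above, it only unlocks when try_lock actually succeeded:

```cpp
#include <atomic>

// Reduced sketch of SwitchToCurrent's claim logic. 'Guard' is anything with
// the standard Lockable interface (std::mutex, Common::SpinLock, ...).
// Unlocking a std::mutex you don't own is undefined behavior, so this
// version backs out only when the lock was actually taken.
template <typename Guard>
bool TryClaimThread(Guard& context_guard, std::atomic<bool>& needs_scheduling) {
    const bool locked = context_guard.try_lock();
    if (needs_scheduling.load()) {
        // The scheduler state changed while we were acquiring; back out.
        if (locked) {
            context_guard.unlock();
        }
        return false;
    }
    return locked;
}
```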
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 4412aa4bb..527ff0f9f 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -4,51 +4,18 @@
#include "core/hle/kernel/k_spin_lock.h"
-#if _MSC_VER
-#include <intrin.h>
-#if _M_AMD64
-#define __x86_64__ 1
-#endif
-#if _M_ARM64
-#define __aarch64__ 1
-#endif
-#else
-#if __x86_64__
-#include <xmmintrin.h>
-#endif
-#endif
-
-namespace {
-
-void ThreadPause() {
-#if __x86_64__
- _mm_pause();
-#elif __aarch64__ && _MSC_VER
- __yield();
-#elif __aarch64__
- asm("yield");
-#endif
-}
-
-} // namespace
-
namespace Kernel {
void KSpinLock::Lock() {
- while (lck.test_and_set(std::memory_order_acquire)) {
- ThreadPause();
- }
+ lck.lock();
}
void KSpinLock::Unlock() {
- lck.clear(std::memory_order_release);
+ lck.unlock();
}
bool KSpinLock::TryLock() {
- if (lck.test_and_set(std::memory_order_acquire)) {
- return false;
- }
- return true;
+ return lck.try_lock();
}
} // namespace Kernel
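After this change KSpinLock is a thin shim: the platform-specific pause/yield spin loop is gone and all three methods forward to std::mutex. Condensing the .cpp and the header hunk below into one view, the whole class now reduces to:

```cpp
#include <mutex>

// Condensed view of the patched KSpinLock across both files: the
// PascalCase kernel-facing API survives, std::mutex does the real work.
class KSpinLock {
public:
    void Lock() {
        lck.lock();
    }
    void Unlock() {
        lck.unlock();
    }
    [[nodiscard]] bool TryLock() {
        return lck.try_lock();
    }

private:
    std::mutex lck;
};
```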
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 4d87d006a..7868b25a5 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -4,7 +4,7 @@
#pragma once
-#include <atomic>
+#include <mutex>
#include "core/hle/kernel/k_scoped_lock.h"
@@ -25,7 +25,7 @@ public:
[[nodiscard]] bool TryLock();
private:
- std::atomic_flag lck = ATOMIC_FLAG_INIT;
+ std::mutex lck;
};
// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index d3bb1c871..af71987e8 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -723,10 +723,10 @@ void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Set our suspend flags in state.
- const ThreadState old_state = thread_state;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
const auto new_state =
static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
- thread_state = new_state;
+ thread_state.store(new_state, std::memory_order_relaxed);
// Note the state change in scheduler.
if (new_state != old_state) {
@@ -738,8 +738,8 @@ void KThread::Continue() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Clear our suspend flags in state.
- const ThreadState old_state = thread_state;
- thread_state = old_state & ThreadState::Mask;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
// Note the state change in scheduler.
KScheduler::OnThreadStateChanged(kernel, this, old_state);
@@ -1079,17 +1079,10 @@ void KThread::IfDummyThreadTryWait() {
return;
}
- // Block until we can grab the lock.
- KScopedSpinLock lk{dummy_wait_lock};
-}
-
-void KThread::IfDummyThreadBeginWait() {
- if (!IsDummyThread()) {
- return;
- }
-
- // Ensure the thread will block when IfDummyThreadTryWait is called.
- dummy_wait_lock.Lock();
+ // Block until we are no longer waiting.
+ std::unique_lock lk(dummy_wait_lock);
+ dummy_wait_cv.wait(
+ lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
}
void KThread::IfDummyThreadEndWait() {
@@ -1097,8 +1090,8 @@ void KThread::IfDummyThreadEndWait() {
return;
}
- // Ensure the thread will no longer block.
- dummy_wait_lock.Unlock();
+ // Wake up the waiting thread.
+ dummy_wait_cv.notify_one();
}
void KThread::BeginWait(KThreadQueue* queue) {
@@ -1107,9 +1100,6 @@ void KThread::BeginWait(KThreadQueue* queue) {
// Set our wait queue.
wait_queue = queue;
-
- // Special case for dummy threads to ensure they block.
- IfDummyThreadBeginWait();
}
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1158,10 +1148,11 @@ void KThread::SetState(ThreadState state) {
SetMutexWaitAddressForDebugging({});
SetWaitReasonForDebugging({});
- const ThreadState old_state = thread_state;
- thread_state =
- static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
- if (thread_state != old_state) {
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(
+ static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
+ std::memory_order_relaxed);
+ if (thread_state.load(std::memory_order_relaxed) != old_state) {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
}
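The dummy-thread rework replaces the begin/end spin-lock handshake with a condition variable: the waiter sleeps until its state leaves Waiting (or the kernel shuts down), and the waker just notifies. A self-contained sketch of that protocol; the state enum and shutdown flag are stand-ins, and in yuzu the state change itself happens in SetState under the scheduler lock rather than inside EndWait:

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

enum class WaitState { Waiting, Runnable };

struct DummyWaitSketch {
    std::mutex dummy_wait_lock;
    std::condition_variable dummy_wait_cv;
    std::atomic<WaitState> state{WaitState::Waiting};
    std::atomic<bool> shutting_down{false};

    // Waiter side (cf. IfDummyThreadTryWait): sleep until the state leaves
    // Waiting or the kernel is torn down. The predicate is re-checked after
    // every wakeup, so spurious wakeups are harmless.
    void TryWait() {
        std::unique_lock lk(dummy_wait_lock);
        dummy_wait_cv.wait(lk, [&] {
            return state.load() != WaitState::Waiting || shutting_down.load();
        });
    }

    // Waker side (cf. IfDummyThreadEndWait). Publishing the state change
    // while holding the mutex avoids a lost wakeup between the waiter's
    // predicate check and its actual sleep.
    void EndWait() {
        {
            std::scoped_lock lk(dummy_wait_lock);
            state.store(WaitState::Runnable);
        }
        dummy_wait_cv.notify_one();
    }
};
```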
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index d0fd85130..4892fdf76 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -6,6 +6,8 @@
#include <array>
#include <atomic>
+#include <condition_variable>
+#include <mutex>
#include <span>
#include <string>
#include <utility>
@@ -15,6 +17,7 @@
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
@@ -256,11 +259,11 @@ public:
[[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
[[nodiscard]] ThreadState GetState() const {
- return thread_state & ThreadState::Mask;
+ return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
}
[[nodiscard]] ThreadState GetRawState() const {
- return thread_state;
+ return thread_state.load(std::memory_order_relaxed);
}
void SetState(ThreadState state);
@@ -642,7 +645,6 @@ public:
// blocking as needed.
void IfDummyThreadTryWait();
- void IfDummyThreadBeginWait();
void IfDummyThreadEndWait();
private:
@@ -762,13 +764,14 @@ private:
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
- KSpinLock context_guard{};
- KSpinLock dummy_wait_lock{};
+ Common::SpinLock context_guard{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
ThreadType thread_type{};
+ std::mutex dummy_wait_lock;
+ std::condition_variable dummy_wait_cv;
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
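thread_state is now read and written through std::atomic with relaxed ordering: the scheduler lock provides the actual synchronization, and the atomic only rules out data races on the raw value. A small illustration of the masking pattern; the enumerator values are illustrative, though the Mask split matches the diff:

```cpp
#include <atomic>
#include <cstdint>

// Low bits (covered by Mask) hold the state; bits above hold suspend flags.
enum class ThreadState : std::uint16_t {
    Waiting = 1,
    Runnable = 2,
    Mask = 0xF,
};

constexpr ThreadState operator&(ThreadState a, ThreadState b) {
    return static_cast<ThreadState>(static_cast<std::uint16_t>(a) &
                                    static_cast<std::uint16_t>(b));
}

std::atomic<ThreadState> thread_state{ThreadState::Runnable};

ThreadState GetState() {
    // Relaxed ordering suffices: callers hold the scheduler lock, which
    // already orders these accesses; the atomic only prevents torn
    // reads/writes from threads that peek at the state concurrently.
    return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
}
```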
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 18a5f40f8..cc49e8c7e 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "common/spin_lock.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
@@ -16,7 +15,7 @@ namespace Kernel {
PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
Core::CPUInterrupts& interrupts_)
: core_index{core_index_}, system{system_}, scheduler{scheduler_},
- interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {
+ interrupts{interrupts_}, guard{std::make_unique<std::mutex>()} {
#ifdef ARCHITECTURE_x86_64
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 16a032e89..f2112fc1d 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -6,13 +6,10 @@
#include <cstddef>
#include <memory>
+#include <mutex>
#include "core/arm/arm_interface.h"
-namespace Common {
-class SpinLock;
-}
-
namespace Kernel {
class KScheduler;
} // namespace Kernel
@@ -91,7 +88,7 @@ private:
Core::System& system;
Kernel::KScheduler& scheduler;
Core::CPUInterrupts& interrupts;
- std::unique_ptr<Common::SpinLock> guard;
+ std::unique_ptr<std::mutex> guard;
std::unique_ptr<Core::ARM_Interface> arm_interface;
};
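The guard stays behind a unique_ptr, presumably because std::mutex is neither copyable nor movable while PhysicalCore objects still need to be movable; the pointer indirection restores that. A sketch of the idiom:

```cpp
#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

// A class embedding std::mutex directly is not movable. Holding it through
// unique_ptr keeps the outer object movable while the mutex stays fixed on
// the heap.
class CoreSketch {
public:
    CoreSketch() : guard{std::make_unique<std::mutex>()} {}

    void RunExclusive() {
        std::scoped_lock lock{*guard};
        // ... exclusive section ...
    }

private:
    std::unique_ptr<std::mutex> guard;
};

std::vector<CoreSketch> MakeCores(std::size_t n) {
    std::vector<CoreSketch> cores;
    for (std::size_t i = 0; i < n; ++i) {
        cores.emplace_back(); // relocation only moves the pointer
    }
    return cores;
}
```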
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index cf727c167..42f9cf811 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -160,7 +160,8 @@ public:
class RelocatableObject final : public ServiceFramework<RelocatableObject> {
public:
- explicit RelocatableObject(Core::System& system_) : ServiceFramework{system_, "ldr:ro"} {
+ explicit RelocatableObject(Core::System& system_)
+ : ServiceFramework{system_, "ldr:ro", ServiceThreadType::CreateNew} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &RelocatableObject::LoadModule, "LoadModule"},
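The ldr:ro service now requests its own service thread instead of the default shared one, so its requests no longer contend with other HLE services. Per the hunk, that is a single constructor-argument change; sketched in isolation with stubbed types (yuzu's real ServiceFramework constructor takes more parameters, and the Default enumerator is an assumption — the diff only confirms CreateNew):

```cpp
// Stubbed for illustration only.
enum class ServiceThreadType { Default, CreateNew };

struct SystemStub {};

struct ServiceFrameworkStub {
    ServiceFrameworkStub(SystemStub&, const char* /*name*/,
                         ServiceThreadType /*type*/ = ServiceThreadType::Default) {}
};

struct RelocatableObjectSketch : ServiceFrameworkStub {
    explicit RelocatableObjectSketch(SystemStub& system)
        // CreateNew: ldr:ro requests get a dedicated host service thread.
        : ServiceFrameworkStub{system, "ldr:ro", ServiceThreadType::CreateNew} {}
};
```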
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h
index c78b2baeb..148265218 100644
--- a/src/core/hle/service/service.h
+++ b/src/core/hle/service/service.h
@@ -9,7 +9,6 @@
#include <string>
#include <boost/container/flat_map.hpp>
#include "common/common_types.h"
-#include "common/spin_lock.h"
#include "core/hle/kernel/hle_ipc.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -90,7 +89,7 @@ protected:
using HandlerFnP = void (Self::*)(Kernel::HLERequestContext&);
/// Used to gain exclusive access to the service members, e.g. from CoreTiming thread.
- [[nodiscard]] std::scoped_lock<Common::SpinLock> LockService() {
+ [[nodiscard]] std::scoped_lock<std::mutex> LockService() {
return std::scoped_lock{lock_service};
}
@@ -135,7 +134,7 @@ private:
boost::container::flat_map<u32, FunctionInfoBase> handlers_tipc;
/// Used to gain exclusive access to the service members, e.g. from CoreTiming thread.
- Common::SpinLock lock_service;
+ std::mutex lock_service;
};
/**
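LockService keeps returning a scoped lock by value; only the lock type underneath changes. A sketch of why returning std::scoped_lock<std::mutex> works — C++17 guaranteed copy elision constructs it directly in the caller's frame, so the non-movable lock never needs to be copied or moved:

```cpp
#include <mutex>

class ServiceSketch {
public:
    // Returned by value: guaranteed copy elision means the scoped_lock is
    // constructed once, and the mutex stays held until the caller's local
    // goes out of scope.
    [[nodiscard]] std::scoped_lock<std::mutex> LockService() {
        return std::scoped_lock{lock_service};
    }

private:
    std::mutex lock_service; // was: Common::SpinLock lock_service;
};

void HandleRequest(ServiceSketch& service) {
    const auto guard = service.LockService();
    // ... service members may be touched safely here ...
}
```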