From 51fb0a6f9647ba199da10fe4f018ee36e44e65ba Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 27 Feb 2021 11:56:04 -0800
Subject: core: Switch to unique_ptr for usage of Common::Fiber.

- By using unique_ptr instead of shared_ptr, we have more explicit ownership
  of the context.
- Fixes a memory leak due to a circular reference of the shared pointer.
---
 src/core/hle/kernel/k_scheduler.cpp | 18 +++++++++---------
 src/core/hle/kernel/k_scheduler.h   | 10 +++++-----
 src/core/hle/kernel/k_thread.cpp    |  6 +-----
 src/core/hle/kernel/k_thread.h      | 10 ++++++++--
 src/core/hle/kernel/svc.cpp         |  3 +--
 5 files changed, 24 insertions(+), 23 deletions(-)

(limited to 'src/core/hle')

diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index bb5f43b53..465036f3d 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -608,7 +608,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
 }
 
 KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
-    switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
+    switch_fiber = std::make_unique<Common::Fiber>(OnSwitch, this);
     state.needs_scheduling.store(true);
     state.interrupt_task_thread_runnable = false;
     state.should_count_idle = false;
@@ -726,15 +726,15 @@ void KScheduler::ScheduleImpl() {
     // Save context for previous thread
     Unload(previous_thread);
 
-    std::shared_ptr<Common::Fiber>* old_context;
+    Common::Fiber* old_context;
     if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
+        old_context = previous_thread->GetHostContext();
     } else {
-        old_context = &idle_thread->GetHostContext();
+        old_context = idle_thread->GetHostContext();
     }
     guard.unlock();
 
-    Common::Fiber::YieldTo(*old_context, switch_fiber);
+    Common::Fiber::YieldTo(old_context, switch_fiber.get());
     /// When a thread wakes up, the scheduler may have changed to other in another core.
     auto& next_scheduler = *system.Kernel().CurrentScheduler();
     next_scheduler.SwitchContextStep2();
@@ -769,13 +769,13 @@ void KScheduler::SwitchToCurrent() {
                     break;
                 }
             }
-            std::shared_ptr<Common::Fiber>* next_context;
+            Common::Fiber* next_context;
             if (next_thread != nullptr) {
-                next_context = &next_thread->GetHostContext();
+                next_context = next_thread->GetHostContext();
             } else {
-                next_context = &idle_thread->GetHostContext();
+                next_context = idle_thread->GetHostContext();
             }
-            Common::Fiber::YieldTo(switch_fiber, *next_context);
+            Common::Fiber::YieldTo(switch_fiber.get(), next_context);
         } while (!is_switch_pending());
     }
 }
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index f595b9a5c..a4285c595 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -68,12 +68,12 @@ public:
 
     void OnThreadStart();
 
-    [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
-        return switch_fiber;
+    [[nodiscard]] Common::Fiber* ControlContext() {
+        return switch_fiber.get();
     }
 
-    [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
-        return switch_fiber;
+    [[nodiscard]] const Common::Fiber* ControlContext() const {
+        return switch_fiber.get();
     }
 
     [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
@@ -178,7 +178,7 @@ private:
 
     KThread* idle_thread;
 
-    std::shared_ptr<Common::Fiber> switch_fiber{};
+    std::unique_ptr<Common::Fiber> switch_fiber{};
 
     struct SchedulingState {
         std::atomic<bool> needs_scheduling;
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 1661afbd9..f49e31b72 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -991,10 +991,6 @@ void KThread::SetState(ThreadState state) {
     }
 }
 
-std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
-    return host_context;
-}
-
 ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
                                                      std::string name, VAddr entry_point,
                                                      u32 priority, u64 arg, s32 processor_id,
@@ -1028,7 +1024,7 @@ ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, Thread
     scheduler.AddThread(thread);
 
     thread->host_context =
-        std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
+        std::make_unique<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
 
     return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
 }
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c8ac656a4..a2893d939 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -293,7 +293,13 @@ public:
         return thread_context_64;
     }
 
-    [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
+    [[nodiscard]] Common::Fiber* GetHostContext() {
+        return host_context.get();
+    }
+
+    [[nodiscard]] const Common::Fiber* GetHostContext() const {
+        return host_context.get();
+    }
     [[nodiscard]] ThreadState GetState() const {
         return thread_state & ThreadState::Mask;
     }
@@ -719,7 +725,7 @@ private:
     Common::SpinLock context_guard{};
 
     // For emulation
-    std::shared_ptr<Common::Fiber> host_context{};
+    std::unique_ptr<Common::Fiber> host_context{};
 
     // For debugging
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index cc8fa6576..d04116115 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -2626,8 +2626,7 @@ void Call(Core::System& system, u32 immediate) {
     kernel.ExitSVCProfile();
 
     if (!thread->IsCallingSvc()) {
-        auto* host_context = thread->GetHostContext().get();
-        host_context->Rewind();
+        thread->GetHostContext()->Rewind();
    }
 
     system.EnterDynarmicProfile();
-- 
cgit v1.2.3
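
Note on the leak described in the commit message: with shared_ptr, the thread owns its fiber while the fiber (through its captured context) keeps an owning reference back to the thread, so neither reference count can ever reach zero. The sketch below is not yuzu code; the SharedThread/SharedFiber and UniqueThread/UniqueFiber types are hypothetical stand-ins that only illustrate why such a cycle never frees its objects and how single ownership via unique_ptr plus a non-owning raw back-pointer, in the spirit of the Common::Fiber* accessors introduced above, avoids it.

// Minimal illustrative sketch, not yuzu code: hypothetical types.
#include <iostream>
#include <memory>

struct SharedFiber;

struct SharedThread {
    std::shared_ptr<SharedFiber> host_context; // thread owns the fiber
    ~SharedThread() { std::cout << "SharedThread destroyed\n"; }
};

struct SharedFiber {
    std::shared_ptr<SharedThread> owner; // owning back-reference completes the cycle
    ~SharedFiber() { std::cout << "SharedFiber destroyed\n"; }
};

struct UniqueThread;

struct UniqueFiber {
    UniqueThread* owner = nullptr; // non-owning back-reference, no cycle
    ~UniqueFiber() { std::cout << "UniqueFiber destroyed\n"; }
};

struct UniqueThread {
    std::unique_ptr<UniqueFiber> host_context; // single, explicit owner of the fiber
    ~UniqueThread() { std::cout << "UniqueThread destroyed\n"; }
};

int main() {
    {
        auto thread = std::make_shared<SharedThread>();
        thread->host_context = std::make_shared<SharedFiber>();
        thread->host_context->owner = thread;
    } // both use counts stay at 1 after the scope ends: no destructor runs, the pair leaks

    {
        auto thread = std::make_unique<UniqueThread>();
        thread->host_context = std::make_unique<UniqueFiber>();
        thread->host_context->owner = thread.get();
    } // UniqueThread is destroyed here and, through its unique_ptr, so is its UniqueFiber
}

Running this under LeakSanitizer reports the first pair as leaked while the second block frees everything deterministically, which mirrors what the patch achieves by making KThread and KScheduler the sole owners of their fibers and handing out only raw Common::Fiber* pointers.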