path: root/src/core/hle/kernel/k_scheduler.cpp
author    bunnei <bunneidev@gmail.com>    2020-12-03 03:08:35 +0100
committer bunnei <bunneidev@gmail.com>    2020-12-06 09:03:24 +0100
commit    9e29e36a784496f7290c03b6a42e400a164a5b1e (patch)
tree      d33cc91b4651b374e0c244be7b7e3b47ef7680fd /src/core/hle/kernel/k_scheduler.cpp
parent    hle: kernel: physical_core: Clear exclusive state after each run. (diff)
Diffstat (limited to 'src/core/hle/kernel/k_scheduler.cpp')
-rw-r--r-- src/core/hle/kernel/k_scheduler.cpp | 873
1 file changed, 873 insertions(+), 0 deletions(-)
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
new file mode 100644
index 000000000..7f7da610d
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -0,0 +1,873 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#include <algorithm>
+#include <mutex>
+#include <set>
+#include <unordered_set>
+#include <utility>
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/fiber.h"
+#include "common/logging/log.h"
+#include "core/arm/arm_interface.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/cpu_manager.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+
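+// Increments the scheduled count of the thread's owner process (if any). Yield operations
+// compare this count against a thread's yield count to skip redundant rescheduling work.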
+static void IncrementScheduledCount(Thread* thread) {
+ if (auto process = thread->GetOwnerProcess(); process) {
+ process->IncrementScheduledCount();
+ }
+}
+
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+ : kernel{kernel}, scheduler_lock{kernel} {}
+
+GlobalSchedulerContext::~GlobalSchedulerContext() = default;
+
+/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
+ Core::EmuThreadHandle global_thread) {
+ u32 current_core = global_thread.host_handle;
+ bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
+ (current_core < Core::Hardware::NUM_CPU_CORES);
+
+ while (cores_pending_reschedule != 0) {
+ u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
+ ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+ if (!must_context_switch || core != current_core) {
+ auto& phys_core = kernel.PhysicalCore(core);
+ phys_core.Interrupt();
+ } else {
+ must_context_switch = true;
+ }
+ cores_pending_reschedule &= ~(1ULL << core);
+ }
+ if (must_context_switch) {
+ auto core_scheduler = kernel.CurrentScheduler();
+ kernel.ExitSVCProfile();
+ core_scheduler->RescheduleCurrentCore();
+ kernel.EnterSVCProfile();
+ }
+}
+
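+// Publishes the given thread as this core's highest priority thread. Returns a bitmask with
+// this core's bit set if the highest priority thread changed, or 0 if there is nothing to do.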
+u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
+ std::scoped_lock lock{guard};
+ if (Thread* prev_highest_thread = this->state.highest_priority_thread;
+ prev_highest_thread != highest_thread) {
+ if (prev_highest_thread != nullptr) {
+ IncrementScheduledCount(prev_highest_thread);
+ prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
+ }
+ if (this->state.should_count_idle) {
+ if (highest_thread != nullptr) {
+ // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
+ // process->SetRunningThread(this->core_id, highest_thread,
+ // this->state.idle_count);
+ //}
+ } else {
+ this->state.idle_count++;
+ }
+ }
+
+ this->state.highest_priority_thread = highest_thread;
+ this->state.needs_scheduling = true;
+ return (1ULL << this->core_id);
+ } else {
+ return 0;
+ }
+}
+
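+// Recomputes the highest priority thread for every core, migrating suggested threads onto
+// idle cores where possible. Returns the mask of cores that need to reschedule.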
+/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ /* Clear that we need to update. */
+ ClearSchedulerUpdateNeeded(kernel);
+
+ u64 cores_needing_scheduling = 0, idle_cores = 0;
+ Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ /* We want to go over all cores, finding the highest priority thread and determining if
+ * scheduling is needed for that core. */
+ for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+        Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+ if (top_thread != nullptr) {
+            // /* If the thread has no waiters, we need to check if the process has a thread
+            //  * pinned. */
+            // if (top_thread->GetNumKernelWaiters() == 0) {
+            //     if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+            //         if (Thread* pinned = parent->GetPinnedThread(core_id);
+            //             pinned != nullptr && pinned != top_thread) {
+            //             /* We prefer our parent's pinned thread if possible. However, we also
+            //              * don't want to schedule un-runnable threads. */
+            //             if (pinned->GetRawState() == Thread::ThreadState_Runnable) {
+            //                 top_thread = pinned;
+            //             } else {
+            //                 top_thread = nullptr;
+            //             }
+            //         }
+            //     }
+            // }
+ } else {
+ idle_cores |= (1ULL << core_id);
+ }
+
+ top_threads[core_id] = top_thread;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+ }
+
+ /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */
+ while (idle_cores != 0) {
+ u32 core_id = Common::CountTrailingZeroes64(idle_cores);
+ if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+ s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
+ size_t num_candidates = 0;
+
+ /* While we have a suggested thread, try to migrate it! */
+ while (suggested != nullptr) {
+ /* Check if the suggested thread is the top thread on its core. */
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_thread =
+ (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
+ top_thread != suggested) {
+ /* Make sure we're not dealing with threads too high priority for migration. */
+ if (top_thread != nullptr &&
+ top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
+ break;
+ }
+
+ /* The suggested thread isn't bound to its core, so we can migrate it! */
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested);
+
+ top_threads[core_id] = suggested;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+ break;
+ }
+
+ /* Note this core as a candidate for migration. */
+ ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
+ migration_candidates[num_candidates++] = suggested_core;
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
+ * candidate cores' top threads. */
+ if (suggested == nullptr) {
+ for (size_t i = 0; i < num_candidates; i++) {
+ /* Check if there's some other thread that can run on the candidate core. */
+ const s32 candidate_core = migration_candidates[i];
+ suggested = top_threads[candidate_core];
+ if (Thread* next_on_candidate_core =
+ priority_queue.GetScheduledNext(candidate_core, suggested);
+ next_on_candidate_core != nullptr) {
+ /* The candidate core can run some other thread! We'll migrate its current
+ * top thread to us. */
+ top_threads[candidate_core] = next_on_candidate_core;
+ cores_needing_scheduling |=
+ kernel.Scheduler(candidate_core)
+ .UpdateHighestPriorityThread(top_threads[candidate_core]);
+
+ /* Perform the migration. */
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(candidate_core, suggested);
+
+ top_threads[core_id] = suggested;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(
+ top_threads[core_id]);
+ break;
+ }
+ }
+ }
+ }
+
+ idle_cores &= ~(1ULL << core_id);
+ }
+
+ return cores_needing_scheduling;
+}
+
+void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.push_back(std::move(thread));
+}
+
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+ thread_list.end());
+}
+
+void GlobalSchedulerContext::PreemptThreads() {
+ // The priority levels at which the global scheduler preempts threads every 10 ms. They are
+ // ordered from Core 0 to Core 3.
+    static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {
+        59, 59, 59, 63};
+
+ ASSERT(IsLocked());
+ for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+ const u32 priority = preemption_priorities[core_id];
+ kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+ }
+}
+
+bool GlobalSchedulerContext::IsLocked() const {
+ return scheduler_lock.IsLockedByCurrentThread();
+}
+
+/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread,
+ u32 old_state) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ /* Check if the state has changed, because if it hasn't there's nothing to do. */
+ const auto cur_state = thread->scheduling_state;
+ if (cur_state == old_state) {
+ return;
+ }
+
+ /* Update the priority queues. */
+ if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ /* If we were previously runnable, then we're not runnable now, and we should remove. */
+ GetPriorityQueue(kernel).Remove(thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ /* If we're now runnable, then we weren't previously, and we should add. */
+ GetPriorityQueue(kernel).PushBack(thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread,
+ Thread* current_thread, u32 old_priority) {
+
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ /* If the thread is runnable, we want to change its priority in the queue. */
+ if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ GetPriorityQueue(kernel).ChangePriority(
+ old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+ const KAffinityMask& old_affinity,
+ s32 old_core) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ /* If the thread is runnable, we want to change its affinity in the queue. */
+ if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
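+// Rotates the scheduled queue for the given core and priority, moving the front thread to the
+// back and then trying to migrate suggested threads in from other cores. Used to implement
+// round-robin preemption.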
+void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
+ ASSERT(system.GlobalSchedulerContext().IsLocked());
+
+ /* Get a reference to the priority queue. */
+ auto& kernel = system.Kernel();
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ /* Rotate the front of the queue to the end. */
+ Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+ Thread* next_thread = nullptr;
+ if (top_thread != nullptr) {
+ next_thread = priority_queue.MoveToScheduledBack(top_thread);
+ if (next_thread != top_thread) {
+ IncrementScheduledCount(top_thread);
+ IncrementScheduledCount(next_thread);
+ }
+ }
+
+ /* While we have a suggested thread, try to migrate it! */
+ {
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+ while (suggested != nullptr) {
+ /* Check if the suggested thread is the top thread on its core. */
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ /* If the next thread is a new thread that has been waiting longer than our
+ * suggestion, we prefer it to our suggestion. */
+ if (top_thread != next_thread && next_thread != nullptr &&
+ next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
+ suggested = nullptr;
+ break;
+ }
+
+ /* If we're allowed to do a migration, do one. */
+ /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
+ * to the front of the queue. */
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ }
+ }
+
+ /* Get the next suggestion. */
+ suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
+ }
+ }
+
+ /* Now that we might have migrated a thread with the same priority, check if we can do better.
+ */
+ {
+ Thread* best_thread = priority_queue.GetScheduledFront(core_id);
+ if (best_thread == GetCurrentThread()) {
+ best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
+ }
+
+ /* If the best thread we can choose has a priority the same or worse than ours, try to
+ * migrate a higher priority thread. */
+ if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ /* If the suggestion's priority is the same as ours, don't bother. */
+ if (suggested->GetPriority() >= best_thread->GetPriority()) {
+ break;
+ }
+
+ /* Check if the suggested thread is the top thread on its core. */
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ /* If we're allowed to do a migration, do one. */
+ /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
+ * suggestion to the front of the queue. */
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ }
+ }
+
+ /* Get the next suggestion. */
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+ }
+ }
+
+ /* After a rotation, we need a scheduler update. */
+ SetSchedulerUpdateNeeded(kernel);
+}
+
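+// Scheduling is only allowed while the current thread's dispatch disable count is at most 1,
+// i.e. at most the scheduler lock itself is holding dispatch disabled.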
+/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) {
+ return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
+}
+
+/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
+}
+
+/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
+}
+
+/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
+}
+
+/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) {
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
+ scheduler->GetCurrentThread()->DisableDispatch();
+ }
+}
+
+/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+ Core::EmuThreadHandle global_thread) {
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ scheduler->GetCurrentThread()->EnableDispatch();
+ }
+ RescheduleCores(kernel, cores_needing_scheduling, global_thread);
+}
+
+/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
+ if (IsSchedulerUpdateNeeded(kernel)) {
+ return UpdateHighestPriorityThreadsImpl(kernel);
+ } else {
+ return 0;
+ }
+}
+
+/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().priority_queue;
+}
+
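+// Yields the current thread to other threads of the same priority on its current core, without
+// attempting any migration (the yield variant of svcSleepThread with a timeout of 0).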
+void KScheduler::YieldWithoutCoreMigration() {
+ auto& kernel = system.Kernel();
+
+ /* Validate preconditions. */
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ /* Get the current thread and process. */
+ Thread& cur_thread = *GetCurrentThread();
+ Process& cur_process = *kernel.CurrentProcess();
+
+ /* If the thread's yield count matches, there's nothing for us to do. */
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ /* Get a reference to the priority queue. */
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ /* Perform the yield. */
+ {
+ SchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.scheduling_state;
+ if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ /* Put the current thread at the back of the queue. */
+ Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ /* If the next thread is different, we have an update to perform. */
+ if (next_thread != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else {
+ /* Otherwise, set the thread's yield count so that we won't waste work until the
+ * process is scheduled again. */
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ }
+ }
+}
+
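+// Yields the current thread, additionally trying to migrate a suggested thread from another
+// core if it would run here sooner (the yield variant of svcSleepThread with a timeout of -1).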
+void KScheduler::YieldWithCoreMigration() {
+ auto& kernel = system.Kernel();
+
+ /* Validate preconditions. */
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ /* Get the current thread and process. */
+ Thread& cur_thread = *GetCurrentThread();
+ Process& cur_process = *kernel.CurrentProcess();
+
+ /* If the thread's yield count matches, there's nothing for us to do. */
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ /* Get a reference to the priority queue. */
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ /* Perform the yield. */
+ {
+ SchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.scheduling_state;
+ if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ /* Get the current active core. */
+ const s32 core_id = cur_thread.GetActiveCore();
+
+ /* Put the current thread at the back of the queue. */
+ Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ /* While we have a suggested thread, try to migrate it! */
+ bool recheck = false;
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ /* Check if the suggested thread is the thread running on its core. */
+ const s32 suggested_core = suggested->GetActiveCore();
+
+ if (Thread* running_on_suggested_core =
+ (suggested_core >= 0)
+ ? kernel.Scheduler(suggested_core).state.highest_priority_thread
+ : nullptr;
+ running_on_suggested_core != suggested) {
+                    /* If the current thread's priority is higher than our suggestion's, we
+                     * prefer the next thread to the suggestion. We also prefer the next thread
+                     * when the current thread's priority is equal to the suggestion's, but the
+                     * next thread has been waiting longer. */
+ if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
+ (suggested->GetPriority() == cur_thread.GetPriority() &&
+ next_thread != std::addressof(cur_thread) &&
+ next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
+ suggested = nullptr;
+ break;
+ }
+
+ /* If we're allowed to do a migration, do one. */
+ /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
+ * suggestion to the front of the queue. */
+ if (running_on_suggested_core == nullptr ||
+ running_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ } else {
+ /* We couldn't perform a migration, but we should check again on a future
+ * yield. */
+ recheck = true;
+ }
+ }
+
+ /* Get the next suggestion. */
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ /* If we still have a suggestion or the next thread is different, we have an update to
+ * perform. */
+ if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else if (!recheck) {
+ /* Otherwise if we don't need to re-check, set the thread's yield count so that we
+ * won't waste work until the process is scheduled again. */
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ }
+ }
+}
+
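+// Yields the current thread to any thread by unbinding it from its core entirely (the yield
+// variant of svcSleepThread with a timeout of -2).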
+void KScheduler::YieldToAnyThread() {
+ auto& kernel = system.Kernel();
+
+ /* Validate preconditions. */
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ /* Get the current thread and process. */
+ Thread& cur_thread = *GetCurrentThread();
+ Process& cur_process = *kernel.CurrentProcess();
+
+ /* If the thread's yield count matches, there's nothing for us to do. */
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ /* Get a reference to the priority queue. */
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ /* Perform the yield. */
+ {
+ SchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.scheduling_state;
+ if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ /* Get the current active core. */
+ const s32 core_id = cur_thread.GetActiveCore();
+
+ /* Migrate the current thread to core -1. */
+ cur_thread.SetActiveCore(-1);
+ priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ /* If there's nothing scheduled, we can try to perform a migration. */
+ if (priority_queue.GetScheduledFront(core_id) == nullptr) {
+ /* While we have a suggested thread, try to migrate it! */
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ /* Check if the suggested thread is the top thread on its core. */
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ /* If we're allowed to do a migration, do one. */
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested);
+ IncrementScheduledCount(suggested);
+ }
+
+ /* Regardless of whether we migrated, we had a candidate, so we're done. */
+ break;
+ }
+
+ /* Get the next suggestion. */
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ /* If the suggestion is different from the current thread, we need to perform an
+ * update. */
+ if (suggested != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else {
+ /* Otherwise, set the thread's yield count so that we won't waste work until the
+ * process is scheduled again. */
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ } else {
+ /* Otherwise, we have an update to perform. */
+ SetSchedulerUpdateNeeded(kernel);
+ }
+ }
+ }
+}
+
+void GlobalSchedulerContext::Lock() {
+ scheduler_lock.Lock();
+}
+
+void GlobalSchedulerContext::Unlock() {
+ scheduler_lock.Unlock();
+}
+
+KScheduler::KScheduler(Core::System& system, std::size_t core_id)
+ : system(system), core_id(core_id) {
+ switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
+ this->state.needs_scheduling = true;
+ this->state.interrupt_task_thread_runnable = false;
+ this->state.should_count_idle = false;
+ this->state.idle_count = 0;
+ this->state.idle_thread_stack = nullptr;
+ this->state.highest_priority_thread = nullptr;
+}
+
+KScheduler::~KScheduler() = default;
+
+Thread* KScheduler::GetCurrentThread() const {
+ if (current_thread) {
+ return current_thread;
+ }
+ return idle_thread;
+}
+
+u64 KScheduler::GetLastContextSwitchTicks() const {
+ return last_context_switch_time;
+}
+
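+// Clears any pending interrupt on this core and performs a context switch if a scheduler
+// update has been requested for it.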
+void KScheduler::RescheduleCurrentCore() {
+ ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+
+ auto& phys_core = system.Kernel().PhysicalCore(core_id);
+ if (phys_core.IsInterrupted()) {
+ phys_core.ClearInterrupt();
+ }
+ guard.lock();
+ if (this->state.needs_scheduling) {
+ Schedule();
+ } else {
+ guard.unlock();
+ }
+}
+
+void KScheduler::OnThreadStart() {
+ SwitchContextStep2();
+}
+
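+// Saves the outgoing thread's guest context (registers and TPIDR_EL0) back into its thread
+// state and releases the thread's context guard.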
+void KScheduler::Unload(Thread* thread) {
+ if (thread) {
+ thread->SetIsRunning(false);
+ if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
+ system.ArmInterface(core_id).ExceptionalExit();
+ thread->SetContinuousOnSVC(false);
+ }
+ if (!thread->IsHLEThread() && !thread->HasExited()) {
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ }
+ thread->context_guard.unlock();
+ }
+}
+
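+// Restores the incoming thread's guest context onto this core's ARM interface.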
+void KScheduler::Reload(Thread* thread) {
+ if (thread) {
+ ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+ "Thread must be runnable.");
+
+        // Mark the thread as running again on this core.
+ thread->SetIsRunning(true);
+ thread->SetWasRunning(false);
+
+ auto* const thread_owner_process = thread->GetOwnerProcess();
+ if (thread_owner_process != nullptr) {
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ }
+ if (!thread->IsHLEThread()) {
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ }
+ }
+}
+
+void KScheduler::SwitchContextStep2() {
+ // Load context of new thread
+ Reload(current_thread);
+
+ RescheduleCurrentCore();
+}
+
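+// Performs the actual context switch: unloads the previous thread, then yields to this core's
+// switch fiber, which resumes execution in the newly selected thread.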
+void KScheduler::ScheduleImpl() {
+ Thread* previous_thread = current_thread;
+ current_thread = state.highest_priority_thread;
+
+ this->state.needs_scheduling = false;
+
+ if (current_thread == previous_thread) {
+ guard.unlock();
+ return;
+ }
+
+ Process* const previous_process = system.Kernel().CurrentProcess();
+
+ UpdateLastContextSwitchTime(previous_thread, previous_process);
+
+ // Save context for previous thread
+ Unload(previous_thread);
+
+ std::shared_ptr<Common::Fiber>* old_context;
+ if (previous_thread != nullptr) {
+ old_context = &previous_thread->GetHostContext();
+ } else {
+ old_context = &idle_thread->GetHostContext();
+ }
+ guard.unlock();
+
+ Common::Fiber::YieldTo(*old_context, switch_fiber);
+    // When execution resumes here, this thread may have been migrated to another core, so
+    // reload the scheduler of the core we are now running on.
+ auto& next_scheduler = *system.Kernel().CurrentScheduler();
+ next_scheduler.SwitchContextStep2();
+}
+
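+// Entry point of the per-core switch fiber; forwards to SwitchToCurrent.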
+void KScheduler::OnSwitch(void* this_scheduler) {
+ KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
+ sched->SwitchToCurrent();
+}
+
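+// Body of the switch fiber: repeatedly selects this core's highest priority thread (or the
+// idle thread) and yields execution to its host fiber until a new schedule is requested.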
+void KScheduler::SwitchToCurrent() {
+ while (true) {
+ {
+ std::scoped_lock lock{guard};
+ current_thread = state.highest_priority_thread;
+ this->state.needs_scheduling = false;
+ }
+ const auto is_switch_pending = [this] {
+ std::scoped_lock lock{guard};
+ return !!this->state.needs_scheduling;
+ };
+ do {
+ if (current_thread != nullptr && !current_thread->IsHLEThread()) {
+ current_thread->context_guard.lock();
+ if (!current_thread->IsRunnable()) {
+ current_thread->context_guard.unlock();
+ break;
+ }
+ if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
+ current_thread->context_guard.unlock();
+ break;
+ }
+ }
+ std::shared_ptr<Common::Fiber>* next_context;
+ if (current_thread != nullptr) {
+ next_context = &current_thread->GetHostContext();
+ } else {
+ next_context = &idle_thread->GetHostContext();
+ }
+ Common::Fiber::YieldTo(switch_fiber, *next_context);
+ } while (!is_switch_pending());
+ }
+}
+
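+// Accounts the CPU ticks elapsed since the last context switch to the outgoing thread and its
+// owner process.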
+void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+ const u64 prev_switch_ticks = last_context_switch_time;
+ const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
+ const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
+
+ if (thread != nullptr) {
+ thread->UpdateCPUTimeTicks(update_ticks);
+ }
+
+ if (process != nullptr) {
+ process->UpdateCPUTimeTicks(update_ticks);
+ }
+
+ last_context_switch_time = most_recent_switch_ticks;
+}
+
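+// Creates this core's idle thread, which runs whenever no guest thread is runnable.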
+void KScheduler::Initialize() {
+ std::string name = "Idle Thread Id:" + std::to_string(core_id);
+ std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+ auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+ nullptr, std::move(init_func), init_func_parameter);
+ idle_thread = thread_res.Unwrap().get();
+
+ {
+ KScopedSchedulerLock lock{system.Kernel()};
+ idle_thread->SetStatus(ThreadStatus::Ready);
+ }
+}
+
+SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
+ kernel.GlobalSchedulerContext().Lock();
+}
+
+SchedulerLock::~SchedulerLock() {
+ kernel.GlobalSchedulerContext().Unlock();
+}
+
+SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
+ Thread* time_task, s64 nanoseconds)
+    : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task},
+      nanoseconds{nanoseconds} {
+ event_handle = InvalidHandle;
+}
+
+SchedulerLockAndSleep::~SchedulerLockAndSleep() {
+ if (sleep_cancelled) {
+ return;
+ }
+ auto& time_manager = kernel.TimeManager();
+ time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
+}
+
+void SchedulerLockAndSleep::Release() {
+ if (sleep_cancelled) {
+ return;
+ }
+ auto& time_manager = kernel.TimeManager();
+ time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
+ sleep_cancelled = true;
+}
+
+} // namespace Kernel