Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp                  |  27
-rw-r--r--  src/core/hle/kernel/address_arbiter.h                    |   3
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp         |  52
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h           |  81
-rw-r--r--  src/core/hle/kernel/handle_table.cpp                     |   6
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp                          |  41
-rw-r--r--  src/core/hle/kernel/hle_ipc.h                            |  23
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h                    |  58
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h                   | 451
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                      | 784
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                        | 201
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h                   |  75
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h                      |  41
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h  |  50
-rw-r--r--  src/core/hle/kernel/kernel.cpp                           | 201
-rw-r--r--  src/core/hle/kernel/kernel.h                             |  39
-rw-r--r--  src/core/hle/kernel/memory/address_space_info.cpp        |   2
-rw-r--r--  src/core/hle/kernel/memory/memory_block.h                |  20
-rw-r--r--  src/core/hle/kernel/memory/memory_block_manager.h        |   4
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp                |  19
-rw-r--r--  src/core/hle/kernel/mutex.cpp                            |  12
-rw-r--r--  src/core/hle/kernel/physical_core.cpp                    |  52
-rw-r--r--  src/core/hle/kernel/physical_core.h                      |  44
-rw-r--r--  src/core/hle/kernel/process.cpp                          |  17
-rw-r--r--  src/core/hle/kernel/process.h                            |  13
-rw-r--r--  src/core/hle/kernel/process_capability.cpp               |   2
-rw-r--r--  src/core/hle/kernel/readable_event.cpp                   |   4
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp                   |   4
-rw-r--r--  src/core/hle/kernel/scheduler.cpp                        | 849
-rw-r--r--  src/core/hle/kernel/scheduler.h                          | 318
-rw-r--r--  src/core/hle/kernel/server_session.cpp                   |  36
-rw-r--r--  src/core/hle/kernel/server_session.h                     |  12
-rw-r--r--  src/core/hle/kernel/service_thread.cpp                   | 110
-rw-r--r--  src/core/hle/kernel/service_thread.h                     |  28
-rw-r--r--  src/core/hle/kernel/svc.cpp                              | 149
-rw-r--r--  src/core/hle/kernel/svc_types.h                          |   4
-rw-r--r--  src/core/hle/kernel/synchronization.cpp                  |  11
-rw-r--r--  src/core/hle/kernel/synchronization_object.h             |   3
-rw-r--r--  src/core/hle/kernel/thread.cpp                           | 120
-rw-r--r--  src/core/hle/kernel/thread.h                             | 120
-rw-r--r--  src/core/hle/kernel/time_manager.cpp                     |  26
-rw-r--r--  src/core/hle/kernel/time_manager.h                       |   2
42 files changed, 2449 insertions(+), 1665 deletions(-)
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index b882eaa0f..20ffa7d47 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -12,8 +12,9 @@
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
@@ -58,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
}
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
const std::vector<std::shared_ptr<Thread>> waiting_threads =
GetThreadsWaitingOnAddress(address);
WakeThreads(waiting_threads, num_to_wake);
@@ -67,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
@@ -92,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
@@ -153,11 +154,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
bool should_decrement) {
auto& memory = system.Memory();
auto& kernel = system.Kernel();
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+ Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
if (current_thread->IsPendingTermination()) {
lock.CancelSleep();
@@ -210,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (current_thread->IsWaitingForArbitration()) {
RemoveThread(SharedFrom(current_thread));
current_thread->WaitForArbitration(false);
@@ -223,11 +224,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
auto& memory = system.Memory();
auto& kernel = system.Kernel();
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+ Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
if (current_thread->IsPendingTermination()) {
lock.CancelSleep();
@@ -265,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (current_thread->IsWaitingForArbitration()) {
RemoveThread(SharedFrom(current_thread));
current_thread->WaitForArbitration(false);
@@ -275,12 +276,6 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
return current_thread->GetSignalingResult();
}
-void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
- ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
- RemoveThread(thread);
- thread->SetArbiterWaitAddress(0);
-}
-
void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
const VAddr arb_addr = thread->GetArbiterWaitAddress();
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index 0b05d533c..b91edc67d 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -50,9 +50,6 @@ public:
/// Waits on an address with a particular arbitration type.
ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
- /// Removes a thread from the container and resets its address arbiter adress to 0
- void HandleWakeupThread(std::shared_ptr<Thread> thread);
-
private:
/// Signals an address being waited on.
ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
new file mode 100644
index 000000000..a133e8ed0
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -0,0 +1,52 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <mutex>
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/hle/kernel/global_scheduler_context.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+ : kernel{kernel}, scheduler_lock{kernel} {}
+
+GlobalSchedulerContext::~GlobalSchedulerContext() = default;
+
+void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.push_back(std::move(thread));
+}
+
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+ std::scoped_lock lock{global_list_guard};
+ thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+ thread_list.end());
+}
+
+void GlobalSchedulerContext::PreemptThreads() {
+ // The priority levels at which the global scheduler preempts threads every 10 ms. They are
+ // ordered from Core 0 to Core 3.
+ static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities{
+ 59,
+ 59,
+ 59,
+ 63,
+ };
+
+ ASSERT(IsLocked());
+ for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+ const u32 priority = preemption_priorities[core_id];
+ kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+ }
+}
+
+bool GlobalSchedulerContext::IsLocked() const {
+ return scheduler_lock.IsLockedByCurrentThread();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 000000000..5c7b89290
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,81 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <vector>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/thread.h"
+
+namespace Kernel {
+
+class KernelCore;
+class SchedulerLock;
+
+using KSchedulerPriorityQueue =
+ KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
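+
+// Threads with a priority value below this (i.e. priorities 0 and 1, the most
+// urgent) are never migrated between cores by the rebalancing passes.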
+constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+
+class GlobalSchedulerContext final {
+ friend class KScheduler;
+
+public:
+ using LockType = KAbstractSchedulerLock<KScheduler>;
+
+ explicit GlobalSchedulerContext(KernelCore& kernel);
+ ~GlobalSchedulerContext();
+
+ /// Adds a new thread to the scheduler
+ void AddThread(std::shared_ptr<Thread> thread);
+
+ /// Removes a thread from the scheduler
+ void RemoveThread(std::shared_ptr<Thread> thread);
+
+ /// Returns a list of all threads managed by the scheduler
+ [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+ return thread_list;
+ }
+
+ /**
+ * Rotates the scheduling queues of threads at a preemption priority and then does
+ * some core rebalancing. Preemption priorities can be found in the array
+ * 'preemption_priorities'.
+ *
+ * @note This operation happens every 10ms.
+ */
+ void PreemptThreads();
+
+ /// Returns true if the global scheduler lock is acquired
+ bool IsLocked() const;
+
+ [[nodiscard]] LockType& SchedulerLock() {
+ return scheduler_lock;
+ }
+
+ [[nodiscard]] const LockType& SchedulerLock() const {
+ return scheduler_lock;
+ }
+
+private:
+ friend class KScopedSchedulerLock;
+ friend class KScopedSchedulerLockAndSleep;
+
+ KernelCore& kernel;
+
+ std::atomic_bool scheduler_update_needed{};
+ KSchedulerPriorityQueue priority_queue;
+ LockType scheduler_lock;
+
+    /// Lists all threads that are alive (i.e. not yet deleted).
+ std::vector<std::shared_ptr<Thread>> thread_list;
+ Common::SpinLock global_list_guard{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fb30b6f8b..40988b0fd 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -8,9 +8,9 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
@@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const {
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
- return SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+ return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
} else if (handle == CurrentProcess) {
return SharedFrom(kernel.CurrentProcess());
}
@@ -118,7 +118,7 @@ std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
void HandleTable::Clear() {
for (u16 i = 0; i < table_size; ++i) {
- generations[i] = i + 1;
+ generations[i] = static_cast<u16>(i + 1);
objects[i] = nullptr;
}
next_free_slot = 0;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 81f85643b..83decf6cf 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -17,11 +17,12 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
@@ -45,44 +46,6 @@ void SessionRequestHandler::ClientDisconnected(
boost::range::remove_erase(connected_sessions, server_session);
}
-std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
- const std::string& reason, u64 timeout, WakeupCallback&& callback,
- std::shared_ptr<WritableEvent> writable_event) {
- // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-
- if (!writable_event) {
- // Create event if not provided
- const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason);
- writable_event = pair.writable;
- }
-
- {
- Handle event_handle = InvalidHandle;
- SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
- thread->SetHLECallback(
- [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
- ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
- ? ThreadWakeupReason::Timeout
- : ThreadWakeupReason::Signal;
- callback(thread, context, reason);
- context.WriteToOutgoingCommandBuffer(*thread);
- return true;
- });
- const auto readable_event{writable_event->GetReadableEvent()};
- writable_event->Clear();
- thread->SetHLESyncObject(readable_event.get());
- thread->SetStatus(ThreadStatus::WaitHLEEvent);
- thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
- readable_event->AddWaitingThread(thread);
- lock.Release();
- thread->SetHLETimeEvent(event_handle);
- }
-
- is_thread_waiting = true;
-
- return writable_event;
-}
-
HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> server_session,
std::shared_ptr<Thread> thread)
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f3277b766..b112e1ebd 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -24,6 +24,10 @@ namespace Core::Memory {
class Memory;
}
+namespace IPC {
+class ResponseBuilder;
+}
+
namespace Service {
class ServiceFrameworkBase;
}
@@ -125,23 +129,6 @@ public:
using WakeupCallback = std::function<void(
std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
- /**
- * Puts the specified guest thread to sleep until the returned event is signaled or until the
- * specified timeout expires.
- * @param reason Reason for pausing the thread, to be used for debugging purposes.
- * @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback
- * invoked with a Timeout reason.
- * @param callback Callback to be invoked when the thread is resumed. This callback must write
- * the entire command response once again, regardless of the state of it before this function
- * was called.
- * @param writable_event Event to use to wake up the thread. If unspecified, an event will be
- * created.
- * @returns Event that when signaled will resume the thread and call the callback function.
- */
- std::shared_ptr<WritableEvent> SleepClientThread(
- const std::string& reason, u64 timeout, WakeupCallback&& callback,
- std::shared_ptr<WritableEvent> writable_event = nullptr);
-
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
u32_le* src_cmdbuf);
@@ -287,6 +274,8 @@ public:
}
private:
+ friend class IPC::ResponseBuilder;
+
void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
new file mode 100644
index 000000000..dd73781cd
--- /dev/null
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -0,0 +1,58 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hardware_properties.h"
+
+namespace Kernel {
+
+class KAffinityMask {
+public:
+ constexpr KAffinityMask() = default;
+
+ [[nodiscard]] constexpr u64 GetAffinityMask() const {
+ return this->mask;
+ }
+
+ constexpr void SetAffinityMask(u64 new_mask) {
+ ASSERT((new_mask & ~AllowedAffinityMask) == 0);
+ this->mask = new_mask;
+ }
+
+ [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
+ return this->mask & GetCoreBit(core);
+ }
+
+ constexpr void SetAffinity(s32 core, bool set) {
+ ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+
+ if (set) {
+ this->mask |= GetCoreBit(core);
+ } else {
+ this->mask &= ~GetCoreBit(core);
+ }
+ }
+
+ constexpr void SetAll() {
+ this->mask = AllowedAffinityMask;
+ }
+
+private:
+ [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
+ ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ return (1ULL << core);
+ }
+
+ static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
+
+ u64 mask{};
+};
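+
+// Usage sketch (illustrative): restrict a thread to cores 0 and 1.
+//
+//     KAffinityMask mask;
+//     mask.SetAffinity(0, true);
+//     mask.SetAffinity(1, true);
+//     ASSERT(mask.GetAffinityMask() == 0b11);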
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
new file mode 100644
index 000000000..99fb8fe93
--- /dev/null
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -0,0 +1,451 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <array>
+#include <concepts>
+
+#include "common/assert.h"
+#include "common/bit_set.h"
+#include "common/bit_util.h"
+#include "common/common_types.h"
+#include "common/concepts.h"
+
+namespace Kernel {
+
+class Thread;
+
+template <typename T>
+concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
+ { t.GetAffinityMask() }
+ ->Common::ConvertibleTo<u64>;
+ {t.SetAffinityMask(std::declval<u64>())};
+
+ { t.GetAffinity(std::declval<int32_t>()) }
+ ->std::same_as<bool>;
+ {t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
+ {t.SetAll()};
+};
+
+template <typename T>
+concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
+ {typename T::QueueEntry()};
+ {(typename T::QueueEntry()).Initialize()};
+ {(typename T::QueueEntry()).SetPrev(std::addressof(t))};
+ {(typename T::QueueEntry()).SetNext(std::addressof(t))};
+ { (typename T::QueueEntry()).GetNext() }
+ ->std::same_as<T*>;
+ { (typename T::QueueEntry()).GetPrev() }
+ ->std::same_as<T*>;
+ { t.GetPriorityQueueEntry(std::declval<s32>()) }
+ ->std::same_as<typename T::QueueEntry&>;
+
+ {t.GetAffinityMask()};
+ { typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
+ ->KPriorityQueueAffinityMask;
+
+ { t.GetActiveCore() }
+ ->Common::ConvertibleTo<s32>;
+ { t.GetPriority() }
+ ->Common::ConvertibleTo<s32>;
+};
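+
+// In this kernel, Kernel::Thread is the Member type; see the
+// KSchedulerPriorityQueue alias in global_scheduler_context.h.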
+
+template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
+requires KPriorityQueueMember<Member> class KPriorityQueue {
+public:
+ using AffinityMaskType = typename std::remove_cv_t<
+ typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;
+
+ static_assert(LowestPriority >= 0);
+ static_assert(HighestPriority >= 0);
+ static_assert(LowestPriority >= HighestPriority);
+ static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
+ static constexpr size_t NumCores = _NumCores;
+
+ static constexpr bool IsValidCore(s32 core) {
+ return 0 <= core && core < static_cast<s32>(NumCores);
+ }
+
+ static constexpr bool IsValidPriority(s32 priority) {
+ return HighestPriority <= priority && priority <= LowestPriority + 1;
+ }
+
+private:
+ using Entry = typename Member::QueueEntry;
+
+public:
+ class KPerCoreQueue {
+ private:
+ std::array<Entry, NumCores> root{};
+
+ public:
+ constexpr KPerCoreQueue() {
+ for (auto& per_core_root : root) {
+ per_core_root.Initialize();
+ }
+ }
+
+ constexpr bool PushBack(s32 core, Member* member) {
+ // Get the entry associated with the member.
+ Entry& member_entry = member->GetPriorityQueueEntry(core);
+
+ // Get the entry associated with the end of the queue.
+ Member* tail = this->root[core].GetPrev();
+ Entry& tail_entry =
+ (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
+
+ // Link the entries.
+ member_entry.SetPrev(tail);
+ member_entry.SetNext(nullptr);
+ tail_entry.SetNext(member);
+ this->root[core].SetPrev(member);
+
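+            // Return whether the queue was empty before this push; the caller
+            // uses this to mark the priority level as available.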
+ return tail == nullptr;
+ }
+
+ constexpr bool PushFront(s32 core, Member* member) {
+ // Get the entry associated with the member.
+ Entry& member_entry = member->GetPriorityQueueEntry(core);
+
+ // Get the entry associated with the front of the queue.
+ Member* head = this->root[core].GetNext();
+ Entry& head_entry =
+ (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
+
+ // Link the entries.
+ member_entry.SetPrev(nullptr);
+ member_entry.SetNext(head);
+ head_entry.SetPrev(member);
+ this->root[core].SetNext(member);
+
+ return (head == nullptr);
+ }
+
+ constexpr bool Remove(s32 core, Member* member) {
+ // Get the entry associated with the member.
+ Entry& member_entry = member->GetPriorityQueueEntry(core);
+
+ // Get the entries associated with next and prev.
+ Member* prev = member_entry.GetPrev();
+ Member* next = member_entry.GetNext();
+ Entry& prev_entry =
+ (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
+ Entry& next_entry =
+ (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
+
+ // Unlink.
+ prev_entry.SetNext(next);
+ next_entry.SetPrev(prev);
+
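+            // Return whether the removal left this core's queue empty; the
+            // caller uses this to clear the priority level's available bit.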
+ return (this->GetFront(core) == nullptr);
+ }
+
+ constexpr Member* GetFront(s32 core) const {
+ return this->root[core].GetNext();
+ }
+ };
+
+ class KPriorityQueueImpl {
+ public:
+ constexpr KPriorityQueueImpl() = default;
+
+ constexpr void PushBack(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority > LowestPriority) {
+ return;
+ }
+
+ if (this->queues[priority].PushBack(core, member)) {
+ this->available_priorities[core].SetBit(priority);
+ }
+ }
+
+ constexpr void PushFront(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority > LowestPriority) {
+ return;
+ }
+
+ if (this->queues[priority].PushFront(core, member)) {
+ this->available_priorities[core].SetBit(priority);
+ }
+ }
+
+ constexpr void Remove(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority > LowestPriority) {
+ return;
+ }
+
+ if (this->queues[priority].Remove(core, member)) {
+ this->available_priorities[core].ClearBit(priority);
+ }
+ }
+
+ constexpr Member* GetFront(s32 core) const {
+ ASSERT(IsValidCore(core));
+
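+            // available_priorities tracks which priority levels currently have
+            // waiters; CountLeadingZero() yields the most urgent such level, or
+            // a value above LowestPriority when every queue on this core is empty.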
+ const s32 priority =
+ static_cast<s32>(this->available_priorities[core].CountLeadingZero());
+ if (priority <= LowestPriority) {
+ return this->queues[priority].GetFront(core);
+ } else {
+ return nullptr;
+ }
+ }
+
+ constexpr Member* GetFront(s32 priority, s32 core) const {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority <= LowestPriority) {
+ return this->queues[priority].GetFront(core);
+ } else {
+ return nullptr;
+ }
+ }
+
+ constexpr Member* GetNext(s32 core, const Member* member) const {
+ ASSERT(IsValidCore(core));
+
+ Member* next = member->GetPriorityQueueEntry(core).GetNext();
+ if (next == nullptr) {
+ const s32 priority = static_cast<s32>(
+ this->available_priorities[core].GetNextSet(member->GetPriority()));
+ if (priority <= LowestPriority) {
+ next = this->queues[priority].GetFront(core);
+ }
+ }
+ return next;
+ }
+
+ constexpr void MoveToFront(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority <= LowestPriority) {
+ this->queues[priority].Remove(core, member);
+ this->queues[priority].PushFront(core, member);
+ }
+ }
+
+ constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) {
+ ASSERT(IsValidCore(core));
+ ASSERT(IsValidPriority(priority));
+
+ if (priority <= LowestPriority) {
+ this->queues[priority].Remove(core, member);
+ this->queues[priority].PushBack(core, member);
+ return this->queues[priority].GetFront(core);
+ } else {
+ return nullptr;
+ }
+ }
+
+ private:
+ std::array<KPerCoreQueue, NumPriority> queues{};
+ std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
+ };
+
+private:
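+    // A member lives in the scheduled queue of its active core and in the
+    // suggested queue of every other core permitted by its affinity mask.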
+ KPriorityQueueImpl scheduled_queue;
+ KPriorityQueueImpl suggested_queue;
+
+private:
+ constexpr void ClearAffinityBit(u64& affinity, s32 core) {
+ affinity &= ~(u64(1) << core);
+ }
+
+ constexpr s32 GetNextCore(u64& affinity) {
+ const s32 core = Common::CountTrailingZeroes64(affinity);
+ ClearAffinityBit(affinity, core);
+ return core;
+ }
+
+ constexpr void PushBack(s32 priority, Member* member) {
+ ASSERT(IsValidPriority(priority));
+
+ // Push onto the scheduled queue for its core, if we can.
+ u64 affinity = member->GetAffinityMask().GetAffinityMask();
+ if (const s32 core = member->GetActiveCore(); core >= 0) {
+ this->scheduled_queue.PushBack(priority, core, member);
+ ClearAffinityBit(affinity, core);
+ }
+
+ // And suggest the thread for all other cores.
+ while (affinity) {
+ this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+ }
+ }
+
+ constexpr void PushFront(s32 priority, Member* member) {
+ ASSERT(IsValidPriority(priority));
+
+ // Push onto the scheduled queue for its core, if we can.
+ u64 affinity = member->GetAffinityMask().GetAffinityMask();
+ if (const s32 core = member->GetActiveCore(); core >= 0) {
+ this->scheduled_queue.PushFront(priority, core, member);
+ ClearAffinityBit(affinity, core);
+ }
+
+ // And suggest the thread for all other cores.
+ // Note: Nintendo pushes onto the back of the suggested queue, not the front.
+ while (affinity) {
+ this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+ }
+ }
+
+ constexpr void Remove(s32 priority, Member* member) {
+ ASSERT(IsValidPriority(priority));
+
+ // Remove from the scheduled queue for its core.
+ u64 affinity = member->GetAffinityMask().GetAffinityMask();
+ if (const s32 core = member->GetActiveCore(); core >= 0) {
+ this->scheduled_queue.Remove(priority, core, member);
+ ClearAffinityBit(affinity, core);
+ }
+
+ // Remove from the suggested queue for all other cores.
+ while (affinity) {
+ this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
+ }
+ }
+
+public:
+ constexpr KPriorityQueue() = default;
+
+ // Getters.
+ constexpr Member* GetScheduledFront(s32 core) const {
+ return this->scheduled_queue.GetFront(core);
+ }
+
+ constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
+ return this->scheduled_queue.GetFront(priority, core);
+ }
+
+ constexpr Member* GetSuggestedFront(s32 core) const {
+ return this->suggested_queue.GetFront(core);
+ }
+
+ constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
+ return this->suggested_queue.GetFront(priority, core);
+ }
+
+ constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
+ return this->scheduled_queue.GetNext(core, member);
+ }
+
+ constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
+ return this->suggested_queue.GetNext(core, member);
+ }
+
+ constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
+ return member->GetPriorityQueueEntry(core).GetNext();
+ }
+
+ // Mutators.
+ constexpr void PushBack(Member* member) {
+ this->PushBack(member->GetPriority(), member);
+ }
+
+ constexpr void Remove(Member* member) {
+ this->Remove(member->GetPriority(), member);
+ }
+
+ constexpr void MoveToScheduledFront(Member* member) {
+ this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
+ }
+
+ constexpr Thread* MoveToScheduledBack(Member* member) {
+ return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
+ member);
+ }
+
+ // First class fancy operations.
+ constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
+ ASSERT(IsValidPriority(prev_priority));
+
+ // Remove the member from the queues.
+ const s32 new_priority = member->GetPriority();
+ this->Remove(prev_priority, member);
+
+ // And enqueue. If the member is running, we want to keep it running.
+ if (is_running) {
+ this->PushFront(new_priority, member);
+ } else {
+ this->PushBack(new_priority, member);
+ }
+ }
+
+ constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
+ Member* member) {
+ // Get the new information.
+ const s32 priority = member->GetPriority();
+ const AffinityMaskType& new_affinity = member->GetAffinityMask();
+ const s32 new_core = member->GetActiveCore();
+
+ // Remove the member from all queues it was in before.
+ for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
+ if (prev_affinity.GetAffinity(core)) {
+ if (core == prev_core) {
+ this->scheduled_queue.Remove(priority, core, member);
+ } else {
+ this->suggested_queue.Remove(priority, core, member);
+ }
+ }
+ }
+
+ // And add the member to all queues it should be in now.
+ for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
+ if (new_affinity.GetAffinity(core)) {
+ if (core == new_core) {
+ this->scheduled_queue.PushBack(priority, core, member);
+ } else {
+ this->suggested_queue.PushBack(priority, core, member);
+ }
+ }
+ }
+ }
+
+ constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
+ // Get the new information.
+ const s32 new_core = member->GetActiveCore();
+ const s32 priority = member->GetPriority();
+
+ // We don't need to do anything if the core is the same.
+ if (prev_core != new_core) {
+ // Remove from the scheduled queue for the previous core.
+ if (prev_core >= 0) {
+ this->scheduled_queue.Remove(priority, prev_core, member);
+ }
+
+ // Remove from the suggested queue and add to the scheduled queue for the new core.
+ if (new_core >= 0) {
+ this->suggested_queue.Remove(priority, new_core, member);
+ if (to_front) {
+ this->scheduled_queue.PushFront(priority, new_core, member);
+ } else {
+ this->scheduled_queue.PushBack(priority, new_core, member);
+ }
+ }
+
+ // Add to the suggested queue for the previous core.
+ if (prev_core >= 0) {
+ this->suggested_queue.PushBack(priority, prev_core, member);
+ }
+ }
+ }
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
new file mode 100644
index 000000000..c5fd82a6b
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -0,0 +1,784 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/fiber.h"
+#include "common/logging/log.h"
+#include "core/arm/arm_interface.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/cpu_manager.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+
+static void IncrementScheduledCount(Kernel::Thread* thread) {
+ if (auto process = thread->GetOwnerProcess(); process) {
+ process->IncrementScheduledCount();
+ }
+}
+
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
+ Core::EmuThreadHandle global_thread) {
+ u32 current_core = global_thread.host_handle;
+ bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
+ (current_core < Core::Hardware::NUM_CPU_CORES);
+
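+    // Poke every other pending core with an interrupt; if our own core is among
+    // them (and we hold a guest context), context switch it directly below.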
+ while (cores_pending_reschedule != 0) {
+ u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
+ ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+ if (!must_context_switch || core != current_core) {
+ auto& phys_core = kernel.PhysicalCore(core);
+ phys_core.Interrupt();
+ } else {
+ must_context_switch = true;
+ }
+ cores_pending_reschedule &= ~(1ULL << core);
+ }
+ if (must_context_switch) {
+ auto core_scheduler = kernel.CurrentScheduler();
+ kernel.ExitSVCProfile();
+ core_scheduler->RescheduleCurrentCore();
+ kernel.EnterSVCProfile();
+ }
+}
+
+u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
+ std::scoped_lock lock{guard};
+ if (Thread* prev_highest_thread = this->state.highest_priority_thread;
+ prev_highest_thread != highest_thread) {
+ if (prev_highest_thread != nullptr) {
+ IncrementScheduledCount(prev_highest_thread);
+ prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
+ }
+ if (this->state.should_count_idle) {
+ if (highest_thread != nullptr) {
+ // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
+ // process->SetRunningThread(this->core_id, highest_thread,
+ // this->state.idle_count);
+ //}
+ } else {
+ this->state.idle_count++;
+ }
+ }
+
+ this->state.highest_priority_thread = highest_thread;
+ this->state.needs_scheduling = true;
+ return (1ULL << this->core_id);
+ } else {
+ return 0;
+ }
+}
+
+u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Clear that we need to update.
+ ClearSchedulerUpdateNeeded(kernel);
+
+ u64 cores_needing_scheduling = 0, idle_cores = 0;
+ Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+    // We want to go over all cores, finding the highest priority thread and
+    // determining if scheduling is needed for that core.
+ for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+ Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+ if (top_thread != nullptr) {
+ // If the thread has no waiters, we need to check if the process has a thread pinned.
+ // TODO(bunnei): Implement thread pinning
+ } else {
+ idle_cores |= (1ULL << core_id);
+ }
+
+ top_threads[core_id] = top_thread;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+ }
+
+ // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
+ while (idle_cores != 0) {
+ u32 core_id = Common::CountTrailingZeroes64(idle_cores);
+ if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+ s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
+ size_t num_candidates = 0;
+
+ // While we have a suggested thread, try to migrate it!
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_thread =
+ (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
+ top_thread != suggested) {
+ // Make sure we're not dealing with threads too high priority for migration.
+ if (top_thread != nullptr &&
+ top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
+ break;
+ }
+
+ // The suggested thread isn't bound to its core, so we can migrate it!
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested);
+
+ top_threads[core_id] = suggested;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
+ break;
+ }
+
+ // Note this core as a candidate for migration.
+ ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
+ migration_candidates[num_candidates++] = suggested_core;
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
+ // candidate cores' top threads.
+ if (suggested == nullptr) {
+ for (size_t i = 0; i < num_candidates; i++) {
+ // Check if there's some other thread that can run on the candidate core.
+ const s32 candidate_core = migration_candidates[i];
+ suggested = top_threads[candidate_core];
+ if (Thread* next_on_candidate_core =
+ priority_queue.GetScheduledNext(candidate_core, suggested);
+ next_on_candidate_core != nullptr) {
+ // The candidate core can run some other thread! We'll migrate its current
+ // top thread to us.
+ top_threads[candidate_core] = next_on_candidate_core;
+ cores_needing_scheduling |=
+ kernel.Scheduler(candidate_core)
+ .UpdateHighestPriorityThread(top_threads[candidate_core]);
+
+ // Perform the migration.
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(candidate_core, suggested);
+
+ top_threads[core_id] = suggested;
+ cores_needing_scheduling |=
+ kernel.Scheduler(core_id).UpdateHighestPriorityThread(
+ top_threads[core_id]);
+ break;
+ }
+ }
+ }
+ }
+
+ idle_cores &= ~(1ULL << core_id);
+ }
+
+ return cores_needing_scheduling;
+}
+
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // Check if the state has changed, because if it hasn't there's nothing to do.
+ const auto cur_state = thread->scheduling_state;
+ if (cur_state == old_state) {
+ return;
+ }
+
+ // Update the priority queues.
+ if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // If we were previously runnable, then we're not runnable now, and we should remove.
+ GetPriorityQueue(kernel).Remove(thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // If we're now runnable, then we weren't previously, and we should add.
+ GetPriorityQueue(kernel).PushBack(thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
+ u32 old_priority) {
+
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // If the thread is runnable, we want to change its priority in the queue.
+ if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ GetPriorityQueue(kernel).ChangePriority(
+ old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+ const KAffinityMask& old_affinity, s32 old_core) {
+ ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+ // If the thread is runnable, we want to change its affinity in the queue.
+ if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
+ IncrementScheduledCount(thread);
+ SetSchedulerUpdateNeeded(kernel);
+ }
+}
+
+void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
+ ASSERT(system.GlobalSchedulerContext().IsLocked());
+
+ // Get a reference to the priority queue.
+ auto& kernel = system.Kernel();
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Rotate the front of the queue to the end.
+ Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+ Thread* next_thread = nullptr;
+ if (top_thread != nullptr) {
+ next_thread = priority_queue.MoveToScheduledBack(top_thread);
+ if (next_thread != top_thread) {
+ IncrementScheduledCount(top_thread);
+ IncrementScheduledCount(next_thread);
+ }
+ }
+
+ // While we have a suggested thread, try to migrate it!
+ {
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ // If the next thread is a new thread that has been waiting longer than our
+ // suggestion, we prefer it to our suggestion.
+ if (top_thread != next_thread && next_thread != nullptr &&
+ next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
+ suggested = nullptr;
+ break;
+ }
+
+ // If we're allowed to do a migration, do one.
+ // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
+ // to the front of the queue.
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ }
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
+ }
+ }
+
+ // Now that we might have migrated a thread with the same priority, check if we can do better.
+
+ {
+ Thread* best_thread = priority_queue.GetScheduledFront(core_id);
+ if (best_thread == GetCurrentThread()) {
+ best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
+ }
+
+ // If the best thread we can choose has a priority the same or worse than ours, try to
+ // migrate a higher priority thread.
+ if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ // If the suggestion's priority is the same as ours, don't bother.
+ if (suggested->GetPriority() >= best_thread->GetPriority()) {
+ break;
+ }
+
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ // If we're allowed to do a migration, do one.
+ // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
+ // suggestion to the front of the queue.
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ }
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+ }
+ }
+
+ // After a rotation, we need a scheduler update.
+ SetSchedulerUpdateNeeded(kernel);
+}
+
+bool KScheduler::CanSchedule(KernelCore& kernel) {
+ return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
+}
+
+bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
+}
+
+void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
+}
+
+void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
+}
+
+void KScheduler::DisableScheduling(KernelCore& kernel) {
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
+ scheduler->GetCurrentThread()->DisableDispatch();
+ }
+}
+
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+ Core::EmuThreadHandle global_thread) {
+ if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+ scheduler->GetCurrentThread()->EnableDispatch();
+ }
+ RescheduleCores(kernel, cores_needing_scheduling, global_thread);
+}
+
+u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
+ if (IsSchedulerUpdateNeeded(kernel)) {
+ return UpdateHighestPriorityThreadsImpl(kernel);
+ } else {
+ return 0;
+ }
+}
+
+KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().priority_queue;
+}
+
+void KScheduler::YieldWithoutCoreMigration() {
+ auto& kernel = system.Kernel();
+
+ // Validate preconditions.
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ // Get the current thread and process.
+ Thread& cur_thread = *GetCurrentThread();
+ Process& cur_process = *kernel.CurrentProcess();
+
+ // If the thread's yield count matches, there's nothing for us to do.
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ // Get a reference to the priority queue.
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Perform the yield.
+ {
+ KScopedSchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.scheduling_state;
+ if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // Put the current thread at the back of the queue.
+ Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ // If the next thread is different, we have an update to perform.
+ if (next_thread != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else {
+ // Otherwise, set the thread's yield count so that we won't waste work until the
+ // process is scheduled again.
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ }
+ }
+}
+
+void KScheduler::YieldWithCoreMigration() {
+ auto& kernel = system.Kernel();
+
+ // Validate preconditions.
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ // Get the current thread and process.
+ Thread& cur_thread = *GetCurrentThread();
+ Process& cur_process = *kernel.CurrentProcess();
+
+ // If the thread's yield count matches, there's nothing for us to do.
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ // Get a reference to the priority queue.
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Perform the yield.
+ {
+ KScopedSchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.scheduling_state;
+ if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // Get the current active core.
+ const s32 core_id = cur_thread.GetActiveCore();
+
+ // Put the current thread at the back of the queue.
+ Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ // While we have a suggested thread, try to migrate it!
+ bool recheck = false;
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the thread running on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+
+ if (Thread* running_on_suggested_core =
+ (suggested_core >= 0)
+ ? kernel.Scheduler(suggested_core).state.highest_priority_thread
+ : nullptr;
+ running_on_suggested_core != suggested) {
+                    // If the current thread's priority is higher than our suggestion's, we
+                    // prefer the next thread to the suggestion. We also prefer the next thread
+                    // when the current thread's priority is equal to the suggestion's, but the
+                    // next thread has been waiting longer.
+ if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
+ (suggested->GetPriority() == cur_thread.GetPriority() &&
+ next_thread != std::addressof(cur_thread) &&
+ next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
+ suggested = nullptr;
+ break;
+ }
+
+ // If we're allowed to do a migration, do one.
+ // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
+ // suggestion to the front of the queue.
+ if (running_on_suggested_core == nullptr ||
+ running_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested, true);
+ IncrementScheduledCount(suggested);
+ break;
+ } else {
+ // We couldn't perform a migration, but we should check again on a future
+ // yield.
+ recheck = true;
+ }
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ // If we still have a suggestion or the next thread is different, we have an update to
+ // perform.
+ if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else if (!recheck) {
+ // Otherwise if we don't need to re-check, set the thread's yield count so that we
+ // won't waste work until the process is scheduled again.
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ }
+ }
+}
+
+void KScheduler::YieldToAnyThread() {
+ auto& kernel = system.Kernel();
+
+ // Validate preconditions.
+ ASSERT(CanSchedule(kernel));
+ ASSERT(kernel.CurrentProcess() != nullptr);
+
+ // Get the current thread and process.
+ Thread& cur_thread = *GetCurrentThread();
+ Process& cur_process = *kernel.CurrentProcess();
+
+ // If the thread's yield count matches, there's nothing for us to do.
+ if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
+ return;
+ }
+
+ // Get a reference to the priority queue.
+ auto& priority_queue = GetPriorityQueue(kernel);
+
+ // Perform the yield.
+ {
+ KScopedSchedulerLock lock(kernel);
+
+ const auto cur_state = cur_thread.scheduling_state;
+ if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+ // Get the current active core.
+ const s32 core_id = cur_thread.GetActiveCore();
+
+ // Migrate the current thread to core -1.
+ cur_thread.SetActiveCore(-1);
+ priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
+ IncrementScheduledCount(std::addressof(cur_thread));
+
+ // If there's nothing scheduled, we can try to perform a migration.
+ if (priority_queue.GetScheduledFront(core_id) == nullptr) {
+ // While we have a suggested thread, try to migrate it!
+ Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+ while (suggested != nullptr) {
+ // Check if the suggested thread is the top thread on its core.
+ const s32 suggested_core = suggested->GetActiveCore();
+ if (Thread* top_on_suggested_core =
+ (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+ : nullptr;
+ top_on_suggested_core != suggested) {
+ // If we're allowed to do a migration, do one.
+ if (top_on_suggested_core == nullptr ||
+ top_on_suggested_core->GetPriority() >=
+ HighestCoreMigrationAllowedPriority) {
+ suggested->SetActiveCore(core_id);
+ priority_queue.ChangeCore(suggested_core, suggested);
+ IncrementScheduledCount(suggested);
+ }
+
+ // Regardless of whether we migrated, we had a candidate, so we're done.
+ break;
+ }
+
+ // Get the next suggestion.
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+ }
+
+ // If the suggestion is different from the current thread, we need to perform an
+ // update.
+ if (suggested != std::addressof(cur_thread)) {
+ SetSchedulerUpdateNeeded(kernel);
+ } else {
+ // Otherwise, set the thread's yield count so that we won't waste work until the
+ // process is scheduled again.
+ cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+ }
+ } else {
+ // Otherwise, we have an update to perform.
+ SetSchedulerUpdateNeeded(kernel);
+ }
+ }
+ }
+}
+
+KScheduler::KScheduler(Core::System& system, std::size_t core_id)
+ : system(system), core_id(core_id) {
+ switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
+ this->state.needs_scheduling = true;
+ this->state.interrupt_task_thread_runnable = false;
+ this->state.should_count_idle = false;
+ this->state.idle_count = 0;
+ this->state.idle_thread_stack = nullptr;
+ this->state.highest_priority_thread = nullptr;
+}
+
+KScheduler::~KScheduler() = default;
+
+Thread* KScheduler::GetCurrentThread() const {
+ if (current_thread) {
+ return current_thread;
+ }
+ return idle_thread;
+}
+
+u64 KScheduler::GetLastContextSwitchTicks() const {
+ return last_context_switch_time;
+}
+
+void KScheduler::RescheduleCurrentCore() {
+ ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+
+ auto& phys_core = system.Kernel().PhysicalCore(core_id);
+ if (phys_core.IsInterrupted()) {
+ phys_core.ClearInterrupt();
+ }
+ guard.lock();
+ if (this->state.needs_scheduling) {
+ Schedule();
+ } else {
+ guard.unlock();
+ }
+}
+
+void KScheduler::OnThreadStart() {
+ SwitchContextStep2();
+}
+
+void KScheduler::Unload(Thread* thread) {
+ if (thread) {
+ thread->SetIsRunning(false);
+ if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
+ system.ArmInterface(core_id).ExceptionalExit();
+ thread->SetContinuousOnSVC(false);
+ }
+ if (!thread->IsHLEThread() && !thread->HasExited()) {
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ }
+ thread->context_guard.unlock();
+ }
+}
+
+void KScheduler::Reload(Thread* thread) {
+ if (thread) {
+ ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+ "Thread must be runnable.");
+
+        // Mark the thread as running again and clear its was-running flag
+ thread->SetIsRunning(true);
+ thread->SetWasRunning(false);
+
+ auto* const thread_owner_process = thread->GetOwnerProcess();
+ if (thread_owner_process != nullptr) {
+ system.Kernel().MakeCurrentProcess(thread_owner_process);
+ }
+ if (!thread->IsHLEThread()) {
+ Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+ }
+ }
+}
+
+void KScheduler::SwitchContextStep2() {
+ // Load context of new thread
+ Reload(current_thread);
+
+ RescheduleCurrentCore();
+}
+
+void KScheduler::ScheduleImpl() {
+ Thread* previous_thread = current_thread;
+ current_thread = state.highest_priority_thread;
+
+ this->state.needs_scheduling = false;
+
+ if (current_thread == previous_thread) {
+ guard.unlock();
+ return;
+ }
+
+ Process* const previous_process = system.Kernel().CurrentProcess();
+
+ UpdateLastContextSwitchTime(previous_thread, previous_process);
+
+ // Save context for previous thread
+ Unload(previous_thread);
+
+ std::shared_ptr<Common::Fiber>* old_context;
+ if (previous_thread != nullptr) {
+ old_context = &previous_thread->GetHostContext();
+ } else {
+ old_context = &idle_thread->GetHostContext();
+ }
+ guard.unlock();
+
+ Common::Fiber::YieldTo(*old_context, switch_fiber);
+    // When this thread is woken up, it may have been moved to another core, so
+    // continue on whichever core's scheduler is now current.
+ auto& next_scheduler = *system.Kernel().CurrentScheduler();
+ next_scheduler.SwitchContextStep2();
+}
+
+void KScheduler::OnSwitch(void* this_scheduler) {
+ KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
+ sched->SwitchToCurrent();
+}
+
+void KScheduler::SwitchToCurrent() {
+ while (true) {
+ {
+ std::scoped_lock lock{guard};
+ current_thread = state.highest_priority_thread;
+ this->state.needs_scheduling = false;
+ }
+ const auto is_switch_pending = [this] {
+ std::scoped_lock lock{guard};
+ return state.needs_scheduling.load(std::memory_order_relaxed);
+ };
+ do {
+ if (current_thread != nullptr && !current_thread->IsHLEThread()) {
+ current_thread->context_guard.lock();
+ if (!current_thread->IsRunnable()) {
+ current_thread->context_guard.unlock();
+ break;
+ }
+ if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
+ current_thread->context_guard.unlock();
+ break;
+ }
+ }
+ std::shared_ptr<Common::Fiber>* next_context;
+ if (current_thread != nullptr) {
+ next_context = &current_thread->GetHostContext();
+ } else {
+ next_context = &idle_thread->GetHostContext();
+ }
+ Common::Fiber::YieldTo(switch_fiber, *next_context);
+ } while (!is_switch_pending());
+ }
+}
+
+void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+ const u64 prev_switch_ticks = last_context_switch_time;
+ const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
+ const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
+
+ if (thread != nullptr) {
+ thread->UpdateCPUTimeTicks(update_ticks);
+ }
+
+ if (process != nullptr) {
+ process->UpdateCPUTimeTicks(update_ticks);
+ }
+
+ last_context_switch_time = most_recent_switch_ticks;
+}
+
+void KScheduler::Initialize() {
+ std::string name = "Idle Thread Id:" + std::to_string(core_id);
+ std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
+ void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+ ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+ auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+ nullptr, std::move(init_func), init_func_parameter);
+ idle_thread = thread_res.Unwrap().get();
+
+ {
+ KScopedSchedulerLock lock{system.Kernel()};
+ idle_thread->SetStatus(ThreadStatus::Ready);
+ }
+}
+
+KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
+ : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
+
+KScopedSchedulerLock::~KScopedSchedulerLock() = default;
+
+} // namespace Kernel
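
The delta bookkeeping in UpdateLastContextSwitchTime above is the scheduler's entire time accounting. A minimal standalone sketch of the same idea (not yuzu code; a monotonic tick source is assumed):

#include <cstdint>

struct TickAccounting {
    std::uint64_t last_switch_ticks{};

    // Returns the tick delta to credit to the outgoing thread and its process.
    // The stored timestamp is simply the most recent tick count; no special
    // arithmetic is applied to it.
    std::uint64_t OnContextSwitch(std::uint64_t most_recent_ticks) {
        const std::uint64_t update_ticks = most_recent_ticks - last_switch_ticks;
        last_switch_ticks = most_recent_ticks;
        return update_ticks;
    }
};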
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
new file mode 100644
index 000000000..e84abc84c
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -0,0 +1,201 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hle/kernel/global_scheduler_context.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+
+namespace Common {
+class Fiber;
+}
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+
+class KernelCore;
+class Process;
+class SchedulerLock;
+class Thread;
+
+class KScheduler final {
+public:
+ explicit KScheduler(Core::System& system, std::size_t core_id);
+ ~KScheduler();
+
+ /// Reschedules to the next available thread (call after current thread is suspended)
+ void RescheduleCurrentCore();
+
+ /// Reschedules cores pending reschedule, to be called on EnableScheduling.
+ static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
+ Core::EmuThreadHandle global_thread);
+
+ /// The next two are for single-core mode only.
+ /// Unload current thread before preempting core.
+ void Unload(Thread* thread);
+
+ /// Reload current thread after core preemption.
+ void Reload(Thread* thread);
+
+ /// Gets the current running thread
+ [[nodiscard]] Thread* GetCurrentThread() const;
+
+ /// Gets the timestamp for the last context switch in ticks.
+ [[nodiscard]] u64 GetLastContextSwitchTicks() const;
+
+ [[nodiscard]] bool ContextSwitchPending() const {
+ return state.needs_scheduling.load(std::memory_order_relaxed);
+ }
+
+ void Initialize();
+
+ void OnThreadStart();
+
+ [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
+ return switch_fiber;
+ }
+
+ [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
+ return switch_fiber;
+ }
+
+ [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
+
+ /**
+ * Takes a thread and moves it to the back of its priority list.
+ *
+ * @note This operation can be redundant, in which case no scheduling change occurs.
+ */
+ void YieldWithoutCoreMigration();
+
+ /**
+ * Takes a thread and moves it to the back of its priority list.
+ * Afterwards, tries to pick a suggested thread from the suggested queue that has run less
+ * recently or has a better priority than the next thread on the core.
+ *
+ * @note This operation can be redundant, in which case no scheduling change occurs.
+ */
+ void YieldWithCoreMigration();
+
+ /**
+ * Takes a thread and moves it out of the scheduling queue and into the suggested queue.
+ * If no thread can be scheduled on that core afterwards, a suggested thread is obtained
+ * instead.
+ *
+ * @note This operation can be redundant, in which case no scheduling change occurs.
+ */
+ void YieldToAnyThread();
+
+ /// Notify the scheduler a thread's status has changed.
+ static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
+
+ /// Notify the scheduler a thread's priority has changed.
+ static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
+ u32 old_priority);
+
+ /// Notify the scheduler a thread's core and/or affinity mask has changed.
+ static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+ const KAffinityMask& old_affinity, s32 old_core);
+
+ static bool CanSchedule(KernelCore& kernel);
+ static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+ static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+ static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+ static void DisableScheduling(KernelCore& kernel);
+ static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+ Core::EmuThreadHandle global_thread);
+ [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
+private:
+ friend class GlobalSchedulerContext;
+
+ /**
+ * Takes care of selecting the new scheduled threads in three steps:
+ *
+ * 1. First, a thread is selected from the top of the priority queue. If no thread
+ * is obtained, we move to step two; otherwise, we are done.
+ *
+ * 2. Second, we try to get a suggested thread that's not assigned to any core or
+ * that is not the top thread on its core.
+ *
+ * 3. Third, if no suggested thread is found, we do a second pass and pick a running
+ * thread on another core and swap it with this core's current thread.
+ *
+ * @return The cores needing scheduling.
+ */
+ [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+
+ [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+
+ void RotateScheduledQueue(s32 core_id, s32 priority);
+
+ void Schedule() {
+ ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+ this->ScheduleImpl();
+ }
+
+ /// Switches the CPU's active thread context to that of the specified thread
+ void ScheduleImpl();
+
+ /// When a thread wakes up, it must run this through its new scheduler
+ void SwitchContextStep2();
+
+ /**
+ * Called on every context switch to update the internal timestamp
+ * This also updates the running time ticks for the given thread and
+ * process using the following difference:
+ *
+ * ticks += most_recent_ticks - last_context_switch_ticks
+ *
+ * The internal tick timestamp for the scheduler is simply the
+ * most recent tick count retrieved. No special arithmetic is
+ * applied to it.
+ */
+ void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+
+ static void OnSwitch(void* this_scheduler);
+ void SwitchToCurrent();
+
+ Thread* current_thread{};
+ Thread* idle_thread{};
+
+ std::shared_ptr<Common::Fiber> switch_fiber{};
+
+ struct SchedulingState {
+ std::atomic<bool> needs_scheduling;
+ bool interrupt_task_thread_runnable{};
+ bool should_count_idle{};
+ u64 idle_count{};
+ Thread* highest_priority_thread{};
+ void* idle_thread_stack{};
+ };
+
+ SchedulingState state;
+
+ Core::System& system;
+ u64 last_context_switch_time{};
+ const std::size_t core_id;
+
+ Common::SpinLock guard{};
+};
+
+class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
+public:
+ explicit KScopedSchedulerLock(KernelCore& kernel);
+ ~KScopedSchedulerLock();
+};
+
+} // namespace Kernel
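
For orientation, here is a minimal self-contained sketch (not yuzu code; the bucket layout, priority count and names are illustrative) of the "move to the back of its priority list" rotation that YieldWithoutCoreMigration performs, followed by re-selecting the front thread:

#include <array>
#include <cstddef>
#include <cstdint>
#include <list>

constexpr std::size_t NumPriorities = 64;

struct ToyThread {
    std::uint32_t priority;
};

using PriorityQueue = std::array<std::list<ToyThread*>, NumPriorities>;

// Rotates 'current' to the back of its priority bucket and returns the new
// highest-priority thread (a lower value means a higher priority).
ToyThread* YieldWithoutMigration(PriorityQueue& queue, ToyThread* current) {
    auto& bucket = queue[current->priority];
    if (!bucket.empty() && bucket.front() == current) {
        bucket.pop_front();
        bucket.push_back(current);
    }
    for (auto& b : queue) {
        if (!b.empty()) {
            return b.front();
        }
    }
    return nullptr;
}

If the front thread is unchanged afterwards (the yielder was alone at its priority), the yield is the redundant case called out in the @note above.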
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
new file mode 100644
index 000000000..2f1c1f691
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -0,0 +1,75 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+template <typename SchedulerType>
+class KAbstractSchedulerLock {
+public:
+ explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
+
+ bool IsLockedByCurrentThread() const {
+ return this->owner_thread == kernel.GetCurrentEmuThreadID();
+ }
+
+ void Lock() {
+ if (this->IsLockedByCurrentThread()) {
+ // If we already own the lock, we can just increment the count.
+ ASSERT(this->lock_count > 0);
+ this->lock_count++;
+ } else {
+ // Otherwise, we want to disable scheduling and acquire the spinlock.
+ SchedulerType::DisableScheduling(kernel);
+ this->spin_lock.lock();
+
+ // For debug, ensure that our state is valid.
+ ASSERT(this->lock_count == 0);
+ ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
+
+ // Increment count, take ownership.
+ this->lock_count = 1;
+ this->owner_thread = kernel.GetCurrentEmuThreadID();
+ }
+ }
+
+ void Unlock() {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(this->lock_count > 0);
+
+ // Release an instance of the lock.
+ if ((--this->lock_count) == 0) {
+ // We're no longer going to hold the lock. Take note of what cores need scheduling.
+ const u64 cores_needing_scheduling =
+ SchedulerType::UpdateHighestPriorityThreads(kernel);
+ Core::EmuThreadHandle leaving_thread = owner_thread;
+
+ // Note that we no longer hold the lock, and unlock the spinlock.
+ this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
+ this->spin_lock.unlock();
+
+ // Enable scheduling, and perform a rescheduling operation.
+ SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
+ }
+ }
+
+private:
+ KernelCore& kernel;
+ Common::SpinLock spin_lock{};
+ s32 lock_count{};
+ Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
+};
+
+} // namespace Kernel
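
KAbstractSchedulerLock combines three ideas: recursive ownership by the current emulated thread, disabled scheduling while held, and a reschedule of the affected cores on the final unlock. A standalone model of just the recursive-ownership part (not yuzu code; the DisableScheduling/EnableScheduling steps are elided, and std::thread::id stands in for Core::EmuThreadHandle):

#include <cassert>
#include <mutex>
#include <thread>

class RecursiveOwnerLock {
public:
    bool IsLockedByCurrentThread() const {
        return owner == std::this_thread::get_id();
    }

    void Lock() {
        if (IsLockedByCurrentThread()) {
            ++count; // Re-entry by the owner: just bump the count.
            return;
        }
        mutex.lock();                       // First entry: take the underlying lock...
        owner = std::this_thread::get_id(); // ...and take ownership.
        count = 1;
    }

    void Unlock() {
        assert(IsLockedByCurrentThread() && count > 0);
        if (--count == 0) { // Final release: give up ownership.
            owner = {};
            mutex.unlock();
        }
    }

private:
    std::mutex mutex;
    std::thread::id owner{};
    int count = 0;
};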
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
new file mode 100644
index 000000000..d7cc557b2
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+template <typename T>
+concept KLockable = !std::is_reference_v<T> && requires(T& t) {
+ { t.Lock() } -> std::same_as<void>;
+ { t.Unlock() } -> std::same_as<void>;
+};
+
+template <typename T>
+requires KLockable<T>
+class KScopedLock {
+public:
+ explicit KScopedLock(T* l) : lock_ptr(l) {
+ this->lock_ptr->Lock();
+ }
+ explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
+ }
+ ~KScopedLock() {
+ this->lock_ptr->Unlock();
+ }
+
+ KScopedLock(const KScopedLock&) = delete;
+ KScopedLock(KScopedLock&&) = delete;
+
+private:
+ T* lock_ptr;
+};
+
+} // namespace Kernel
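
A hypothetical usage sketch: any non-reference type exposing void Lock()/Unlock() satisfies KLockable, so KScopedLock provides RAII over it (SpinLockLike and Example are illustrative names, not part of this change):

#include "core/hle/kernel/k_scoped_lock.h"

namespace {

struct SpinLockLike {
    void Lock() { /* acquire the underlying lock */ }
    void Unlock() { /* release the underlying lock */ }
};

void Example() {
    SpinLockLike lock;
    // Calls lock.Lock() now and lock.Unlock() on scope exit, even on an
    // early return.
    Kernel::KScopedLock lk{lock};
    // ... critical section ...
}

} // namespace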
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
new file mode 100644
index 000000000..2bb3817fa
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -0,0 +1,50 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
+
+namespace Kernel {
+
+class KScopedSchedulerLockAndSleep {
+public:
+ explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
+ s64 timeout)
+ : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
+ event_handle = InvalidHandle;
+
+ // Lock the scheduler.
+ kernel.GlobalSchedulerContext().scheduler_lock.Lock();
+ }
+
+ ~KScopedSchedulerLockAndSleep() {
+ // Register the sleep.
+ if (this->timeout_tick > 0) {
+ kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
+ }
+
+ // Unlock the scheduler.
+ kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
+ }
+
+ void CancelSleep() {
+ this->timeout_tick = 0;
+ }
+
+private:
+ KernelCore& kernel;
+ Handle& event_handle;
+ Thread* thread{};
+ s64 timeout_tick{};
+};
+
+} // namespace Kernel
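
A hypothetical sketch of the wait-with-timeout pattern this helper enables (the surrounding wait logic, already_signaled and timeout_ticks are illustrative, not a real SVC): the scheduler stays locked while the wait is set up, and the destructor registers the wakeup event only if the sleep was not cancelled.

Handle event_handle = InvalidHandle;
{
    KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout_ticks);

    if (already_signaled) {
        // Nothing to wait for: zero the timeout so the destructor skips
        // ScheduleTimeEvent, then bail out.
        lock.CancelSleep();
        return RESULT_SUCCESS;
    }

    current_thread->SetStatus(ThreadStatus::WaitSleep);
} // Destructor registers the time event (unless cancelled) and unlocks the scheduler.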
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index f2b0fe2fd..e8ece8164 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -7,15 +7,15 @@
#include <bitset>
#include <functional>
#include <memory>
-#include <mutex>
#include <thread>
-#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
+#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
@@ -28,6 +28,7 @@
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/memory_manager.h"
@@ -35,7 +36,7 @@
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
+#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/thread.h"
@@ -50,17 +51,20 @@ namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system, KernelCore& kernel)
- : global_scheduler{kernel}, synchronization{system}, time_manager{system},
- global_handle_table{kernel}, system{system} {}
+ : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
+ system} {}
void SetMulticore(bool is_multicore) {
this->is_multicore = is_multicore;
}
void Initialize(KernelCore& kernel) {
- Shutdown();
RegisterHostThread();
+ global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
+ service_thread_manager =
+ std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
+
InitializePhysicalCores();
InitializeSystemResourceLimit(kernel);
InitializeMemoryLayout();
@@ -69,7 +73,19 @@ struct KernelCore::Impl {
InitializeSuspendThreads();
}
+ void InitializeCores() {
+ for (auto& core : cores) {
+ core.Initialize(current_process->Is64BitProcess());
+ }
+ }
+
void Shutdown() {
+ process_list.clear();
+
+ // Ensure all service threads shut down gracefully
+ service_thread_manager.reset();
+ service_threads.clear();
+
next_object_id = 0;
next_kernel_process_id = Process::InitialKIPIDMin;
next_user_process_id = Process::ProcessIDMin;
@@ -81,41 +97,30 @@ struct KernelCore::Impl {
}
}
- for (std::size_t i = 0; i < cores.size(); i++) {
- cores[i].Shutdown();
- schedulers[i].reset();
- }
cores.clear();
- registered_core_threads.reset();
-
- process_list.clear();
current_process = nullptr;
system_resource_limit = nullptr;
global_handle_table.Clear();
- preemption_event = nullptr;
- global_scheduler.Shutdown();
+ preemption_event = nullptr;
named_ports.clear();
- for (auto& core : cores) {
- core.Shutdown();
- }
- cores.clear();
-
exclusive_monitor.reset();
- host_thread_ids.clear();
+
+ // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
+ next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
}
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
- cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
+ schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
+ cores.emplace_back(i, system, *schedulers[i], interrupts);
}
}
@@ -147,8 +152,8 @@ struct KernelCore::Impl {
preemption_event = Core::Timing::CreateEvent(
"PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
{
- SchedulerLock lock(kernel);
- global_scheduler.PreemptThreads();
+ KScopedSchedulerLock lock(kernel);
+ global_scheduler_context->PreemptThreads();
}
const auto time_interval = std::chrono::nanoseconds{
Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -177,63 +182,62 @@ struct KernelCore::Impl {
void MakeCurrentProcess(Process* process) {
current_process = process;
-
if (process == nullptr) {
return;
}
- u32 core_id = GetCurrentHostThreadID();
+ const u32 core_id = GetCurrentHostThreadID();
if (core_id < Core::Hardware::NUM_CPU_CORES) {
system.Memory().SetCurrentPageTable(*process, core_id);
}
}
+ /// Allocates a new host thread ID; should only be called by GetHostThreadId
+ u32 AllocateHostThreadId(std::optional<std::size_t> core_id) {
+ if (core_id) {
+ // The first four slots are reserved for CPU core threads
+ ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES);
+ return static_cast<u32>(*core_id);
+ } else {
+ return next_host_thread_id++;
+ }
+ }
+
+ /// Gets the host thread ID for the caller, allocating a new one if this is the first time
+ u32 GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) {
+ const thread_local auto host_thread_id{AllocateHostThreadId(core_id)};
+ return host_thread_id;
+ }
+
+ /// Registers a CPU core thread by allocating a host thread ID for it
void RegisterCoreThread(std::size_t core_id) {
- std::unique_lock lock{register_thread_mutex};
+ ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+ const auto this_id = GetHostThreadId(core_id);
if (!is_multicore) {
- single_core_thread_id = std::this_thread::get_id();
+ single_core_thread_id = this_id;
}
- const std::thread::id this_id = std::this_thread::get_id();
- const auto it = host_thread_ids.find(this_id);
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- ASSERT(it == host_thread_ids.end());
- ASSERT(!registered_core_threads[core_id]);
- host_thread_ids[this_id] = static_cast<u32>(core_id);
- registered_core_threads.set(core_id);
}
+ /// Registers a new host thread by allocating a host thread ID for it
void RegisterHostThread() {
- std::unique_lock lock{register_thread_mutex};
- const std::thread::id this_id = std::this_thread::get_id();
- const auto it = host_thread_ids.find(this_id);
- if (it != host_thread_ids.end()) {
- return;
- }
- host_thread_ids[this_id] = registered_thread_ids++;
+ [[maybe_unused]] const auto this_id = GetHostThreadId();
}
- u32 GetCurrentHostThreadID() const {
- const std::thread::id this_id = std::this_thread::get_id();
- if (!is_multicore) {
- if (single_core_thread_id == this_id) {
- return static_cast<u32>(system.GetCpuManager().CurrentCore());
- }
- }
- std::unique_lock lock{register_thread_mutex};
- const auto it = host_thread_ids.find(this_id);
- if (it == host_thread_ids.end()) {
- return Core::INVALID_HOST_THREAD_ID;
+ [[nodiscard]] u32 GetCurrentHostThreadID() {
+ const auto this_id = GetHostThreadId();
+ if (!is_multicore && single_core_thread_id == this_id) {
+ return static_cast<u32>(system.GetCpuManager().CurrentCore());
}
- return it->second;
+ return this_id;
}
- Core::EmuThreadHandle GetCurrentEmuThreadID() const {
+ [[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() {
Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
result.host_handle = GetCurrentHostThreadID();
if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
return result;
}
- const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
+ const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
const Kernel::Thread* current = sched.GetCurrentThread();
if (current != nullptr && !current->IsPhantomMode()) {
result.guest_handle = current->GetGlobalHandle();
@@ -302,7 +306,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<std::shared_ptr<Process>> process_list;
Process* current_process = nullptr;
- Kernel::GlobalScheduler global_scheduler;
+ std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::Synchronization synchronization;
Kernel::TimeManager time_manager;
@@ -321,11 +325,8 @@ struct KernelCore::Impl {
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::vector<Kernel::PhysicalCore> cores;
- // 0-3 IDs represent core threads, >3 represent others
- std::unordered_map<std::thread::id, u32> host_thread_ids;
- u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
- std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads;
- mutable std::mutex register_thread_mutex;
+ // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
+ std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
// Kernel memory management
std::unique_ptr<Memory::MemoryManager> memory_manager;
@@ -337,12 +338,19 @@ struct KernelCore::Impl {
std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
+ // Threads used for services
+ std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
+
+ // Service threads are managed by a worker thread, so that a calling service thread can
+ // queue up its own release
+ std::unique_ptr<Common::ThreadWorker> service_thread_manager;
+
std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
- std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+ std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
bool is_multicore{};
- std::thread::id single_core_thread_id{};
+ u32 single_core_thread_id{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
@@ -363,6 +371,10 @@ void KernelCore::Initialize() {
impl->Initialize(*this);
}
+void KernelCore::InitializeCores() {
+ impl->InitializeCores();
+}
+
void KernelCore::Shutdown() {
impl->Shutdown();
}
@@ -395,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
return impl->process_list;
}
-Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
- return impl->global_scheduler;
+Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
+ return *impl->global_scheduler_context;
}
-const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
- return impl->global_scheduler;
+const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
+ return *impl->global_scheduler_context;
}
-Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
+Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
return *impl->schedulers[id];
}
-const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
+const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
return *impl->schedulers[id];
}
@@ -431,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
return impl->cores[core_id];
}
-Kernel::Scheduler& KernelCore::CurrentScheduler() {
+Kernel::KScheduler* KernelCore::CurrentScheduler() {
u32 core_id = impl->GetCurrentHostThreadID();
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- return *impl->schedulers[core_id];
-}
-
-const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
- u32 core_id = impl->GetCurrentHostThreadID();
- ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
- return *impl->schedulers[core_id];
+ if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+ // This is expected when not called from a guest thread
+ return {};
+ }
+ return impl->schedulers[core_id].get();
}
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
@@ -477,12 +486,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
}
void KernelCore::InvalidateAllInstructionCaches() {
- auto& threads = GlobalScheduler().GetThreadList();
- for (auto& thread : threads) {
- if (!thread->IsHLEThread()) {
- auto& arm_interface = thread->ArmInterface();
- arm_interface.ClearInstructionCache();
+ for (auto& physical_core : impl->cores) {
+ physical_core.ArmInterface().ClearInstructionCache();
+ }
+}
+
+void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
+ for (auto& physical_core : impl->cores) {
+ if (!physical_core.IsInitialized()) {
+ continue;
}
+ physical_core.ArmInterface().InvalidateCacheRange(addr, size);
}
}
@@ -598,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
void KernelCore::Suspend(bool in_suspention) {
const bool should_suspend = exception_exited || in_suspention;
{
- SchedulerLock lock(*this);
+ KScopedSchedulerLock lock(*this);
ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
impl->suspend_threads[i]->SetStatus(status);
@@ -625,4 +639,19 @@ void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
}
+std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
+ auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
+ impl->service_thread_manager->QueueWork(
+ [this, service_thread] { impl->service_threads.emplace(service_thread); });
+ return service_thread;
+}
+
+void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
+ impl->service_thread_manager->QueueWork([this, service_thread] {
+ if (auto strong_ptr = service_thread.lock()) {
+ impl->service_threads.erase(strong_ptr);
+ }
+ });
+}
+
} // namespace Kernel
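
The new host thread ID scheme above replaces the map-plus-mutex bookkeeping with lock-free allocation: a thread_local caches the ID, so each host thread pays for the atomic increment exactly once. A standalone sketch (not yuzu code):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <optional>

constexpr std::uint32_t NumCpuCores = 4;

// IDs 0..3 are reserved for the CPU core threads; everything else draws
// from the shared counter.
std::atomic<std::uint32_t> next_id{NumCpuCores};

std::uint32_t AllocateId(std::optional<std::size_t> core_id) {
    return core_id ? static_cast<std::uint32_t>(*core_id) : next_id++;
}

std::uint32_t GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) {
    // thread_local: AllocateId runs once per host thread, on the first call;
    // later calls return the cached value.
    thread_local const std::uint32_t id = AllocateId(core_id);
    return id;
}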
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 16285c3f0..e3169f5a7 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -35,13 +35,14 @@ class SlabHeap;
class AddressArbiter;
class ClientPort;
-class GlobalScheduler;
+class GlobalSchedulerContext;
class HandleTable;
class PhysicalCore;
class Process;
class ResourceLimit;
-class Scheduler;
+class KScheduler;
class SharedMemory;
+class ServiceThread;
class Synchronization;
class Thread;
class TimeManager;
@@ -74,6 +75,9 @@ public:
/// Resets the kernel to a clean slate for use.
void Initialize();
+ /// Initializes the CPU cores.
+ void InitializeCores();
+
/// Clears all resources in use by the kernel instance.
void Shutdown();
@@ -99,16 +103,16 @@ public:
const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
/// Gets the sole instance of the global scheduler
- Kernel::GlobalScheduler& GlobalScheduler();
+ Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
/// Gets the sole instance of the global scheduler
- const Kernel::GlobalScheduler& GlobalScheduler() const;
+ const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
/// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
- Kernel::Scheduler& Scheduler(std::size_t id);
+ Kernel::KScheduler& Scheduler(std::size_t id);
/// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
- const Kernel::Scheduler& Scheduler(std::size_t id) const;
+ const Kernel::KScheduler& Scheduler(std::size_t id) const;
/// Gets the an instance of the respective physical CPU core.
Kernel::PhysicalCore& PhysicalCore(std::size_t id);
@@ -117,10 +121,7 @@ public:
const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
/// Gets the sole instance of the Scheduler at the current running core.
- Kernel::Scheduler& CurrentScheduler();
-
- /// Gets the sole instance of the Scheduler at the current running core.
- const Kernel::Scheduler& CurrentScheduler() const;
+ Kernel::KScheduler* CurrentScheduler();
/// Gets the an instance of the current physical CPU core.
Kernel::PhysicalCore& CurrentPhysicalCore();
@@ -153,6 +154,8 @@ public:
void InvalidateAllInstructionCaches();
+ void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
+
/// Adds a port to the named port table
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
@@ -225,6 +228,22 @@ public:
void ExitSVCProfile();
+ /**
+ * Creates an HLE service thread, which is used to execute service routines asynchronously.
+ * While these are allocated per ServerSession, they need to be owned and managed outside of
+ * ServerSession to avoid a circular dependency.
+ * @param name String name for the ServerSession creating this thread, used for debug purposes.
+ * @returns A weak pointer to the newly created service thread.
+ */
+ std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
+
+ /**
+ * Releases an HLE service thread, instructing KernelCore to free it. This should be called when
+ * the ServerSession associated with the thread is destroyed.
+ * @param service_thread Service thread to release.
+ */
+ void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
+
private:
friend class Object;
friend class Process;
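
A hypothetical usage sketch of the service-thread lifecycle from a session's point of view (MyServerSession is an illustrative stand-in for the real consumer, ServerSession):

#include <memory>

#include "core/hle/kernel/kernel.h"

class MyServerSession {
public:
    explicit MyServerSession(Kernel::KernelCore& kernel)
        : kernel{kernel}, service_thread{kernel.CreateServiceThread("MyServerSession")} {}

    ~MyServerSession() {
        // The release is queued on the manager worker, so a service thread
        // can trigger its own teardown without destroying itself mid-call.
        kernel.ReleaseServiceThread(service_thread);
    }

private:
    Kernel::KernelCore& kernel;
    std::weak_ptr<Kernel::ServiceThread> service_thread;
};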
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
+ return 0;
}
std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
+ return 0;
}
} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index 9d7839d08..83acece1e 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -73,12 +73,12 @@ enum class MemoryState : u32 {
ThreadLocal =
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
- Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
- FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
- FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+ Transferred = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
+ FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
+ FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
- SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
- FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+ SharedTransferred = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
+ FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -111,8 +111,8 @@ static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
-static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
-static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
+static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
+static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
@@ -222,9 +222,9 @@ public:
public:
constexpr MemoryBlock() = default;
- constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state,
- MemoryPermission perm, MemoryAttribute attribute)
- : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {}
+ constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
+ MemoryPermission perm_, MemoryAttribute attribute_)
+ : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
constexpr VAddr GetAddress() const {
return addr;
diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
index 6e1d41075..f57d1bbcc 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.h
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -57,8 +57,8 @@ public:
private:
void MergeAdjacent(iterator it, iterator& next_it);
- const VAddr start_addr;
- const VAddr end_addr;
+ [[maybe_unused]] const VAddr start_addr;
+ [[maybe_unused]] const VAddr end_addr;
MemoryBlockTree memory_block_tree;
};
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index a3fadb533..080886554 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
physical_memory_usage = 0;
memory_pool = pool;
- page_table_impl.Resize(address_space_width, PageBits, true);
+ page_table_impl.Resize(address_space_width, PageBits);
return InitializeMemoryLayout(start, end);
}
@@ -670,6 +670,11 @@ ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, Memo
return RESULT_SUCCESS;
}
+ if ((prev_perm & MemoryPermission::Execute) != (perm & MemoryPermission::Execute)) {
+ // Memory execution state is changing, invalidate CPU cache range
+ system.InvalidateCpuInstructionCacheRange(addr, size);
+ }
+
const std::size_t num_pages{size / PageSize};
const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None
? OperationType::ChangePermissionsAndRefresh
@@ -1002,8 +1007,8 @@ constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
case MemoryState::Shared:
case MemoryState::AliasCode:
case MemoryState::AliasCodeData:
- case MemoryState::Transfered:
- case MemoryState::SharedTransfered:
+ case MemoryState::Transferred:
+ case MemoryState::SharedTransferred:
case MemoryState::SharedCode:
case MemoryState::GeneratedCode:
case MemoryState::CodeOut:
@@ -1037,8 +1042,8 @@ constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
case MemoryState::Shared:
case MemoryState::AliasCode:
case MemoryState::AliasCodeData:
- case MemoryState::Transfered:
- case MemoryState::SharedTransfered:
+ case MemoryState::Transferred:
+ case MemoryState::SharedTransferred:
case MemoryState::SharedCode:
case MemoryState::GeneratedCode:
case MemoryState::CodeOut:
@@ -1075,8 +1080,8 @@ constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState s
case MemoryState::AliasCodeData:
case MemoryState::Stack:
case MemoryState::ThreadLocal:
- case MemoryState::Transfered:
- case MemoryState::SharedTransfered:
+ case MemoryState::Transferred:
+ case MemoryState::SharedTransferred:
case MemoryState::SharedCode:
case MemoryState::GeneratedCode:
case MemoryState::CodeOut:
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 8f6c944d1..4f8075e0e 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -11,11 +11,11 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -73,9 +73,9 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
auto& kernel = system.Kernel();
std::shared_ptr<Thread> current_thread =
- SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+ SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ERR_INVALID_ADDRESS;
@@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
auto* owner = current_thread->GetLockOwner();
if (owner != nullptr) {
owner->RemoveMutexWaiter(current_thread);
@@ -153,10 +153,10 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
ResultCode Mutex::Release(VAddr address) {
auto& kernel = system.Kernel();
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
std::shared_ptr<Thread> current_thread =
- SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+ SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
auto [result, new_owner] = Unlock(current_thread, address);
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index c6bbdb080..7fea45f96 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,54 +2,60 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "common/assert.h"
-#include "common/logging/log.h"
#include "common/spin_lock.h"
-#include "core/arm/arm_interface.h"
-#ifdef ARCHITECTURE_x86_64
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
-#endif
-#include "core/arm/cpu_interrupt_handler.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/thread.h"
namespace Kernel {
-PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
- Core::CPUInterruptHandler& interrupt_handler)
- : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} {
-
- guard = std::make_unique<Common::SpinLock>();
-}
+PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
+ Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
+ : core_index{core_index}, system{system}, scheduler{scheduler},
+ interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}
PhysicalCore::~PhysicalCore() = default;
-void PhysicalCore::Idle() {
- interrupt_handler.AwaitInterrupt();
+void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
+#ifdef ARCHITECTURE_x86_64
+ auto& kernel = system.Kernel();
+ if (is_64_bit) {
+ arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+ system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ } else {
+ arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+ system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ }
+#else
+#error Platform not supported yet.
+#endif
}
-void PhysicalCore::Shutdown() {
- scheduler.Shutdown();
+void PhysicalCore::Run() {
+ arm_interface->Run();
+}
+
+void PhysicalCore::Idle() {
+ interrupts[core_index].AwaitInterrupt();
}
bool PhysicalCore::IsInterrupted() const {
- return interrupt_handler.IsInterrupted();
+ return interrupts[core_index].IsInterrupted();
}
void PhysicalCore::Interrupt() {
guard->lock();
- interrupt_handler.SetInterrupt(true);
+ interrupts[core_index].SetInterrupt(true);
guard->unlock();
}
void PhysicalCore::ClearInterrupt() {
guard->lock();
- interrupt_handler.SetInterrupt(false);
+ interrupts[core_index].SetInterrupt(false);
guard->unlock();
}
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index d7a7a951c..f2b0911aa 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -4,19 +4,21 @@
#pragma once
+#include <array>
#include <cstddef>
#include <memory>
+#include "core/arm/arm_interface.h"
+
namespace Common {
class SpinLock;
}
namespace Kernel {
-class Scheduler;
+class KScheduler;
} // namespace Kernel
namespace Core {
-class ARM_Interface;
class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
@@ -26,17 +28,24 @@ namespace Kernel {
class PhysicalCore {
public:
- PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler,
- Core::CPUInterruptHandler& interrupt_handler);
+ PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
+ Core::CPUInterrupts& interrupts);
~PhysicalCore();
PhysicalCore(const PhysicalCore&) = delete;
PhysicalCore& operator=(const PhysicalCore&) = delete;
PhysicalCore(PhysicalCore&&) = default;
- PhysicalCore& operator=(PhysicalCore&&) = default;
+ PhysicalCore& operator=(PhysicalCore&&) = delete;
+
+ /// Initialize the core for the specified parameters.
+ void Initialize(bool is_64_bit);
+
+ /// Execute current jit state
+ void Run();
void Idle();
+
/// Interrupt this physical core.
void Interrupt();
@@ -46,8 +55,17 @@ public:
/// Check if this core is interrupted
bool IsInterrupted() const;
- // Shutdown this physical core.
- void Shutdown();
+ bool IsInitialized() const {
+ return arm_interface != nullptr;
+ }
+
+ Core::ARM_Interface& ArmInterface() {
+ return *arm_interface;
+ }
+
+ const Core::ARM_Interface& ArmInterface() const {
+ return *arm_interface;
+ }
bool IsMainCore() const {
return core_index == 0;
@@ -61,19 +79,21 @@ public:
return core_index;
}
- Kernel::Scheduler& Scheduler() {
+ Kernel::KScheduler& Scheduler() {
return scheduler;
}
- const Kernel::Scheduler& Scheduler() const {
+ const Kernel::KScheduler& Scheduler() const {
return scheduler;
}
private:
- Core::CPUInterruptHandler& interrupt_handler;
- std::size_t core_index;
- Kernel::Scheduler& scheduler;
+ const std::size_t core_index;
+ Core::System& system;
+ Kernel::KScheduler& scheduler;
+ Core::CPUInterrupts& interrupts;
std::unique_ptr<Common::SpinLock> guard;
+ std::unique_ptr<Core::ARM_Interface> arm_interface;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index ff9d9248b..b905b486a 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -4,6 +4,7 @@
#include <algorithm>
#include <bitset>
+#include <ctime>
#include <memory>
#include <random>
#include "common/alignment.h"
@@ -14,13 +15,13 @@
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/lock.h"
#include "core/memory.h"
@@ -53,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
auto& kernel = system.Kernel();
// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
{
- SchedulerLock lock{kernel};
+ KScopedSchedulerLock lock{kernel};
thread->SetStatus(ThreadStatus::Ready);
}
}
@@ -123,7 +124,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
: kernel.CreateNewUserProcessID();
process->capabilities.InitializeForMetadatalessProcess();
- std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(0));
+ std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
std::uniform_int_distribution<u64> distribution;
std::generate(process->random_entropy.begin(), process->random_entropy.end(),
[&] { return distribution(rng); });
@@ -212,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) {
}
ResultCode Process::ClearSignalState() {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
if (status == ProcessStatus::Exited) {
LOG_ERROR(Kernel, "called on a terminated process instance.");
return ERR_INVALID_STATE;
@@ -313,7 +314,7 @@ void Process::PrepareForTermination() {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread.get() == system.CurrentScheduler().GetCurrentThread())
+ if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -324,7 +325,7 @@ void Process::PrepareForTermination() {
}
};
- stop_threads(system.GlobalScheduler().GetThreadList());
+ stop_threads(system.GlobalSchedulerContext().GetThreadList());
FreeTLSRegion(tls_region_address);
tls_region_address = 0;
@@ -346,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
}
VAddr Process::CreateTLSRegion() {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
tls_page_iter != tls_pages.cend()) {
return *tls_page_iter->ReserveSlot();
@@ -377,7 +378,7 @@ VAddr Process::CreateTLSRegion() {
}
void Process::FreeTLSRegion(VAddr tls_address) {
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(system.Kernel());
const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index f45cb5674..e412e58aa 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -216,6 +216,16 @@ public:
total_process_running_time_ticks += ticks;
}
+ /// Gets the process schedule count, used for thread yielding
+ s64 GetScheduledCount() const {
+ return schedule_count;
+ }
+
+ /// Increments the process schedule count, used for thread yielding.
+ void IncrementScheduledCount() {
+ ++schedule_count;
+ }
+
/// Gets 8 bytes of random data for svcGetInfo RandomEntropy
u64 GetRandomEntropy(std::size_t index) const {
return random_entropy.at(index);
@@ -397,6 +407,9 @@ private:
/// Name of this process
std::string name;
+ /// Schedule count of this process
+ s64 schedule_count{};
+
/// System context
Core::System& system;
};
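
A hedged sketch of how the schedule count is assumed to feed into yield redundancy detection (GetYieldScheduleCount/SetYieldScheduleCount are hypothetical thread accessors, not part of this change): if the process count has not advanced since the thread last yielded, no scheduling happened in between, so the yield can be marked redundant.

// Hypothetical; the accessor names on 'thread' are illustrative.
bool IsYieldRedundant(const Thread& thread, const Process& process) {
    // GetScheduledCount() only advances via IncrementScheduledCount(),
    // i.e. when the scheduler actually performs a switch for this process.
    return thread.GetYieldScheduleCount() == process.GetScheduledCount();
}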
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 63880f13d..0f128c586 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -199,7 +199,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
break;
}
- LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast<u32>(type));
+ LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
}
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 6e286419e..cea262ce0 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,10 +6,10 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
@@ -39,7 +39,7 @@ void ReadableEvent::Clear() {
}
ResultCode ReadableEvent::Reset() {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (!is_signaled) {
LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
GetObjectId(), GetTypeName(), GetName());
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 212e442f4..7bf50339d 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -65,8 +65,8 @@ ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
limit[index] = value;
return RESULT_SUCCESS;
} else {
- LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}",
- static_cast<u32>(resource), value, index);
+ LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}", resource,
+ value, index);
return ERR_INVALID_STATE;
}
}
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
deleted file mode 100644
index 5cbd3b912..000000000
--- a/src/core/hle/kernel/scheduler.cpp
+++ /dev/null
@@ -1,849 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-//
-// SelectThreads, Yield functions originally by TuxSH.
-// licensed under GPLv2 or later under exception provided by the author.
-
-#include <algorithm>
-#include <mutex>
-#include <set>
-#include <unordered_set>
-#include <utility>
-
-#include "common/assert.h"
-#include "common/bit_util.h"
-#include "common/fiber.h"
-#include "common/logging/log.h"
-#include "core/arm/arm_interface.h"
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/cpu_manager.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
-#include "core/hle/kernel/time_manager.h"
-
-namespace Kernel {
-
-GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
-
-GlobalScheduler::~GlobalScheduler() = default;
-
-void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
- std::scoped_lock lock{global_list_guard};
- thread_list.push_back(std::move(thread));
-}
-
-void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
- std::scoped_lock lock{global_list_guard};
- thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
- thread_list.end());
-}
-
-u32 GlobalScheduler::SelectThreads() {
- ASSERT(is_locked);
- const auto update_thread = [](Thread* thread, Scheduler& sched) {
- std::scoped_lock lock{sched.guard};
- if (thread != sched.selected_thread_set.get()) {
- if (thread == nullptr) {
- ++sched.idle_selection_count;
- }
- sched.selected_thread_set = SharedFrom(thread);
- }
- const bool reschedule_pending =
- sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
- sched.is_context_switch_pending = reschedule_pending;
- std::atomic_thread_fence(std::memory_order_seq_cst);
- return reschedule_pending;
- };
- if (!is_reselection_pending.load()) {
- return 0;
- }
- std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
-
- u32 idle_cores{};
-
- // Step 1: Get top thread in schedule queue.
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- Thread* top_thread =
- scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
- if (top_thread != nullptr) {
- // TODO(Blinkhawk): Implement Thread Pinning
- } else {
- idle_cores |= (1ul << core);
- }
- top_threads[core] = top_thread;
- }
-
- while (idle_cores != 0) {
- u32 core_id = Common::CountTrailingZeroes32(idle_cores);
-
- if (!suggested_queue[core_id].empty()) {
- std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
- std::size_t num_candidates = 0;
- auto iter = suggested_queue[core_id].begin();
- Thread* suggested = nullptr;
- // Step 2: Try selecting a suggested thread.
- while (iter != suggested_queue[core_id].end()) {
- suggested = *iter;
- iter++;
- s32 suggested_core_id = suggested->GetProcessorID();
- Thread* top_thread =
- suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
- if (top_thread != suggested) {
- if (top_thread != nullptr &&
- top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
- suggested = nullptr;
- break;
- // There's a too high thread to do core migration, cancel
- }
- TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
- break;
- }
- suggested = nullptr;
- migration_candidates[num_candidates++] = suggested_core_id;
- }
- // Step 3: Select a suggested thread from another core
- if (suggested == nullptr) {
- for (std::size_t i = 0; i < num_candidates; i++) {
- s32 candidate_core = migration_candidates[i];
- suggested = top_threads[candidate_core];
- auto it = scheduled_queue[candidate_core].begin();
- it++;
- Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
- if (next != nullptr) {
- TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
- suggested);
- top_threads[candidate_core] = next;
- break;
- } else {
- suggested = nullptr;
- }
- }
- }
- top_threads[core_id] = suggested;
- }
-
- idle_cores &= ~(1ul << core_id);
- }
- u32 cores_needing_context_switch{};
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- Scheduler& sched = kernel.Scheduler(core);
- ASSERT(top_threads[core] == nullptr ||
- static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
- if (update_thread(top_threads[core], sched)) {
- cores_needing_context_switch |= (1ul << core);
- }
- }
- return cores_needing_context_switch;
-}
-
-bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
- ASSERT(is_locked);
- // Note: caller should use critical section, etc.
- if (!yielding_thread->IsRunnable()) {
- // Normally this case shouldn't happen except for SetThreadActivity.
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
- const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
- const u32 priority = yielding_thread->GetPriority();
-
- // Yield the thread
- Reschedule(priority, core_id, yielding_thread);
- const Thread* const winner = scheduled_queue[core_id].front();
- if (kernel.GetCurrentHostThreadID() != core_id) {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- return AskForReselectionOrMarkRedundant(yielding_thread, winner);
-}
-
-bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
- ASSERT(is_locked);
- // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
- // etc.
- if (!yielding_thread->IsRunnable()) {
- // Normally this case shouldn't happen except for SetThreadActivity.
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
- const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
- const u32 priority = yielding_thread->GetPriority();
-
- // Yield the thread
- Reschedule(priority, core_id, yielding_thread);
-
- std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
- for (std::size_t i = 0; i < current_threads.size(); i++) {
- current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
- }
-
- Thread* next_thread = scheduled_queue[core_id].front(priority);
- Thread* winner = nullptr;
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (source_core >= 0) {
- if (current_threads[source_core] != nullptr) {
- if (thread == current_threads[source_core] ||
- current_threads[source_core]->GetPriority() < min_regular_priority) {
- continue;
- }
- }
- }
- if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
- next_thread->GetPriority() < thread->GetPriority()) {
- if (thread->GetPriority() <= priority) {
- winner = thread;
- break;
- }
- }
- }
-
- if (winner != nullptr) {
- if (winner != yielding_thread) {
- TransferToCore(winner->GetPriority(), s32(core_id), winner);
- }
- } else {
- winner = next_thread;
- }
-
- if (kernel.GetCurrentHostThreadID() != core_id) {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- return AskForReselectionOrMarkRedundant(yielding_thread, winner);
-}
-
-bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
- ASSERT(is_locked);
- // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
- // etc.
- if (!yielding_thread->IsRunnable()) {
- // Normally this case shouldn't happen except for SetThreadActivity.
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
- Thread* winner = nullptr;
- const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
-
-    // Remove the thread from its scheduled multi-level queue and put it on the corresponding
-    // "suggested" queue instead
- TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
-
- // If the core is idle, perform load balancing, excluding the threads that have just used this
- // function...
- if (scheduled_queue[core_id].empty()) {
-        // Unlike the yield -1 case, "current_threads" here is calculated after the yield
- std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
- for (std::size_t i = 0; i < current_threads.size(); i++) {
- current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
- }
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (source_core < 0 || thread == current_threads[source_core]) {
- continue;
- }
- if (current_threads[source_core] == nullptr ||
- current_threads[source_core]->GetPriority() >= min_regular_priority) {
- winner = thread;
- }
- break;
- }
- if (winner != nullptr) {
- if (winner != yielding_thread) {
- TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
- }
- } else {
- winner = yielding_thread;
- }
- } else {
- winner = scheduled_queue[core_id].front();
- }
-
- if (kernel.GetCurrentHostThreadID() != core_id) {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- return AskForReselectionOrMarkRedundant(yielding_thread, winner);
-}
-
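For orientation: the three yield flavors above were reached from svcSleepThread's
special timeout values (the SleepType enum in the SleepThread() hunk later in this
diff). A condensed sketch of the dispatch, with the Thread-level wrappers
(YieldSimple and friends) elided:

    switch (static_cast<SleepType>(nanoseconds)) {
    case SleepType::YieldWithoutLoadBalancing:    // 0
        global_scheduler.YieldThread(current_thread);
        break;
    case SleepType::YieldWithLoadBalancing:       // -1
        global_scheduler.YieldThreadAndBalanceLoad(current_thread);
        break;
    case SleepType::YieldAndWaitForLoadBalancing: // -2
        global_scheduler.YieldThreadAndWaitForLoadBalancing(current_thread);
        break;
    }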
-void GlobalScheduler::PreemptThreads() {
- ASSERT(is_locked);
- for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- const u32 priority = preemption_priorities[core_id];
-
- if (scheduled_queue[core_id].size(priority) > 0) {
- if (scheduled_queue[core_id].size(priority) > 1) {
- scheduled_queue[core_id].front(priority)->IncrementYieldCount();
- }
- scheduled_queue[core_id].yield(priority);
- if (scheduled_queue[core_id].size(priority) > 1) {
- scheduled_queue[core_id].front(priority)->IncrementYieldCount();
- }
- }
-
- Thread* current_thread =
- scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
- Thread* winner = nullptr;
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (thread->GetPriority() != priority) {
- continue;
- }
- if (source_core >= 0) {
- Thread* next_thread = scheduled_queue[source_core].empty()
- ? nullptr
- : scheduled_queue[source_core].front();
- if (next_thread != nullptr && next_thread->GetPriority() < 2) {
- break;
- }
- if (next_thread == thread) {
- continue;
- }
- }
- if (current_thread != nullptr &&
- current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
- winner = thread;
- break;
- }
- }
-
- if (winner != nullptr) {
- TransferToCore(winner->GetPriority(), s32(core_id), winner);
- current_thread =
- winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
- }
-
- if (current_thread != nullptr && current_thread->GetPriority() > priority) {
- for (auto& thread : suggested_queue[core_id]) {
- const s32 source_core = thread->GetProcessorID();
- if (thread->GetPriority() < priority) {
- continue;
- }
- if (source_core >= 0) {
- Thread* next_thread = scheduled_queue[source_core].empty()
- ? nullptr
- : scheduled_queue[source_core].front();
- if (next_thread != nullptr && next_thread->GetPriority() < 2) {
- break;
- }
- if (next_thread == thread) {
- continue;
- }
- }
- if (current_thread != nullptr &&
- current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
- winner = thread;
- break;
- }
- }
-
- if (winner != nullptr) {
- TransferToCore(winner->GetPriority(), s32(core_id), winner);
- current_thread = winner;
- }
- }
-
- is_reselection_pending.store(true, std::memory_order_release);
- }
-}
-
-void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
- Core::EmuThreadHandle global_thread) {
- u32 current_core = global_thread.host_handle;
- bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
- (current_core < Core::Hardware::NUM_CPU_CORES);
- while (cores_pending_reschedule != 0) {
- u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
- ASSERT(core < Core::Hardware::NUM_CPU_CORES);
- if (!must_context_switch || core != current_core) {
- auto& phys_core = kernel.PhysicalCore(core);
- phys_core.Interrupt();
- } else {
- must_context_switch = true;
- }
- cores_pending_reschedule &= ~(1ul << core);
- }
- if (must_context_switch) {
- auto& core_scheduler = kernel.CurrentScheduler();
- kernel.ExitSVCProfile();
- core_scheduler.TryDoContextSwitch();
- kernel.EnterSVCProfile();
- }
-}
-
-void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- suggested_queue[core].add(thread, priority);
-}
-
-void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- suggested_queue[core].remove(thread, priority);
-}
-
-void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
- scheduled_queue[core].add(thread, priority);
-}
-
-void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
- scheduled_queue[core].add(thread, priority, false);
-}
-
-void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- scheduled_queue[core].remove(thread, priority);
- scheduled_queue[core].add(thread, priority);
-}
-
-void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
- ASSERT(is_locked);
- scheduled_queue[core].remove(thread, priority);
-}
-
-void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
- ASSERT(is_locked);
- const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
- const s32 source_core = thread->GetProcessorID();
- if (source_core == destination_core || !schedulable) {
- return;
- }
- thread->SetProcessorID(destination_core);
- if (source_core >= 0) {
- Unschedule(priority, static_cast<u32>(source_core), thread);
- }
- if (destination_core >= 0) {
- Unsuggest(priority, static_cast<u32>(destination_core), thread);
- Schedule(priority, static_cast<u32>(destination_core), thread);
- }
- if (source_core >= 0) {
- Suggest(priority, static_cast<u32>(source_core), thread);
- }
-}
-
-bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
- const Thread* winner) {
- if (current_thread == winner) {
- current_thread->IncrementYieldCount();
- return true;
- } else {
- is_reselection_pending.store(true, std::memory_order_release);
- return false;
- }
-}
-
-void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
- if (old_flags == thread->scheduling_state) {
- return;
- }
- ASSERT(is_locked);
-
- if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
-        // In this case the thread was running; now it's pausing/exiting
- if (thread->processor_id >= 0) {
- Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Unsuggest(thread->current_priority, core, thread);
- }
- }
- } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
- // The thread is now set to running from being stopped
- if (thread->processor_id >= 0) {
- Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Suggest(thread->current_priority, core, thread);
- }
- }
- }
-
- SetReselectionPending();
-}
-
-void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
- if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
- return;
- }
- ASSERT(is_locked);
- if (thread->processor_id >= 0) {
- Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Unsuggest(old_priority, core, thread);
- }
- }
-
- if (thread->processor_id >= 0) {
- if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
- SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
- thread);
- } else {
- Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
- }
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
- Suggest(thread->current_priority, core, thread);
- }
- }
- thread->IncrementYieldCount();
- SetReselectionPending();
-}
-
-void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
- s32 old_core) {
- if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
- thread->current_priority >= THREADPRIO_COUNT) {
- return;
- }
- ASSERT(is_locked);
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((old_affinity_mask >> core) & 1) != 0) {
- if (core == static_cast<u32>(old_core)) {
- Unschedule(thread->current_priority, core, thread);
- } else {
- Unsuggest(thread->current_priority, core, thread);
- }
- }
- }
-
- for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((thread->affinity_mask >> core) & 1) != 0) {
- if (core == static_cast<u32>(thread->processor_id)) {
- Schedule(thread->current_priority, core, thread);
- } else {
- Suggest(thread->current_priority, core, thread);
- }
- }
- }
-
- thread->IncrementYieldCount();
- SetReselectionPending();
-}
-
-void GlobalScheduler::Shutdown() {
- for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- scheduled_queue[core].clear();
- suggested_queue[core].clear();
- }
- thread_list.clear();
-}
-
-void GlobalScheduler::Lock() {
- Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
- ASSERT(!current_thread.IsInvalid());
- if (current_thread == current_owner) {
- ++scope_lock;
- } else {
- inner_lock.lock();
- is_locked = true;
- current_owner = current_thread;
- ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
- scope_lock = 1;
- }
-}
-
-void GlobalScheduler::Unlock() {
- if (--scope_lock != 0) {
- ASSERT(scope_lock > 0);
- return;
- }
- u32 cores_pending_reschedule = SelectThreads();
- Core::EmuThreadHandle leaving_thread = current_owner;
- current_owner = Core::EmuThreadHandle::InvalidHandle();
- scope_lock = 1;
- is_locked = false;
- inner_lock.unlock();
- EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
-}
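Lock() and Unlock() above form a re-entrant lock keyed on the emulated thread:
nested acquisitions by the same owner only bump scope_lock, and the reselection
work runs once, when the outermost guard releases. A minimal sketch using the
RAII wrapper defined at the end of this file:

    {
        SchedulerLock outer(kernel);     // first acquisition: takes inner_lock, scope_lock = 1
        {
            SchedulerLock inner(kernel); // same owner: scope_lock = 2, no blocking
        }                                // back to scope_lock = 1, nothing rescheduled yet
    }                                    // Unlock(): SelectThreads() + EnableInterruptAndSchedule()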
-
-Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
- switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
-}
-
-Scheduler::~Scheduler() = default;
-
-bool Scheduler::HaveReadyThreads() const {
- return system.GlobalScheduler().HaveReadyThreads(core_id);
-}
-
-Thread* Scheduler::GetCurrentThread() const {
- if (current_thread) {
- return current_thread.get();
- }
- return idle_thread.get();
-}
-
-Thread* Scheduler::GetSelectedThread() const {
- return selected_thread.get();
-}
-
-u64 Scheduler::GetLastContextSwitchTicks() const {
- return last_context_switch_time;
-}
-
-void Scheduler::TryDoContextSwitch() {
- auto& phys_core = system.Kernel().CurrentPhysicalCore();
- if (phys_core.IsInterrupted()) {
- phys_core.ClearInterrupt();
- }
- guard.lock();
- if (is_context_switch_pending) {
- SwitchContext();
- } else {
- guard.unlock();
- }
-}
-
-void Scheduler::OnThreadStart() {
- SwitchContextStep2();
-}
-
-void Scheduler::Unload() {
- Thread* thread = current_thread.get();
- if (thread) {
- thread->SetContinuousOnSVC(false);
- thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
- thread->SetIsRunning(false);
- if (!thread->IsHLEThread() && !thread->HasExited()) {
- Core::ARM_Interface& cpu_core = thread->ArmInterface();
- cpu_core.SaveContext(thread->GetContext32());
- cpu_core.SaveContext(thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
- }
- thread->context_guard.unlock();
- }
-}
-
-void Scheduler::Reload() {
- Thread* thread = current_thread.get();
- if (thread) {
- ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
- "Thread must be runnable.");
-
-        // Mark the thread as running again and refresh its last-run timestamp
- thread->SetIsRunning(true);
- thread->SetWasRunning(false);
- thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
-
- auto* const thread_owner_process = thread->GetOwnerProcess();
- if (thread_owner_process != nullptr) {
- system.Kernel().MakeCurrentProcess(thread_owner_process);
- }
- if (!thread->IsHLEThread()) {
- Core::ARM_Interface& cpu_core = thread->ArmInterface();
- cpu_core.LoadContext(thread->GetContext32());
- cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
- cpu_core.ChangeProcessorID(this->core_id);
- cpu_core.ClearExclusiveState();
- }
- }
-}
-
-void Scheduler::SwitchContextStep2() {
- // Load context of new thread
- if (selected_thread) {
- ASSERT_MSG(selected_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
- "Thread must be runnable.");
-
-        // Mark the incoming thread as running and refresh its last-run timestamp
- selected_thread->SetIsRunning(true);
- selected_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
- selected_thread->SetWasRunning(false);
-
- auto* const thread_owner_process = current_thread->GetOwnerProcess();
- if (thread_owner_process != nullptr) {
- system.Kernel().MakeCurrentProcess(thread_owner_process);
- }
- if (!selected_thread->IsHLEThread()) {
- Core::ARM_Interface& cpu_core = selected_thread->ArmInterface();
- cpu_core.LoadContext(selected_thread->GetContext32());
- cpu_core.LoadContext(selected_thread->GetContext64());
- cpu_core.SetTlsAddress(selected_thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(selected_thread->GetTPIDR_EL0());
- cpu_core.ChangeProcessorID(this->core_id);
- cpu_core.ClearExclusiveState();
- }
- }
-
- TryDoContextSwitch();
-}
-
-void Scheduler::SwitchContext() {
- current_thread_prev = current_thread;
- selected_thread = selected_thread_set;
- Thread* previous_thread = current_thread_prev.get();
- Thread* new_thread = selected_thread.get();
- current_thread = selected_thread;
-
- is_context_switch_pending = false;
-
- if (new_thread == previous_thread) {
- guard.unlock();
- return;
- }
-
- Process* const previous_process = system.Kernel().CurrentProcess();
-
- UpdateLastContextSwitchTime(previous_thread, previous_process);
-
- // Save context for previous thread
- if (previous_thread) {
- if (new_thread != nullptr && new_thread->IsSuspendThread()) {
- previous_thread->SetWasRunning(true);
- }
- previous_thread->SetContinuousOnSVC(false);
- previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
- previous_thread->SetIsRunning(false);
- if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
- Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
- cpu_core.SaveContext(previous_thread->GetContext32());
- cpu_core.SaveContext(previous_thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
- }
- previous_thread->context_guard.unlock();
- }
-
- std::shared_ptr<Common::Fiber>* old_context;
- if (previous_thread != nullptr) {
- old_context = &previous_thread->GetHostContext();
- } else {
- old_context = &idle_thread->GetHostContext();
- }
- guard.unlock();
-
- Common::Fiber::YieldTo(*old_context, switch_fiber);
-    // When a thread wakes up, it may be running under a different core's scheduler, so re-fetch it.
- auto& next_scheduler = system.Kernel().CurrentScheduler();
- next_scheduler.SwitchContextStep2();
-}
-
-void Scheduler::OnSwitch(void* this_scheduler) {
- Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
- sched->SwitchToCurrent();
-}
-
-void Scheduler::SwitchToCurrent() {
- while (true) {
- {
- std::scoped_lock lock{guard};
- selected_thread = selected_thread_set;
- current_thread = selected_thread;
- is_context_switch_pending = false;
- }
- const auto is_switch_pending = [this] {
- std::scoped_lock lock{guard};
- return is_context_switch_pending;
- };
- do {
- if (current_thread != nullptr && !current_thread->IsHLEThread()) {
- current_thread->context_guard.lock();
- if (!current_thread->IsRunnable()) {
- current_thread->context_guard.unlock();
- break;
- }
- if (current_thread->GetProcessorID() != core_id) {
- current_thread->context_guard.unlock();
- break;
- }
- }
- std::shared_ptr<Common::Fiber>* next_context;
- if (current_thread != nullptr) {
- next_context = &current_thread->GetHostContext();
- } else {
- next_context = &idle_thread->GetHostContext();
- }
- Common::Fiber::YieldTo(switch_fiber, *next_context);
- } while (!is_switch_pending());
- }
-}
-
-void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
- const u64 prev_switch_ticks = last_context_switch_time;
- const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
- const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
-
- if (thread != nullptr) {
- thread->UpdateCPUTimeTicks(update_ticks);
- }
-
- if (process != nullptr) {
- process->UpdateCPUTimeTicks(update_ticks);
- }
-
- last_context_switch_time = most_recent_switch_ticks;
-}
-
-void Scheduler::Initialize() {
- std::string name = "Idle Thread Id:" + std::to_string(core_id);
- std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
- void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
- ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
- auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
- nullptr, std::move(init_func), init_func_parameter);
- idle_thread = std::move(thread_res).Unwrap();
-}
-
-void Scheduler::Shutdown() {
- current_thread = nullptr;
- selected_thread = nullptr;
-}
-
-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
- kernel.GlobalScheduler().Lock();
-}
-
-SchedulerLock::~SchedulerLock() {
- kernel.GlobalScheduler().Unlock();
-}
-
-SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
- Thread* time_task, s64 nanoseconds)
- : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
- nanoseconds} {
- event_handle = InvalidHandle;
-}
-
-SchedulerLockAndSleep::~SchedulerLockAndSleep() {
- if (sleep_cancelled) {
- return;
- }
- auto& time_manager = kernel.TimeManager();
- time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
-}
-
-void SchedulerLockAndSleep::Release() {
- if (sleep_cancelled) {
- return;
- }
- auto& time_manager = kernel.TimeManager();
- time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
- sleep_cancelled = true;
-}
-
-} // namespace Kernel
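For reference, the lock-and-sleep guard defined above is used in the following
pattern (condensed from the WaitProcessWideKeyAtomic hunk later in this diff;
error handling elided):

    Handle event_handle{};
    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
    {
        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
        // ... enqueue the thread on its wait object; an early-out path calls
        // lock.CancelSleep() to suppress the timed wakeup ...
    } // the destructor schedules the wakeup time event unless the sleep was cancelled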
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
deleted file mode 100644
index b6f04dcea..000000000
--- a/src/core/hle/kernel/scheduler.h
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <atomic>
-#include <memory>
-#include <mutex>
-#include <vector>
-
-#include "common/common_types.h"
-#include "common/multi_level_queue.h"
-#include "common/spin_lock.h"
-#include "core/hardware_properties.h"
-#include "core/hle/kernel/thread.h"
-
-namespace Common {
-class Fiber;
-}
-
-namespace Core {
-class ARM_Interface;
-class System;
-} // namespace Core
-
-namespace Kernel {
-
-class KernelCore;
-class Process;
-class SchedulerLock;
-
-class GlobalScheduler final {
-public:
- explicit GlobalScheduler(KernelCore& kernel);
- ~GlobalScheduler();
-
- /// Adds a new thread to the scheduler
- void AddThread(std::shared_ptr<Thread> thread);
-
- /// Removes a thread from the scheduler
- void RemoveThread(std::shared_ptr<Thread> thread);
-
- /// Returns a list of all threads managed by the scheduler
- const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
- return thread_list;
- }
-
- /// Notify the scheduler a thread's status has changed.
- void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
-
- /// Notify the scheduler a thread's priority has changed.
- void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
-
- /// Notify the scheduler a thread's core and/or affinity mask has changed.
- void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
-
-    /**
-     * Takes care of selecting the new scheduled threads in three steps:
-     *
-     * 1. First, a thread is selected from the top of the priority queue. If a
-     * thread is obtained we are done; otherwise we move on to step two.
-     *
-     * 2. Second, we try to get a suggested thread that's not assigned to any core
-     * or that is not the top thread on its core.
-     *
-     * 3. Third, if no suggested thread is found, we do a second pass and pick a
-     * running thread on another core and swap it with that core's current thread.
-     *
-     * @returns the cores needing scheduling.
-     */
- u32 SelectThreads();
-
- bool HaveReadyThreads(std::size_t core_id) const {
- return !scheduled_queue[core_id].empty();
- }
-
-    /**
-     * Takes a thread and moves it to the back of its priority list.
-     *
-     * @note This operation can be redundant; in that case no scheduling change occurs.
-     */
- bool YieldThread(Thread* thread);
-
-    /**
-     * Takes a thread and moves it to the back of its priority list.
-     * Afterwards, tries to pick from the suggested queue a thread that has a worse
-     * running time or a better priority than the next thread on the core.
-     *
-     * @note This operation can be redundant; in that case no scheduling change occurs.
-     */
- bool YieldThreadAndBalanceLoad(Thread* thread);
-
-    /**
-     * Takes a thread and moves it out of the scheduling queue and into the
-     * suggested queue. If no thread can be scheduled on that core afterwards,
-     * a suggested thread is obtained instead.
-     *
-     * @note This operation can be redundant; in that case no scheduling change occurs.
-     */
- bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
-
- /**
- * Rotates the scheduling queues of threads at a preemption priority and then does
- * some core rebalancing. Preemption priorities can be found in the array
- * 'preemption_priorities'.
- *
- * @note This operation happens every 10ms.
- */
- void PreemptThreads();
-
- u32 CpuCoresCount() const {
- return Core::Hardware::NUM_CPU_CORES;
- }
-
- void SetReselectionPending() {
- is_reselection_pending.store(true, std::memory_order_release);
- }
-
- bool IsReselectionPending() const {
- return is_reselection_pending.load(std::memory_order_acquire);
- }
-
- void Shutdown();
-
-private:
- friend class SchedulerLock;
-
- /// Lock the scheduler to the current thread.
- void Lock();
-
- /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
- /// and reschedules current core if needed.
- void Unlock();
-
- void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
- Core::EmuThreadHandle global_thread);
-
- /**
- * Add a thread to the suggested queue of a cpu core. Suggested threads may be
- * picked if no thread is scheduled to run on the core.
- */
- void Suggest(u32 priority, std::size_t core, Thread* thread);
-
- /**
-     * Removes a thread from the suggested queue of a cpu core. Suggested threads may be
- * picked if no thread is scheduled to run on the core.
- */
- void Unsuggest(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Add a thread to the scheduling queue of a cpu core. The thread is added at the
-     * back of the queue in its priority level.
- */
- void Schedule(u32 priority, std::size_t core, Thread* thread);
-
- /**
- * Add a thread to the scheduling queue of a cpu core. The thread is added at the
-     * front of the queue in its priority level.
- */
- void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
-
- /// Reschedule an already scheduled thread based on a new priority
- void Reschedule(u32 priority, std::size_t core, Thread* thread);
-
- /// Unschedules a thread.
- void Unschedule(u32 priority, std::size_t core, Thread* thread);
-
- /**
-     * Transfers a thread to a specific core. If destination_core is -1,
-     * the thread is unscheduled from its source core and added to that
-     * core's suggested queue.
- */
- void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
-
- bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
-
- static constexpr u32 min_regular_priority = 2;
- std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
- scheduled_queue;
- std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
- suggested_queue;
- std::atomic<bool> is_reselection_pending{false};
-
- // The priority levels at which the global scheduler preempts threads every 10 ms. They are
- // ordered from Core 0 to Core 3.
- std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
-
- /// Scheduler lock mechanisms.
- bool is_locked{};
- std::mutex inner_lock;
- std::atomic<s64> scope_lock{};
- Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
-
- Common::SpinLock global_list_guard{};
-
- /// Lists all thread ids that aren't deleted/etc.
- std::vector<std::shared_ptr<Thread>> thread_list;
- KernelCore& kernel;
-};
-
-class Scheduler final {
-public:
- explicit Scheduler(Core::System& system, std::size_t core_id);
- ~Scheduler();
-
- /// Returns whether there are any threads that are ready to run.
- bool HaveReadyThreads() const;
-
- /// Reschedules to the next available thread (call after current thread is suspended)
- void TryDoContextSwitch();
-
-    /// The next two are for single-core mode only.
- /// Unload current thread before preempting core.
- void Unload();
- /// Reload current thread after core preemption.
- void Reload();
-
-    /// Gets the currently running thread
- Thread* GetCurrentThread() const;
-
- /// Gets the currently selected thread from the top of the multilevel queue
- Thread* GetSelectedThread() const;
-
- /// Gets the timestamp for the last context switch in ticks.
- u64 GetLastContextSwitchTicks() const;
-
- bool ContextSwitchPending() const {
- return is_context_switch_pending;
- }
-
- void Initialize();
-
-    /// Shuts down the scheduler.
- void Shutdown();
-
- void OnThreadStart();
-
- std::shared_ptr<Common::Fiber>& ControlContext() {
- return switch_fiber;
- }
-
- const std::shared_ptr<Common::Fiber>& ControlContext() const {
- return switch_fiber;
- }
-
-private:
- friend class GlobalScheduler;
-
- /// Switches the CPU's active thread context to that of the specified thread
- void SwitchContext();
-
-    /// When a thread wakes up, it must run this through its new scheduler
- void SwitchContextStep2();
-
- /**
- * Called on every context switch to update the internal timestamp
- * This also updates the running time ticks for the given thread and
- * process using the following difference:
- *
- * ticks += most_recent_ticks - last_context_switch_ticks
- *
- * The internal tick timestamp for the scheduler is simply the
- * most recent tick count retrieved. No special arithmetic is
- * applied to it.
- */
- void UpdateLastContextSwitchTime(Thread* thread, Process* process);
-
- static void OnSwitch(void* this_scheduler);
- void SwitchToCurrent();
-
- std::shared_ptr<Thread> current_thread = nullptr;
- std::shared_ptr<Thread> selected_thread = nullptr;
- std::shared_ptr<Thread> current_thread_prev = nullptr;
- std::shared_ptr<Thread> selected_thread_set = nullptr;
- std::shared_ptr<Thread> idle_thread = nullptr;
-
- std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
-
- Core::System& system;
- u64 last_context_switch_time = 0;
- u64 idle_selection_count = 0;
- const std::size_t core_id;
-
- Common::SpinLock guard{};
-
- bool is_context_switch_pending = false;
-};
-
-class SchedulerLock {
-public:
- [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
- ~SchedulerLock();
-
-protected:
- KernelCore& kernel;
-};
-
-class SchedulerLockAndSleep : public SchedulerLock {
-public:
- explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
- s64 nanoseconds);
- ~SchedulerLockAndSleep();
-
- void CancelSleep() {
- sleep_cancelled = true;
- }
-
- void Release();
-
-private:
- Handle& event_handle;
- Thread* time_task;
- s64 nanoseconds;
- bool sleep_cancelled{};
-};
-
-} // namespace Kernel
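Call sites across this diff swap the RAII guard declared above for the new
KScopedSchedulerLock one-for-one; the usage pattern itself is unchanged
(example adapted from the SendSyncRequest hunk below):

    {
        SchedulerLock lock(system.Kernel());        // before this change
        thread->SetStatus(ThreadStatus::WaitIPC);
    }
    {
        KScopedSchedulerLock lock(system.Kernel()); // after this change
        thread->SetStatus(ThreadStatus::WaitIPC);
    }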
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 8c19f2534..b40fe3916 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -14,9 +14,9 @@
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
@@ -25,19 +25,19 @@
namespace Kernel {
ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
-ServerSession::~ServerSession() = default;
+
+ServerSession::~ServerSession() {
+ kernel.ReleaseServiceThread(service_thread);
+}
ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
std::shared_ptr<Session> parent,
std::string name) {
std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
- session->request_event =
- Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
- session->CompleteSyncRequest();
- });
session->name = std::move(name);
session->parent = std::move(parent);
+ session->service_thread = kernel.CreateServiceThread(session->name);
return MakeResult(std::move(session));
}
@@ -130,8 +130,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
}
}
- LOG_CRITICAL(IPC, "Unknown domain command={}",
- static_cast<int>(domain_message_header.command.Value()));
+ LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
ASSERT(false);
return RESULT_SUCCESS;
}
@@ -143,16 +142,16 @@ ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
- request_queue.Push(std::move(context));
+
+ if (auto strong_ptr = service_thread.lock()) {
+ strong_ptr->QueueSyncRequest(*this, std::move(context));
+ return RESULT_SUCCESS;
+ }
return RESULT_SUCCESS;
}
-ResultCode ServerSession::CompleteSyncRequest() {
- ASSERT(!request_queue.Empty());
-
- auto& context = *request_queue.Front();
-
+ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
ResultCode result = RESULT_SUCCESS;
// If the session has been converted to a domain, handle the domain request
if (IsDomain() && context.HasDomainMessageHeader()) {
@@ -171,25 +170,20 @@ ResultCode ServerSession::CompleteSyncRequest() {
// Some service requests require the thread to block
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (!context.IsThreadWaiting()) {
context.GetThread().ResumeFromWait();
context.GetThread().SetSynchronizationResults(nullptr, result);
}
}
- request_queue.Pop();
-
return result;
}
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
- const ResultCode result = QueueSyncRequest(std::move(thread), memory);
- const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
- core_timing.ScheduleEvent(delay, request_event, {});
- return result;
+ return QueueSyncRequest(std::move(thread), memory);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index d23e9ec68..e8d1d99ea 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -10,6 +10,7 @@
#include <vector>
#include "common/threadsafe_queue.h"
+#include "core/hle/kernel/service_thread.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
@@ -43,6 +44,8 @@ class Thread;
* TLS buffer and control is transferred back to it.
*/
class ServerSession final : public SynchronizationObject {
+ friend class ServiceThread;
+
public:
explicit ServerSession(KernelCore& kernel);
~ServerSession() override;
@@ -132,7 +135,7 @@ private:
ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
- ResultCode CompleteSyncRequest();
+ ResultCode CompleteSyncRequest(HLERequestContext& context);
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
@@ -163,11 +166,8 @@ private:
/// The name of this session (optional)
std::string name;
- /// Core timing event used to schedule the service request at some point in the future
- std::shared_ptr<Core::Timing::EventType> request_event;
-
- /// Queue of scheduled service requests
- Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue;
+ /// Thread to dispatch service requests
+ std::weak_ptr<ServiceThread> service_thread;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
new file mode 100644
index 000000000..ee46f3e21
--- /dev/null
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -0,0 +1,110 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+#include <queue>
+
+#include "common/assert.h"
+#include "common/scope_exit.h"
+#include "common/thread.h"
+#include "core/core.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/server_session.h"
+#include "core/hle/kernel/service_thread.h"
+#include "core/hle/lock.h"
+#include "video_core/renderer_base.h"
+
+namespace Kernel {
+
+class ServiceThread::Impl final {
+public:
+ explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
+ ~Impl();
+
+ void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
+
+private:
+ std::vector<std::thread> threads;
+ std::queue<std::function<void()>> requests;
+ std::mutex queue_mutex;
+ std::condition_variable condition;
+ const std::string service_name;
+ bool stop{};
+};
+
+ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
+ : service_name{name} {
+ for (std::size_t i = 0; i < num_threads; ++i)
+ threads.emplace_back([this, &kernel] {
+ Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
+
+ // Wait for first request before trying to acquire a render context
+ {
+ std::unique_lock lock{queue_mutex};
+ condition.wait(lock, [this] { return stop || !requests.empty(); });
+ }
+
+ kernel.RegisterHostThread();
+
+ while (true) {
+ std::function<void()> task;
+
+ {
+ std::unique_lock lock{queue_mutex};
+ condition.wait(lock, [this] { return stop || !requests.empty(); });
+ if (stop || requests.empty()) {
+ return;
+ }
+ task = std::move(requests.front());
+ requests.pop();
+ }
+
+ task();
+ }
+ });
+}
+
+void ServiceThread::Impl::QueueSyncRequest(ServerSession& session,
+ std::shared_ptr<HLERequestContext>&& context) {
+ {
+ std::unique_lock lock{queue_mutex};
+
+        // ServerSession owns the service thread, so we cannot capture a strong pointer here in
+        // the event that the ServerSession is terminated.
+ std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)};
+ requests.emplace([weak_ptr, context{std::move(context)}]() {
+ if (auto strong_ptr = weak_ptr.lock()) {
+ strong_ptr->CompleteSyncRequest(*context);
+ }
+ });
+ }
+ condition.notify_one();
+}
+
+ServiceThread::Impl::~Impl() {
+ {
+ std::unique_lock lock{queue_mutex};
+ stop = true;
+ }
+ condition.notify_all();
+ for (std::thread& thread : threads) {
+ thread.join();
+ }
+}
+
+ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
+ : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
+
+ServiceThread::~ServiceThread() = default;
+
+void ServiceThread::QueueSyncRequest(ServerSession& session,
+ std::shared_ptr<HLERequestContext>&& context) {
+ impl->QueueSyncRequest(session, std::move(context));
+}
+
+} // namespace Kernel
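How a session drives the new worker pool, condensed from the ServerSession hunks
above (KernelCore::CreateServiceThread/ReleaseServiceThread own the strong
reference; the session only holds a weak_ptr):

    std::weak_ptr<ServiceThread> service_thread = kernel.CreateServiceThread(name);
    if (auto strong_ptr = service_thread.lock()) {
        // Runs ServerSession::CompleteSyncRequest(*context) on a pool thread.
        strong_ptr->QueueSyncRequest(session, std::move(context));
    }
    kernel.ReleaseServiceThread(service_thread); // in ~ServerSession()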
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
new file mode 100644
index 000000000..025ab8fb5
--- /dev/null
+++ b/src/core/hle/kernel/service_thread.h
@@ -0,0 +1,28 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+namespace Kernel {
+
+class HLERequestContext;
+class KernelCore;
+class ServerSession;
+
+class ServiceThread final {
+public:
+ explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
+ ~ServiceThread();
+
+ void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
+
+private:
+ class Impl;
+ std::unique_ptr<Impl> impl;
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index bafd1ced7..de3ed25da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -24,6 +24,8 @@
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/page_table.h"
@@ -32,7 +34,6 @@
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_types.h"
@@ -234,8 +235,7 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
u32 attribute) {
- return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size),
- mask, attribute);
+ return SetMemoryAttribute(system, address, size, mask, attribute);
}
/// Maps a memory range into a different range.
@@ -255,8 +255,7 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
}
static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
- static_cast<std::size_t>(size));
+ return MapMemory(system, dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
@@ -276,8 +275,7 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
}
static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
- return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr),
- static_cast<std::size_t>(size));
+ return UnmapMemory(system, dst_addr, src_addr, size);
}
/// Connect to an OS service given the port name, returns the handle to the port to out
@@ -332,7 +330,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
- const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+ auto& kernel = system.Kernel();
+ const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
if (!session) {
LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
@@ -341,9 +340,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- auto thread = system.CurrentScheduler().GetCurrentThread();
+ auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(kernel);
thread->InvalidateHLECallback();
thread->SetStatus(ThreadStatus::WaitIPC);
session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
@@ -352,12 +351,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
if (thread->HasHLECallback()) {
Handle event_handle = thread->GetHLETimeEvent();
if (event_handle != InvalidHandle) {
- auto& time_manager = system.Kernel().TimeManager();
+ auto& time_manager = kernel.TimeManager();
time_manager.UnscheduleTimeEvent(event_handle);
}
{
- SchedulerLock lock(system.Kernel());
+ KScopedSchedulerLock lock(kernel);
auto* sync_object = thread->GetHLESyncObject();
sync_object->RemoveWaitingThread(SharedFrom(thread));
}
@@ -531,8 +530,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
u32 mutex_addr, Handle requesting_thread_handle) {
- return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr),
- requesting_thread_handle);
+ return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle);
}
/// Unlock a mutex
@@ -555,7 +553,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
}
static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
- return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr));
+ return ArbitrateUnlock(system, mutex_addr);
}
enum class BreakType : u32 {
@@ -658,7 +656,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
if (!break_reason.signal_debugger) {
- SchedulerLock lock(system.Kernel());
LOG_CRITICAL(
Debug_Emulated,
"Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -666,22 +663,18 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
- auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+ auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
const auto thread_processor_id = current_thread->GetProcessorID();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
-
- // Kill the current thread
- system.Kernel().ExceptionalExit();
- current_thread->Stop();
}
}
static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
- Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2));
+ Break(system, reason, info1, info2);
}
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
-static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) {
+static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
if (len == 0) {
return;
}
@@ -922,7 +915,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
}
const auto& core_timing = system.CoreTiming();
- const auto& scheduler = system.CurrentScheduler();
+ const auto& scheduler = *system.Kernel().CurrentScheduler();
const auto* const current_thread = scheduler.GetCurrentThread();
const bool same_thread = current_thread == thread.get();
@@ -948,7 +941,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
u32 info_id, u32 handle, u32 sub_id_high) {
- const u64 sub_id{static_cast<u64>(sub_id_low | (static_cast<u64>(sub_id_high) << 32))};
+ const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
u64 res_value{};
const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
@@ -1009,7 +1002,7 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
}
static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
+ return MapPhysicalMemory(system, addr, size);
}
/// Unmaps memory previously mapped via MapPhysicalMemory
@@ -1063,7 +1056,7 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
}
static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
- return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size));
+ return UnmapPhysicalMemory(system, addr, size);
}
/// Sets the thread activity
@@ -1090,7 +1083,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
return ERR_INVALID_HANDLE;
}
- if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
+ if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
return ERR_BUSY;
}
@@ -1123,7 +1116,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
return ERR_INVALID_HANDLE;
}
- if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
+ if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
return ERR_BUSY;
}
@@ -1144,7 +1137,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
}
static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
- return GetThreadContext(system, static_cast<VAddr>(thread_context), handle);
+ return GetThreadContext(system, thread_context, handle);
}
/// Gets the priority for the specified thread
@@ -1281,8 +1274,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
u32 size, u32 permissions) {
- return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr),
- static_cast<std::size_t>(size), permissions);
+ return MapSharedMemory(system, shared_memory_handle, addr, size, permissions);
}
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
@@ -1480,7 +1472,7 @@ static void ExitProcess(Core::System& system) {
current_process->PrepareForTermination();
// Kill the current thread
- system.CurrentScheduler().GetCurrentThread()->Stop();
+ system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
}
static void ExitProcess32(Core::System& system) {
@@ -1552,8 +1544,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
- return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg),
- static_cast<VAddr>(stack_top), priority, processor_id);
+ return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
}
/// Starts the thread for the provided handle
@@ -1581,8 +1572,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
- auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
- system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
+ auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
current_thread->Stop();
}
@@ -1592,53 +1583,39 @@ static void ExitThread32(Core::System& system) {
/// Sleep the current thread
static void SleepThread(Core::System& system, s64 nanoseconds) {
- LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
+ LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
enum class SleepType : s64 {
- YieldWithoutLoadBalancing = 0,
- YieldWithLoadBalancing = -1,
+ YieldWithoutCoreMigration = 0,
+ YieldWithCoreMigration = -1,
YieldAndWaitForLoadBalancing = -2,
};
- auto& scheduler = system.CurrentScheduler();
- auto* const current_thread = scheduler.GetCurrentThread();
- bool is_redundant = false;
-
+ auto& scheduler = *system.Kernel().CurrentScheduler();
if (nanoseconds <= 0) {
switch (static_cast<SleepType>(nanoseconds)) {
- case SleepType::YieldWithoutLoadBalancing: {
- auto pair = current_thread->YieldSimple();
- is_redundant = pair.second;
+ case SleepType::YieldWithoutCoreMigration: {
+ scheduler.YieldWithoutCoreMigration();
break;
}
- case SleepType::YieldWithLoadBalancing: {
- auto pair = current_thread->YieldAndBalanceLoad();
- is_redundant = pair.second;
+ case SleepType::YieldWithCoreMigration: {
+ scheduler.YieldWithCoreMigration();
break;
}
case SleepType::YieldAndWaitForLoadBalancing: {
- auto pair = current_thread->YieldAndWaitForLoadBalancing();
- is_redundant = pair.second;
+ scheduler.YieldToAnyThread();
break;
}
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
} else {
- current_thread->Sleep(nanoseconds);
- }
-
- if (is_redundant && !system.Kernel().IsMulticore()) {
- system.Kernel().ExitSVCProfile();
- system.CoreTiming().AddTicks(1000U);
- system.GetCpuManager().PreemptSingleCore();
- system.Kernel().EnterSVCProfile();
+ scheduler.GetCurrentThread()->Sleep(nanoseconds);
}
}
static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
- const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) |
- (static_cast<u64>(nanoseconds_high) << 32));
+ const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
SleepThread(system, nanoseconds);
}
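The 32-bit SVC wrappers in this file all rebuild a 64-bit argument the same way;
a quick worked check of the idiom (example values are hypothetical):

    // low = 0x00001000, high = 0x00000002  ->  0x0000000200001000
    static_assert((u64{0x00001000u} | (u64{0x00000002u} << 32)) == 0x0000000200001000ULL,
                  "u64{low} fills bits 0-31, u64{high} << 32 fills bits 32-63");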
@@ -1668,10 +1645,10 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
auto& kernel = system.Kernel();
Handle event_handle;
- Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
- auto* const current_process = system.Kernel().CurrentProcess();
+ Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ auto* const current_process = kernel.CurrentProcess();
{
- SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
const auto& handle_table = current_process->GetHandleTable();
std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
ASSERT(thread);
@@ -1707,7 +1684,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
auto* owner = current_thread->GetLockOwner();
if (owner != nullptr) {
@@ -1724,10 +1701,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
u32 condition_variable_addr, Handle thread_handle,
u32 nanoseconds_low, u32 nanoseconds_high) {
- const s64 nanoseconds =
- static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32));
- return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr),
- static_cast<VAddr>(condition_variable_addr), thread_handle,
+ const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32));
+ return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
nanoseconds);
}
@@ -1740,7 +1715,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
// Retrieve a list of all threads that are waiting for this condition variable.
auto& kernel = system.Kernel();
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
auto* const current_process = kernel.CurrentProcess();
std::vector<std::shared_ptr<Thread>> waiting_threads =
current_process->GetConditionVariableThreads(condition_variable_addr);
@@ -1833,8 +1808,8 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
u32 timeout_low, u32 timeout_high) {
- s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32));
- return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout);
+ const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32));
+ return WaitForAddress(system, address, type, value, timeout);
}
// Signals to an address (via Address Arbiter)
@@ -1862,7 +1837,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
s32 num_to_wake) {
- return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake);
+ return SignalToAddress(system, address, type, value, num_to_wake);
}
static void KernelDebug([[maybe_unused]] Core::System& system,
@@ -1893,7 +1868,7 @@ static u64 GetSystemTick(Core::System& system) {
}
static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
- u64 time = GetSystemTick(system);
+ const auto time = GetSystemTick(system);
*time_low = static_cast<u32>(time);
*time_high = static_cast<u32>(time >> 32);
}
@@ -1984,8 +1959,7 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
u32 permissions) {
- return CreateTransferMemory(system, handle, static_cast<VAddr>(addr),
- static_cast<std::size_t>(size), permissions);
+ return CreateTransferMemory(system, handle, addr, size, permissions);
}
static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
@@ -2003,7 +1977,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
}
*core = thread->GetIdealCore();
- *mask = thread->GetAffinityMask();
+ *mask = thread->GetAffinityMask().GetAffinityMask();
return RESULT_SUCCESS;
}
@@ -2075,8 +2049,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
u32 affinity_mask_low, u32 affinity_mask_high) {
- const u64 affinity_mask =
- static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32);
+ const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
}
@@ -2341,9 +2314,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return RESULT_SUCCESS;
}
-static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address,
- u32 size) {
- // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a nope
+static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
+ [[maybe_unused]] Handle handle,
+ [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
+ // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op,
// as all emulation is done at the same cache level on the host architecture, the data
// cache does not need flushing.
LOG_DEBUG(Kernel_SVC, "called");
@@ -2639,6 +2613,9 @@ void Call(Core::System& system, u32 immediate) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
+ auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
+ thread->SetContinuousOnSVC(true);
+
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
: GetSVCInfo32(immediate);
if (info) {
@@ -2652,6 +2629,12 @@ void Call(Core::System& system, u32 immediate) {
}
kernel.ExitSVCProfile();
+
+ if (!thread->IsContinuousOnSVC()) {
+ auto* host_context = thread->GetHostContext().get();
+ host_context->Rewind();
+ }
+
system.EnterDynarmicProfile();
}
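The new pre/post hooks around the SVC dispatch mark the current thread as "continuous" before the handler runs; a handler that deschedules the thread mid-SVC is expected to clear the flag, in which case the host fiber is rewound so the SVC is re-entered when the thread next runs. A rough sketch of that control flow, with InvokeSvcHandler as a hypothetical stand-in for the table dispatch above:

    void DispatchSvc(Core::System& system, u32 immediate) {
        auto* thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
        thread->SetContinuousOnSVC(true); // assume the SVC completes in one shot

        InvokeSvcHandler(system, immediate); // hypothetical dispatch helper

        if (!thread->IsContinuousOnSVC()) {
            // The SVC was interrupted; rewind the host fiber so the
            // instruction is retried once the thread is rescheduled.
            thread->GetHostContext()->Rewind();
        }
    }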
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 986724beb..11e1d8e2d 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -23,8 +23,8 @@ enum class MemoryState : u32 {
Ipc = 0x0A,
Stack = 0x0B,
ThreadLocal = 0x0C,
- Transfered = 0x0D,
- SharedTransfered = 0x0E,
+ Transferred = 0x0D,
+ SharedTransferred = 0x0E,
SharedCode = 0x0F,
Inaccessible = 0x10,
NonSecureIpc = 0x11,
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 8b875d853..d3f520ea2 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -5,8 +5,9 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/synchronization.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/thread.h"
@@ -18,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}
void Synchronization::SignalObject(SynchronizationObject& obj) const {
auto& kernel = system.Kernel();
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (obj.IsSignaled()) {
for (auto thread : obj.GetWaitingThreads()) {
if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
@@ -37,10 +38,10 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const {
std::pair<ResultCode, Handle> Synchronization::WaitFor(
std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
auto& kernel = system.Kernel();
- auto* const thread = system.CurrentScheduler().GetCurrentThread();
+ auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
- SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
const auto itr =
std::find_if(sync_objects.begin(), sync_objects.end(),
[thread](const std::shared_ptr<SynchronizationObject>& object) {
@@ -89,7 +90,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
}
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
ResultCode signaling_result = thread->GetSignalingResult();
SynchronizationObject* signaling_object = thread->GetSignalingObject();
thread->SetSynchronizationObjects(nullptr);
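WaitFor shows the shape of the lock-and-sleep guard: the wait state is set up while the scheduler lock is held, and the timeout event is only armed when the guard leaves scope, so the timer can never observe a half-initialized wait. A minimal sketch, with the constructor/destructor bodies inferred from how the guard is used here and in Thread::Sleep:

    // Assumptions: Lock()/Unlock() stand in for the real scheduler lock
    // interface, and KernelCore exposes its TimeManager.
    class KScopedSchedulerLockAndSleep {
    public:
        KScopedSchedulerLockAndSleep(KernelCore& kernel_, Handle& event_handle_,
                                     Thread* thread_, s64 nanoseconds_)
            : kernel{kernel_}, event_handle{event_handle_}, thread{thread_},
              nanoseconds{nanoseconds_} {
            kernel.Lock();
        }
        ~KScopedSchedulerLockAndSleep() {
            // Arm the wakeup only now, after the caller has set the thread's
            // wait state inside the locked scope.
            kernel.TimeManager().ScheduleTimeEvent(event_handle, thread, nanoseconds);
            kernel.Unlock();
        }

    private:
        KernelCore& kernel;
        Handle& event_handle;
        Thread* thread;
        s64 nanoseconds;
    };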
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
index f89b24204..7408ed51f 100644
--- a/src/core/hle/kernel/synchronization_object.h
+++ b/src/core/hle/kernel/synchronization_object.h
@@ -4,6 +4,7 @@
#pragma once
+#include <atomic>
#include <memory>
#include <vector>
@@ -56,7 +57,7 @@ public:
void ClearWaitingThreads();
protected:
- bool is_signaled{}; // Tells if this sync object is signalled;
+ std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
private:
/// Threads waiting for this object to become available
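is_signaled becomes a std::atomic_bool because the flag can now be read outside the scheduler lock (for example when scanning candidate objects before sleeping), while writers still flip it under KScopedSchedulerLock; with a plain bool that mixed access would be a data race. An illustrative fragment of the access pattern, under that assumption:

    #include <atomic>

    struct SignalFlag {
        std::atomic_bool is_signaled{};

        bool IsSignaled() const {
            return is_signaled.load(); // safe from any host thread
        }
        void Signal() {
            is_signaled.store(true);   // done while holding the scheduler lock
        }
    };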
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index d132aba34..a4f9e0d97 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -12,17 +12,16 @@
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/thread_queue_list.h"
-#include "core/arm/arm_interface.h"
-#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
@@ -52,7 +51,7 @@ Thread::~Thread() = default;
void Thread::Stop() {
{
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
SetStatus(ThreadStatus::Dead);
Signal();
kernel.GlobalHandleTable().Close(global_handle);
@@ -63,14 +62,13 @@ void Thread::Stop() {
// Mark the TLS slot in the thread's page as free.
owner_process->FreeTLSRegion(tls_address);
}
- arm_interface.reset();
has_exited = true;
}
global_handle = 0;
}
void Thread::ResumeFromWait() {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
switch (status) {
case ThreadStatus::Paused:
case ThreadStatus::WaitSynch:
@@ -91,10 +89,6 @@ void Thread::ResumeFromWait() {
// before actually resuming. We can ignore subsequent wakeups if the thread status has
// already been set to ThreadStatus::Ready.
return;
-
- case ThreadStatus::Running:
- DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId());
- return;
case ThreadStatus::Dead:
// This should never happen, as threads must complete before being stopped.
DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
@@ -106,19 +100,18 @@ void Thread::ResumeFromWait() {
}
void Thread::OnWakeUp() {
- SchedulerLock lock(kernel);
-
+ KScopedSchedulerLock lock(kernel);
SetStatus(ThreadStatus::Ready);
}
ResultCode Thread::Start() {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
SetStatus(ThreadStatus::Ready);
return RESULT_SUCCESS;
}
void Thread::CancelWait() {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
is_sync_cancelled = true;
return;
@@ -193,12 +186,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
thread->status = ThreadStatus::Dormant;
thread->entry_point = entry_point;
thread->stack_top = stack_top;
+ thread->disable_count = 1;
thread->tpidr_el0 = 0;
thread->nominal_priority = thread->current_priority = priority;
- thread->last_running_ticks = 0;
+ thread->schedule_count = -1;
+ thread->last_scheduled_tick = 0;
thread->processor_id = processor_id;
thread->ideal_core = processor_id;
- thread->affinity_mask = 1ULL << processor_id;
+ thread->affinity_mask.SetAffinity(processor_id, true);
thread->wait_objects = nullptr;
thread->mutex_wait_address = 0;
thread->condvar_wait_address = 0;
@@ -208,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
thread->owner_process = owner_process;
thread->type = type_flags;
if ((type_flags & THREADTYPE_IDLE) == 0) {
- auto& scheduler = kernel.GlobalScheduler();
+ auto& scheduler = kernel.GlobalSchedulerContext();
scheduler.AddThread(thread);
}
if (owner_process) {
@@ -217,33 +212,10 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
} else {
thread->tls_address = 0;
}
+
// TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
// to initialize the context
- thread->arm_interface.reset();
if ((type_flags & THREADTYPE_HLE) == 0) {
-#ifdef ARCHITECTURE_x86_64
- if (owner_process && !owner_process->Is64BitProcess()) {
- thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
- system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
- processor_id);
- } else {
- thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
- system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
- processor_id);
- }
-
-#else
- if (owner_process && !owner_process->Is64BitProcess()) {
- thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
- system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32,
- processor_id);
- } else {
- thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
- system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64,
- processor_id);
- }
- LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
-#endif
ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
static_cast<u32>(entry_point), static_cast<u32>(arg));
ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
@@ -255,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
}
void Thread::SetPriority(u32 priority) {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
"Invalid priority value.");
nominal_priority = priority;
@@ -279,14 +251,6 @@ VAddr Thread::GetCommandBufferAddress() const {
return GetTLSAddress() + command_header_offset;
}
-Core::ARM_Interface& Thread::ArmInterface() {
- return *arm_interface;
-}
-
-const Core::ARM_Interface& Thread::ArmInterface() const {
- return *arm_interface;
-}
-
void Thread::SetStatus(ThreadStatus new_status) {
if (new_status == status) {
return;
@@ -294,7 +258,6 @@ void Thread::SetStatus(ThreadStatus new_status) {
switch (new_status) {
case ThreadStatus::Ready:
- case ThreadStatus::Running:
SetSchedulingStatus(ThreadSchedStatus::Runnable);
break;
case ThreadStatus::Dormant:
@@ -401,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
}
ResultCode Thread::SetActivity(ThreadActivity value) {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
auto sched_status = GetSchedulingStatus();
@@ -430,7 +393,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
ResultCode Thread::Sleep(s64 nanoseconds) {
Handle event_handle{};
{
- SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
+ KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
SetStatus(ThreadStatus::WaitSleep);
}
@@ -441,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
return RESULT_SUCCESS;
}
-std::pair<ResultCode, bool> Thread::YieldSimple() {
- bool is_redundant = false;
- {
- SchedulerLock lock(kernel);
- is_redundant = kernel.GlobalScheduler().YieldThread(this);
- }
- return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
- bool is_redundant = false;
- {
- SchedulerLock lock(kernel);
- is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
- }
- return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
- bool is_redundant = false;
- {
- SchedulerLock lock(kernel);
- is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
- }
- return {RESULT_SUCCESS, is_redundant};
-}
-
void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
const u32 old_state = scheduling_state;
pausing_state |= static_cast<u32>(flag);
const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
scheduling_state = base_scheduling | pausing_state;
- kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
@@ -481,23 +417,24 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
pausing_state &= ~static_cast<u32>(flag);
const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
scheduling_state = base_scheduling | pausing_state;
- kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
const u32 old_state = scheduling_state;
scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
static_cast<u32>(new_status);
- kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
void Thread::SetCurrentPriority(u32 new_priority) {
const u32 old_priority = std::exchange(current_priority, new_priority);
- kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
+ KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
+ old_priority);
}
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
- SchedulerLock lock(kernel);
+ KScopedSchedulerLock lock(kernel);
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
if (((mask >> core) & 1) != 0) {
@@ -518,20 +455,21 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
}
if (use_override) {
ideal_core_override = new_core;
- affinity_mask_override = new_affinity_mask;
} else {
- const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
+ const auto old_affinity_mask = affinity_mask;
+ affinity_mask.SetAffinityMask(new_affinity_mask);
ideal_core = new_core;
- if (old_affinity_mask != new_affinity_mask) {
+ if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
const s32 old_core = processor_id;
- if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+ if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
if (static_cast<s32>(ideal_core) < 0) {
- processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
+ processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
+ Core::Hardware::NUM_CPU_CORES);
} else {
processor_id = ideal_core;
}
}
- kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
+ KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
}
}
return RESULT_SUCCESS;
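SetCoreAndAffinityMask now goes through the new KAffinityMask wrapper instead of a raw u64, and state changes are reported through the static KScheduler::OnThread*Changed hooks rather than a GlobalScheduler instance. A plausible sketch of the wrapper, inferred purely from the accessors used above (the real k_affinity_mask.h may differ):

    class KAffinityMask {
    public:
        constexpr u64 GetAffinityMask() const {
            return mask;
        }
        constexpr void SetAffinityMask(u64 new_mask) {
            mask = new_mask;
        }
        constexpr bool GetAffinity(s32 core) const {
            return ((mask >> core) & 1) != 0;
        }
        constexpr void SetAffinity(s32 core, bool allowed) {
            if (allowed) {
                mask |= u64{1} << core;
            } else {
                mask &= ~(u64{1} << core);
            }
        }

    private:
        u64 mask{};
    };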
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 8daf79fac..11ef29888 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -4,6 +4,7 @@
#pragma once
+#include <array>
#include <functional>
#include <string>
#include <utility>
@@ -12,6 +13,7 @@
#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
+#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
@@ -27,10 +29,10 @@ class System;
namespace Kernel {
-class GlobalScheduler;
+class GlobalSchedulerContext;
class KernelCore;
class Process;
-class Scheduler;
+class KScheduler;
enum ThreadPriority : u32 {
THREADPRIO_HIGHEST = 0, ///< Highest thread priority
@@ -72,7 +74,6 @@ enum ThreadProcessorId : s32 {
};
enum class ThreadStatus {
- Running, ///< Currently running
Ready, ///< Ready to run
Paused, ///< Paused by SetThreadActivity or debug
WaitHLEEvent, ///< Waiting for hle event to finish
@@ -248,10 +249,6 @@ public:
void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
- Core::ARM_Interface& ArmInterface();
-
- const Core::ARM_Interface& ArmInterface() const;
-
SynchronizationObject* GetSignalingObject() const {
return signaling_object;
}
@@ -350,8 +347,12 @@ public:
void SetStatus(ThreadStatus new_status);
- u64 GetLastRunningTicks() const {
- return last_running_ticks;
+ s64 GetLastScheduledTick() const {
+ return this->last_scheduled_tick;
+ }
+
+ void SetLastScheduledTick(s64 tick) {
+ this->last_scheduled_tick = tick;
}
u64 GetTotalCPUTimeTicks() const {
@@ -366,10 +367,18 @@ public:
return processor_id;
}
+ s32 GetActiveCore() const {
+ return GetProcessorID();
+ }
+
void SetProcessorID(s32 new_core) {
processor_id = new_core;
}
+ void SetActiveCore(s32 new_core) {
+ processor_id = new_core;
+ }
+
Process* GetOwnerProcess() {
return owner_process;
}
@@ -474,7 +483,7 @@ public:
return ideal_core;
}
- u64 GetAffinityMask() const {
+ const KAffinityMask& GetAffinityMask() const {
return affinity_mask;
}
@@ -483,21 +492,12 @@ public:
/// Sleeps this thread for the given amount of nanoseconds.
ResultCode Sleep(s64 nanoseconds);
- /// Yields this thread without rebalancing loads.
- std::pair<ResultCode, bool> YieldSimple();
-
- /// Yields this thread and does a load rebalancing.
- std::pair<ResultCode, bool> YieldAndBalanceLoad();
-
- /// Yields this thread and if the core is left idle, loads are rebalanced
- std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
-
- void IncrementYieldCount() {
- yield_count++;
+ s64 GetYieldScheduleCount() const {
+ return this->schedule_count;
}
- u64 GetYieldCount() const {
- return yield_count;
+ void SetYieldScheduleCount(s64 count) {
+ this->schedule_count = count;
}
ThreadSchedStatus GetSchedulingStatus() const {
@@ -573,9 +573,59 @@ public:
return has_exited;
}
+ class QueueEntry {
+ public:
+ constexpr QueueEntry() = default;
+
+ constexpr void Initialize() {
+ this->prev = nullptr;
+ this->next = nullptr;
+ }
+
+ constexpr Thread* GetPrev() const {
+ return this->prev;
+ }
+ constexpr Thread* GetNext() const {
+ return this->next;
+ }
+ constexpr void SetPrev(Thread* thread) {
+ this->prev = thread;
+ }
+ constexpr void SetNext(Thread* thread) {
+ this->next = thread;
+ }
+
+ private:
+ Thread* prev{};
+ Thread* next{};
+ };
+
+ QueueEntry& GetPriorityQueueEntry(s32 core) {
+ return this->per_core_priority_queue_entry[core];
+ }
+
+ const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+ return this->per_core_priority_queue_entry[core];
+ }
+
+ s32 GetDisableDispatchCount() const {
+ return disable_count;
+ }
+
+ void DisableDispatch() {
+ ASSERT(GetDisableDispatchCount() >= 0);
+ disable_count++;
+ }
+
+ void EnableDispatch() {
+ ASSERT(GetDisableDispatchCount() > 0);
+ disable_count--;
+ }
+
private:
- friend class GlobalScheduler;
- friend class Scheduler;
+ friend class GlobalSchedulerContext;
+ friend class KScheduler;
+ friend class Process;
void SetSchedulingStatus(ThreadSchedStatus new_status);
void AddSchedulingFlag(ThreadSchedFlags flag);
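QueueEntry embeds one doubly-linked-list node per core directly in the Thread, so the per-core priority queues can link and unlink threads without any allocation. An illustrative append over these entries (PerCoreQueue and PushBack are inventions for the example, not the k_priority_queue.h API):

    struct PerCoreQueue {
        Thread* head{};
        Thread* tail{};

        void PushBack(s32 core, Thread* t) {
            auto& entry = t->GetPriorityQueueEntry(core);
            entry.SetPrev(tail); // link the new tail back to the old one
            entry.SetNext(nullptr);
            if (tail != nullptr) {
                tail->GetPriorityQueueEntry(core).SetNext(t);
            } else {
                head = t; // queue was empty
            }
            tail = t;
        }
    };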
@@ -586,15 +636,16 @@ private:
Common::SpinLock context_guard{};
ThreadContext32 context_32{};
ThreadContext64 context_64{};
- std::unique_ptr<Core::ARM_Interface> arm_interface{};
std::shared_ptr<Common::Fiber> host_context{};
- u64 thread_id = 0;
-
ThreadStatus status = ThreadStatus::Dormant;
+ u32 scheduling_state = 0;
+
+ u64 thread_id = 0;
VAddr entry_point = 0;
VAddr stack_top = 0;
+ std::atomic_int disable_count = 0;
ThreadType type;
@@ -608,9 +659,8 @@ private:
u32 current_priority = 0;
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
- u64 last_running_ticks = 0; ///< CPU tick when thread was last running
- u64 yield_count = 0; ///< Number of redundant yields carried by this thread.
- ///< a redundant yield is one where no scheduling is changed
+ s64 schedule_count{};
+ s64 last_scheduled_tick{};
s32 processor_id = 0;
@@ -652,16 +702,16 @@ private:
Handle hle_time_event;
SynchronizationObject* hle_object;
- Scheduler* scheduler = nullptr;
+ KScheduler* scheduler = nullptr;
+
+ std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
u32 ideal_core{0xFFFFFFFF};
- u64 affinity_mask{0x1};
+ KAffinityMask affinity_mask{};
s32 ideal_core_override = -1;
- u64 affinity_mask_override = 0x1;
u32 affinity_override_count = 0;
- u32 scheduling_state = 0;
u32 pausing_state = 0;
bool is_running = false;
bool is_waiting_on_sync = false;
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 95f2446c9..79628e2b4 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -7,8 +7,8 @@
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
@@ -18,17 +18,27 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
- const SchedulerLock lock(system.Kernel());
+ const KScopedSchedulerLock lock(system.Kernel());
const auto proper_handle = static_cast<Handle>(thread_handle);
- if (cancelled_events[proper_handle]) {
- return;
+
+ std::shared_ptr<Thread> thread;
+ {
+ std::lock_guard lock{mutex};
+ if (cancelled_events[proper_handle]) {
+ return;
+ }
+ thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
+ }
+
+            // The thread can be null if the owning process has already exited.
+            if (thread) {
+ thread->OnWakeUp();
}
- auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
- thread->OnWakeUp();
});
}
void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
+ std::lock_guard lock{mutex};
event_handle = timetask->GetGlobalHandle();
if (nanoseconds > 0) {
ASSERT(timetask);
@@ -43,6 +53,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
}
void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
+ std::lock_guard lock{mutex};
if (event_handle == InvalidHandle) {
return;
}
@@ -51,7 +62,8 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
}
void TimeManager::CancelTimeEvent(Thread* time_task) {
-    Handle event_handle = time_task->GetGlobalHandle();
+    // No lock taken here: UnscheduleTimeEvent itself acquires the non-recursive mutex.
+    const Handle event_handle = time_task->GetGlobalHandle();
UnscheduleTimeEvent(event_handle);
}
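TimeManager now guards cancelled_events and event scheduling with a mutex, and tolerates a null thread in the callback, since the owning process may have exited before the timer fired. The callback above follows a classic pattern: resolve shared state under the narrow mutex, then act on the result outside it (while still under the scheduler lock). Distilled:

    std::shared_ptr<Thread> thread;
    {
        std::lock_guard lock{mutex};       // protects cancelled_events
        if (cancelled_events[handle]) {
            return;                        // event was cancelled; nothing to do
        }
        thread = kernel.RetrieveThreadFromGlobalHandleTable(handle);
    }
    if (thread) {                          // null if the process already exited
        thread->OnWakeUp();                // wake outside the narrow mutex
    }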
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index 307a18765..f39df39a0 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -5,6 +5,7 @@
#pragma once
#include <memory>
+#include <mutex>
#include <unordered_map>
#include "core/hle/kernel/object.h"
@@ -42,6 +43,7 @@ private:
Core::System& system;
std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
std::unordered_map<Handle, bool> cancelled_events;
+ std::mutex mutex;
};
} // namespace Kernel