Diffstat:
 src/core/hle/kernel/kernel.cpp | 132
 1 file changed, 74 insertions(+), 58 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 73593c7a0..ce7fa8275 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -17,7 +17,6 @@
#include "common/thread.h"
#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
@@ -64,8 +63,6 @@ struct KernelCore::Impl {
is_phantom_mode_for_singlecore = false;
- InitializePhysicalCores();
-
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
DeriveInitialMemoryLayout();
@@ -75,16 +72,16 @@ struct KernelCore::Impl {
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
- InitializeSchedulers();
InitializeShutdownThreads();
InitializePreemption(kernel);
+ InitializePhysicalCores();
RegisterHostThread();
}
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id].Initialize((*current_process).Is64BitProcess());
+ cores[core_id]->Initialize((*current_process).Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
}
}
@@ -95,26 +92,16 @@ struct KernelCore::Impl {
process_list.clear();
- // Close all open server sessions and ports.
- std::unordered_set<KAutoObject*> server_objects_;
- {
- std::scoped_lock lk(server_objects_lock);
- server_objects_ = server_objects;
- server_objects.clear();
- }
- for (auto* server_object : server_objects_) {
- server_object->Close();
- }
-
- // Ensures all service threads gracefully shutdown.
- ClearServiceThreads();
+ CloseServices();
next_object_id = 0;
next_kernel_process_id = KProcess::InitialKIPIDMin;
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
- cores.clear();
+ for (auto& core : cores) {
+ core = nullptr;
+ }
global_handle_table->Finalize();
global_handle_table.reset();
@@ -148,7 +135,6 @@ struct KernelCore::Impl {
shutdown_threads[core_id] = nullptr;
}
- schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
@@ -191,18 +177,41 @@ struct KernelCore::Impl {
global_object_list_container.reset();
}
+ void CloseServices() {
+ // Close all open server sessions and ports.
+ std::unordered_set<KAutoObject*> server_objects_;
+ {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects_ = server_objects;
+ server_objects.clear();
+ }
+ for (auto* server_object : server_objects_) {
+ server_object->Close();
+ }
+
+ // Ensures all service threads gracefully shutdown.
+ ClearServiceThreads();
+ }
+
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
- cores.emplace_back(i, system, *schedulers[i], interrupts);
- }
- }
+ const s32 core{static_cast<s32>(i)};
- void InitializeSchedulers() {
- for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- cores[i].Scheduler().Initialize();
+ schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
+ cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
+
+ auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+ main_thread->SetName(fmt::format("MainThread:{}", core));
+ main_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+ auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+ idle_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+ schedulers[i]->Initialize(main_thread, idle_thread, core);
}
}
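
Illustrative sketch (not part of the patch): the per-core bring-up above pairs each scheduler with a dedicated main thread and an idle thread before the scheduler itself is initialized. The stand-alone C++ sketch below mirrors that ordering; CoreThread, CoreScheduler, and kNumCores are hypothetical stand-ins, not the kernel's KThread/KScheduler API.

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

constexpr std::size_t kNumCores = 4;

// Hypothetical stand-ins for the kernel's thread and scheduler types.
struct CoreThread {
    std::string name;
    std::int32_t core_id{-1};
};

struct CoreScheduler {
    CoreThread* main_thread{nullptr};
    CoreThread* idle_thread{nullptr};

    void Initialize(CoreThread* main, CoreThread* idle, std::int32_t core) {
        main_thread = main;
        idle_thread = idle;
        main_thread->core_id = core;
        idle_thread->core_id = core;
    }
};

int main() {
    std::array<std::unique_ptr<CoreScheduler>, kNumCores> schedulers{};
    std::array<std::unique_ptr<CoreThread>, kNumCores> main_threads{};
    std::array<std::unique_ptr<CoreThread>, kNumCores> idle_threads{};

    for (std::size_t i = 0; i < kNumCores; ++i) {
        const auto core = static_cast<std::int32_t>(i);

        // One scheduler per core, created before its threads are attached.
        schedulers[i] = std::make_unique<CoreScheduler>();

        // Each core gets a named main thread and an idle thread up front; the
        // scheduler is then initialized with both, as in the hunk above.
        main_threads[i] = std::make_unique<CoreThread>();
        main_threads[i]->name = "MainThread:" + std::to_string(core);
        idle_threads[i] = std::make_unique<CoreThread>();

        schedulers[i]->Initialize(main_threads[i].get(), idle_threads[i].get(), core);
        assert(schedulers[i]->main_thread->core_id == core);
    }
    return 0;
}
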
@@ -234,17 +243,18 @@ struct KernelCore::Impl {
void InitializePreemption(KernelCore& kernel) {
preemption_event = Core::Timing::CreateEvent(
- "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
+ "PreemptionCallback",
+ [this, &kernel](std::uintptr_t, s64 time,
+ std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
{
KScopedSchedulerLock lock(kernel);
global_scheduler_context->PreemptThreads();
}
- const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ return std::nullopt;
});
const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
void InitializeShutdownThreads() {
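
Illustrative sketch (not part of the patch): the preemption callback above no longer reschedules itself; it is registered once with ScheduleLoopingEvent and returns std::nullopt each tick, which, by the contract suggested by the new signature, keeps the configured 10 ms period (a returned duration would override the next interval). The mock timing loop below shows that callback shape; RunLoopingEvent is a stand-in, not the emulator's CoreTiming implementation.

#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>

using LoopingCallback = std::function<std::optional<std::chrono::nanoseconds>(
    std::uintptr_t user_data, std::int64_t time, std::chrono::nanoseconds late_by)>;

// Calls cb once per period; std::nullopt keeps the configured interval,
// while a returned value is used as the delay before the next call.
void RunLoopingEvent(const LoopingCallback& cb, std::chrono::nanoseconds interval,
                     int iterations) {
    std::int64_t now = 0;
    for (int i = 0; i < iterations; ++i) {
        const auto next = cb(0, now, std::chrono::nanoseconds{0});
        now += next.value_or(interval).count();
    }
    std::cout << "final virtual time: " << now << " ns\n";
}

int main() {
    const auto period = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
    RunLoopingEvent(
        [](std::uintptr_t, std::int64_t, std::chrono::nanoseconds)
            -> std::optional<std::chrono::nanoseconds> {
            // Preemption-style work would happen here; keep the default period.
            return std::nullopt;
        },
        period, 3);
    return 0;
}
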
@@ -254,7 +264,6 @@ struct KernelCore::Impl {
core_id)
.IsSuccess());
shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
- shutdown_threads[core_id]->DisableDispatch();
}
}
@@ -332,6 +341,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
+ static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -342,7 +353,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
- return schedulers[thread_id]->GetCurrentThread();
+
+ return current_thread;
+ }
+
+ void SetCurrentEmuThread(KThread* thread) {
+ current_thread = thread;
}
void DeriveInitialMemoryLayout() {
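
Illustrative sketch (not part of the patch): GetCurrentEmuThread now reads a thread_local pointer that each host thread sets explicitly through SetCurrentEmuThread, rather than asking the per-core scheduler. The stand-alone example below shows that pattern; GuestThread is a hypothetical stand-in type.

#include <cassert>
#include <thread>

struct GuestThread {
    int id;
};

namespace {
// One slot per host thread, zero-initialized for threads that never set it.
thread_local GuestThread* g_current_thread = nullptr;
} // namespace

void SetCurrentEmuThread(GuestThread* thread) {
    g_current_thread = thread;
}

GuestThread* GetCurrentEmuThread() {
    return g_current_thread;
}

int main() {
    GuestThread a{1};
    GuestThread b{2};

    SetCurrentEmuThread(&a);

    std::thread worker([&] {
        // The worker's slot is independent of the main thread's.
        assert(GetCurrentEmuThread() == nullptr);
        SetCurrentEmuThread(&b);
        assert(GetCurrentEmuThread()->id == 2);
    });
    worker.join();

    assert(GetCurrentEmuThread()->id == 1);
    return 0;
}
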
@@ -746,7 +762,7 @@ struct KernelCore::Impl {
std::unordered_set<KAutoObject*> registered_in_use_objects;
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
- std::vector<Kernel::PhysicalCore> cores;
+ std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
// Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
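
Illustrative sketch (not part of the patch): the cores member changes from std::vector<Kernel::PhysicalCore> to a fixed-size std::array of std::unique_ptr, giving each core a stable slot that is reset individually at shutdown (the "core = nullptr" loop earlier in the diff) instead of clearing a vector wholesale. PhysicalCoreStub below is a hypothetical stand-in.

#include <array>
#include <cstddef>
#include <memory>

struct PhysicalCoreStub {
    explicit PhysicalCoreStub(std::size_t id) : core_id{id} {}
    std::size_t core_id;
};

constexpr std::size_t kNumCores = 4;

int main() {
    std::array<std::unique_ptr<PhysicalCoreStub>, kNumCores> cores{};

    // Initialization fills each slot in place.
    for (std::size_t i = 0; i < kNumCores; ++i) {
        cores[i] = std::make_unique<PhysicalCoreStub>(i);
    }

    // Shutdown releases each slot without destroying the container itself.
    for (auto& core : cores) {
        core = nullptr;
    }
    return 0;
}
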
@@ -770,7 +786,6 @@ struct KernelCore::Impl {
Common::ThreadWorker service_threads_manager;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
- std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
bool is_multicore{};
@@ -806,6 +821,10 @@ void KernelCore::Shutdown() {
impl->Shutdown();
}
+void KernelCore::CloseServices() {
+ impl->CloseServices();
+}
+
const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
@@ -855,11 +874,11 @@ const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
}
Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
- return impl->cores[id];
+ return *impl->cores[id];
}
const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
- return impl->cores[id];
+ return *impl->cores[id];
}
size_t KernelCore::CurrentPhysicalCoreIndex() const {
@@ -871,11 +890,11 @@ size_t KernelCore::CurrentPhysicalCoreIndex() const {
}
Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
- return impl->cores[CurrentPhysicalCoreIndex()];
+ return *impl->cores[CurrentPhysicalCoreIndex()];
}
const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
- return impl->cores[CurrentPhysicalCoreIndex()];
+ return *impl->cores[CurrentPhysicalCoreIndex()];
}
Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -887,15 +906,6 @@ Kernel::KScheduler* KernelCore::CurrentScheduler() {
return impl->schedulers[core_id].get();
}
-std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
- return impl->interrupts;
-}
-
-const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
- const {
- return impl->interrupts;
-}
-
Kernel::TimeManager& KernelCore::TimeManager() {
return impl->time_manager;
}
@@ -920,24 +930,18 @@ const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return *impl->global_object_list_container;
}
-void KernelCore::InterruptAllPhysicalCores() {
- for (auto& physical_core : impl->cores) {
- physical_core.Interrupt();
- }
-}
-
void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) {
- physical_core.ArmInterface().ClearInstructionCache();
+ physical_core->ArmInterface().ClearInstructionCache();
}
}
void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
- if (!physical_core.IsInitialized()) {
+ if (!physical_core->IsInitialized()) {
continue;
}
- physical_core.ArmInterface().InvalidateCacheRange(addr, size);
+ physical_core->ArmInterface().InvalidateCacheRange(addr, size);
}
}
@@ -1025,6 +1029,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+ impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1079,14 +1087,22 @@ void KernelCore::Suspend(bool suspended) {
for (auto* process : GetProcessList()) {
process->SetActivity(activity);
+
+ if (should_suspend) {
+ // Wait for execution to stop
+ for (auto* thread : process->GetThreadList()) {
+ thread->WaitUntilSuspended();
+ }
+ }
}
}
void KernelCore::ShutdownCores() {
+ KScopedSchedulerLock lk{*this};
+
for (auto* thread : impl->shutdown_threads) {
void(thread->Run());
}
- InterruptAllPhysicalCores();
}
bool KernelCore::IsMulticore() const {