Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 90
1 file changed, 61 insertions(+), 29 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 66c8f4455..f4072e1c3 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -64,8 +64,6 @@ struct KernelCore::Impl {
is_phantom_mode_for_singlecore = false;
- InitializePhysicalCores();
-
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
DeriveInitialMemoryLayout();
@@ -75,9 +73,9 @@ struct KernelCore::Impl {
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
- InitializeSchedulers();
InitializeShutdownThreads();
InitializePreemption(kernel);
+ InitializePhysicalCores();
RegisterHostThread();
}
@@ -95,19 +93,7 @@ struct KernelCore::Impl {
process_list.clear();
- // Close all open server sessions and ports.
- std::unordered_set<KAutoObject*> server_objects_;
- {
- std::scoped_lock lk(server_objects_lock);
- server_objects_ = server_objects;
- server_objects.clear();
- }
- for (auto* server_object : server_objects_) {
- server_object->Close();
- }
-
- // Ensures all service threads gracefully shutdown.
- ClearServiceThreads();
+ CloseServices();
next_object_id = 0;
next_kernel_process_id = KProcess::InitialKIPIDMin;
@@ -148,7 +134,6 @@ struct KernelCore::Impl {
shutdown_threads[core_id] = nullptr;
}
- schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
@@ -191,18 +176,41 @@ struct KernelCore::Impl {
global_object_list_container.reset();
}
+ void CloseServices() {
+ // Close all open server sessions and ports.
+ std::unordered_set<KAutoObject*> server_objects_;
+ {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects_ = server_objects;
+ server_objects.clear();
+ }
+ for (auto* server_object : server_objects_) {
+ server_object->Close();
+ }
+
+ // Ensures all service threads gracefully shutdown.
+ ClearServiceThreads();
+ }
+
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
+ const s32 core{static_cast<s32>(i)};
+
+ schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
cores.emplace_back(i, system, *schedulers[i], interrupts);
- }
- }
- void InitializeSchedulers() {
- for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- cores[i].Scheduler().Initialize();
+ auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+ main_thread->SetName(fmt::format("MainThread:{}", core));
+ main_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+ auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+ idle_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+ schedulers[i]->Initialize(main_thread, idle_thread, core);
}
}
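
Note: the relocated CloseServices() body above copies the tracked server objects while holding server_objects_lock and only calls Close() after the lock has been released. A minimal standalone sketch of that snapshot-then-close idiom follows; Registry, Closable, and Session are illustrative names, not yuzu types.

// Sketch of the "snapshot under lock, close outside the lock" pattern used by
// CloseServices(). All names here are illustrative only.
#include <iostream>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <vector>

struct Closable {
    virtual ~Closable() = default;
    virtual void Close() = 0;
};

struct Session : Closable {
    void Close() override { std::cout << "session closed\n"; }
};

class Registry {
public:
    void Register(Closable* object) {
        std::scoped_lock lk{mutex_};
        objects_.insert(object);
    }

    // Copy the set under the lock, then close each object without holding it,
    // so Close() implementations are free to re-enter the registry.
    void CloseAll() {
        std::unordered_set<Closable*> snapshot;
        {
            std::scoped_lock lk{mutex_};
            snapshot = objects_;
            objects_.clear();
        }
        for (auto* object : snapshot) {
            object->Close();
        }
    }

private:
    std::mutex mutex_;
    std::unordered_set<Closable*> objects_;
};

int main() {
    Registry registry;
    std::vector<std::unique_ptr<Session>> sessions;
    for (int i = 0; i < 3; ++i) {
        sessions.push_back(std::make_unique<Session>());
        registry.Register(sessions.back().get());
    }
    registry.CloseAll();
}
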
@@ -234,17 +242,18 @@ struct KernelCore::Impl {
void InitializePreemption(KernelCore& kernel) {
preemption_event = Core::Timing::CreateEvent(
- "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
+ "PreemptionCallback",
+ [this, &kernel](std::uintptr_t, s64 time,
+ std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
{
KScopedSchedulerLock lock(kernel);
global_scheduler_context->PreemptThreads();
}
- const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ return std::nullopt;
});
const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
void InitializeShutdownThreads() {
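
Note: the preemption callback above now returns std::optional<std::chrono::nanoseconds> and is registered once via ScheduleLoopingEvent() instead of rescheduling itself on every tick; returning std::nullopt keeps the configured period. A toy sketch of that contract, assuming a hypothetical RunLoopingEvent driver rather than yuzu's CoreTiming:

// Toy illustration of a looping timed event whose callback may override the
// next interval by returning a value, or keep the default by returning nullopt.
// RunLoopingEvent is an illustrative stand-in, not yuzu's CoreTiming API.
#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>
#include <thread>

using Callback = std::function<std::optional<std::chrono::nanoseconds>(
    std::uintptr_t user_data, std::chrono::nanoseconds lateness)>;

void RunLoopingEvent(std::chrono::nanoseconds period, const Callback& callback, int iterations) {
    for (int i = 0; i < iterations; ++i) {
        std::this_thread::sleep_for(period);
        // nullopt -> keep the configured period; a value -> use it for the next wait.
        if (const auto override_interval = callback(0, std::chrono::nanoseconds{0})) {
            period = *override_interval;
        }
    }
}

int main() {
    const auto interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
    RunLoopingEvent(
        interval,
        [](std::uintptr_t, std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
            std::cout << "preempt tick\n";
            return std::nullopt; // keep the 10 ms period
        },
        3);
}
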
@@ -331,6 +340,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
+ static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -341,7 +352,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
- return schedulers[thread_id]->GetCurrentThread();
+
+ return current_thread;
+ }
+
+ void SetCurrentEmuThread(KThread* thread) {
+ current_thread = thread;
}
void DeriveInitialMemoryLayout() {
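
Note: GetCurrentEmuThread() now reads a static thread_local pointer that SetCurrentEmuThread() writes, rather than asking the per-core scheduler. A stripped-down sketch of such a thread-local "current thread" slot; EmuThread and CurrentContext are illustrative names only.

// Sketch of a thread_local "current thread" pointer with an explicit setter,
// mirroring Get/SetCurrentEmuThread(). Names below are illustrative only.
#include <cassert>
#include <string>
#include <thread>

struct EmuThread {
    std::string name;
};

class CurrentContext {
public:
    static EmuThread* Get() {
        // Falls back to a per-host-thread dummy when nothing was registered,
        // loosely analogous to GetHostDummyThread() in the kernel.
        return current_ != nullptr ? current_ : &dummy_;
    }
    static void Set(EmuThread* thread) { current_ = thread; }

private:
    static inline thread_local EmuThread* current_{nullptr};
    static inline thread_local EmuThread dummy_{"HostDummyThread"};
};

int main() {
    EmuThread main_thread{"MainThread:0"};
    CurrentContext::Set(&main_thread);
    assert(CurrentContext::Get() == &main_thread);

    std::thread worker([] {
        // A different host thread sees its own thread_local slot (the dummy),
        // not the pointer registered by the main thread.
        assert(CurrentContext::Get()->name == "HostDummyThread");
    });
    worker.join();
}
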
@@ -805,6 +821,10 @@ void KernelCore::Shutdown() {
impl->Shutdown();
}
+void KernelCore::CloseServices() {
+ impl->CloseServices();
+}
+
const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
@@ -1024,6 +1044,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+ impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1078,14 +1102,22 @@ void KernelCore::Suspend(bool suspended) {
for (auto* process : GetProcessList()) {
process->SetActivity(activity);
+
+ if (should_suspend) {
+ // Wait for execution to stop
+ for (auto* thread : process->GetThreadList()) {
+ thread->WaitUntilSuspended();
+ }
+ }
}
}
void KernelCore::ShutdownCores() {
+ KScopedSchedulerLock lk{*this};
+
for (auto* thread : impl->shutdown_threads) {
void(thread->Run());
}
- InterruptAllPhysicalCores();
}
bool KernelCore::IsMulticore() const {
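
Note: Suspend() above now blocks on WaitUntilSuspended() for every thread of every process, so the caller only continues once emulated execution has actually stopped, and ShutdownCores() takes the scheduler lock before running the shutdown threads. A small sketch of such a suspend handshake built from a flag and a condition variable; Worker and its methods are illustrative, not the KThread API.

// Sketch of a suspend handshake: the controller requests suspension and then
// waits until the worker acknowledges it, mirroring the new WaitUntilSuspended
// wait in KernelCore::Suspend(). Worker is an illustrative class, not KThread.
#include <atomic>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class Worker {
public:
    void RequestSuspend() { suspend_requested_ = true; }

    // Blocks the caller until the worker thread has observed the request and
    // parked itself, so "suspended" really means "no longer executing".
    void WaitUntilSuspended() {
        std::unique_lock lk{mutex_};
        cv_.wait(lk, [this] { return suspended_; });
    }

    void Run() {
        while (!suspend_requested_) {
            std::this_thread::yield(); // emulated work would run here
        }
        {
            std::scoped_lock lk{mutex_};
            suspended_ = true;
        }
        cv_.notify_all();
    }

private:
    std::atomic_bool suspend_requested_{false};
    std::mutex mutex_;
    std::condition_variable cv_;
    bool suspended_{false};
};

int main() {
    Worker worker;
    std::thread host([&] { worker.Run(); });

    worker.RequestSuspend();
    worker.WaitUntilSuspended(); // returns only after the worker has stopped
    std::cout << "worker is suspended\n";

    host.join();
}
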