path: root/src/core/hle/kernel
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/hle_ipc.h                      5
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp          15
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h            15
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp              17
-rw-r--r--  src/core/hle/kernel/k_page_table.h                 1
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h            36
-rw-r--r--  src/core/hle/kernel/k_process.cpp                  6
-rw-r--r--  src/core/hle/kernel/k_process.h                    6
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp               11
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp          20
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                 127
-rw-r--r--  src/core/hle/kernel/k_thread.h                    31
-rw-r--r--  src/core/hle/kernel/k_worker_task.h               18
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.cpp     42
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.h       33
-rw-r--r--  src/core/hle/kernel/kernel.cpp                    25
-rw-r--r--  src/core/hle/kernel/kernel.h                       7
-rw-r--r--  src/core/hle/kernel/service_thread.cpp             5
18 files changed, 326 insertions, 94 deletions
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 55e6fb9f7..754b41ff6 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -341,10 +341,6 @@ public:
return *thread;
}
- bool IsThreadWaiting() const {
- return is_thread_waiting;
- }
-
private:
friend class IPC::ResponseBuilder;
@@ -379,7 +375,6 @@ private:
u32 domain_offset{};
std::shared_ptr<SessionRequestManager> manager;
- bool is_thread_waiting{};
KernelCore& kernel;
Core::Memory::Memory& memory;
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 0166df0a5..1b44541b1 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -8,12 +8,16 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
+#include "core/core.h"
+#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
+KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {}
+
std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
const auto size{end_address - start_address};
@@ -81,7 +85,7 @@ VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size
}
ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir) {
+ Direction dir, u32 heap_fill_value) {
ASSERT(page_list.GetNumPages() == 0);
// Early return if we're allocating no pages
@@ -139,6 +143,12 @@ ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_
}
}
+ // Clear allocated memory.
+ for (const auto& it : page_list.Nodes()) {
+ std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+ it.GetSize());
+ }
+
// Only succeed if we allocated as many pages as we wanted
if (num_pages) {
return ResultOutOfMemory;
@@ -146,11 +156,12 @@ ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_
// We succeeded!
group_guard.Cancel();
+
return ResultSuccess;
}
ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir) {
+ Direction dir, u32 heap_fill_value) {
// Early return if we're freeing no pages
if (!num_pages) {
return ResultSuccess;
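
Note: the allocation change above boils down to walking the allocated page group and filling each block's host backing memory with the requested pattern. A minimal standalone sketch of that loop, assuming an illustrative PageNode type and a flat device_base pointer in place of DeviceMemory::GetPointer:

#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative stand-in for a node of a KPageLinkedList.
struct PageNode {
    std::uint64_t address; // start of the block (device address)
    std::size_t size;      // size of the block in bytes
};

// Mirrors the "Clear allocated memory" step added to KMemoryManager::Allocate:
// every block handed back by the page heap is filled with heap_fill_value.
void FillAllocatedPages(std::uint8_t* device_base, const std::vector<PageNode>& nodes,
                        std::uint32_t heap_fill_value) {
    for (const auto& node : nodes) {
        // The real code resolves node addresses through DeviceMemory::GetPointer;
        // here the device is modeled as a flat buffer starting at device_base.
        std::memset(device_base + node.address, static_cast<int>(heap_fill_value), node.size);
    }
}
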
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 39badc5f1..abd6c8ace 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -12,6 +12,10 @@
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/result.h"
+namespace Core {
+class System;
+}
+
namespace Kernel {
class KPageLinkedList;
@@ -42,7 +46,7 @@ public:
Mask = (0xF << Shift),
};
- KMemoryManager() = default;
+ explicit KMemoryManager(Core::System& system_);
constexpr std::size_t GetSize(Pool pool) const {
return managers[static_cast<std::size_t>(pool)].GetSize();
@@ -51,10 +55,10 @@ public:
void InitializeManager(Pool pool, u64 start_address, u64 end_address);
VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
- ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir = Direction::FromFront);
- ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir = Direction::FromFront);
+ ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
+ u32 heap_fill_value = 0);
+ ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
+ u32 heap_fill_value = 0);
static constexpr std::size_t MaxManagerCount = 10;
@@ -129,6 +133,7 @@ private:
};
private:
+ Core::System& system;
std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
std::array<Impl, MaxManagerCount> managers;
};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 27d86c9a4..b650ea31d 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -289,8 +289,8 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
}
KPageLinkedList page_linked_list;
- CASCADE_CODE(
- system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
+ CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
+ allocation_option));
CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
block_manager->Update(addr, num_pages, state, perm);
@@ -457,8 +457,8 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
KPageLinkedList page_linked_list;
- CASCADE_CODE(
- system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, memory_pool));
+ CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
+ memory_pool, allocation_option));
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
@@ -541,7 +541,8 @@ ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
}
const std::size_t num_pages{size / PageSize};
- system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool);
+ system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
+ allocation_option);
block_manager->Update(addr, num_pages, KMemoryState::Free);
@@ -960,7 +961,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Allocate pages for the heap extension.
KPageLinkedList page_linked_list;
R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
- memory_pool));
+ memory_pool, allocation_option));
// Map the pages.
{
@@ -1027,8 +1028,8 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageLinkedList page_group;
- CASCADE_CODE(
- system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool));
+ CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages,
+ memory_pool, allocation_option));
CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
}
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 274644181..f67986e91 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -303,6 +303,7 @@ private:
bool is_aslr_enabled{};
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
+ KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
Common::PageTable page_table_impl;
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index f4d71ad7e..0b894c8cf 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -45,6 +45,7 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
{ t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
{ t.GetPriority() } -> Common::ConvertibleTo<s32>;
+ { t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
};
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
@@ -349,24 +350,49 @@ public:
// Mutators.
constexpr void PushBack(Member* member) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return;
+ }
+
this->PushBack(member->GetPriority(), member);
}
constexpr void Remove(Member* member) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return;
+ }
+
this->Remove(member->GetPriority(), member);
}
constexpr void MoveToScheduledFront(Member* member) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return;
+ }
+
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
constexpr KThread* MoveToScheduledBack(Member* member) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return {};
+ }
+
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
member);
}
// First class fancy operations.
constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return;
+ }
+
ASSERT(IsValidPriority(prev_priority));
// Remove the member from the queues.
@@ -383,6 +409,11 @@ public:
constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
Member* member) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return;
+ }
+
// Get the new information.
const s32 priority = member->GetPriority();
const AffinityMaskType& new_affinity = member->GetAffinityMask();
@@ -412,6 +443,11 @@ public:
}
constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
+ // This is for host (dummy) threads that we do not want to enter the priority queue.
+ if (member->IsDummyThread()) {
+ return;
+ }
+
// Get the new information.
const s32 new_core = member->GetActiveCore();
const s32 priority = member->GetPriority();
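
Note: every mutator above gains the same guard: return before touching any queue when the member reports itself as a dummy (host) thread, which is also why the KPriorityQueueMember concept now requires IsDummyThread(). A reduced sketch of the pattern with a hypothetical Member type standing in for KThread:

#include <algorithm>
#include <vector>

// Hypothetical member exposing only what the guard relies on.
struct Member {
    int priority{};
    bool is_dummy{};
    bool IsDummyThread() const { return is_dummy; }
    int GetPriority() const { return priority; }
};

class SimplePriorityQueue {
public:
    void PushBack(Member* member) {
        // Host (dummy) threads never enter the scheduler's queues.
        if (member->IsDummyThread()) {
            return;
        }
        members.push_back(member);
    }

    void Remove(Member* member) {
        // Symmetric guard: a dummy thread was never inserted, so there is
        // nothing to remove.
        if (member->IsDummyThread()) {
            return;
        }
        members.erase(std::remove(members.begin(), members.end(), member), members.end());
    }

private:
    std::vector<Member*> members; // the real queue is per-priority and per-core
};
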
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index cca405fed..265ac6fa1 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -149,6 +149,10 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
return ResultSuccess;
}
+void KProcess::DoWorkerTaskImpl() {
+ UNIMPLEMENTED();
+}
+
KResourceLimit* KProcess::GetResourceLimit() const {
return resource_limit;
}
@@ -477,7 +481,7 @@ void KProcess::Finalize() {
}
// Perform inherited finalization.
- KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize();
+ KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
/**
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index e7c8b5838..c2a672021 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -15,6 +15,7 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -62,8 +63,7 @@ enum class ProcessStatus {
DebugBreak,
};
-class KProcess final
- : public KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject> {
+class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public:
@@ -345,6 +345,8 @@ public:
bool IsSignaled() const override;
+ void DoWorkerTaskImpl();
+
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
void UnpinThread(KThread* thread);
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 31cec990e..b32d4f285 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -49,8 +49,6 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
if (!must_context_switch || core != current_core) {
auto& phys_core = kernel.PhysicalCore(core);
phys_core.Interrupt();
- } else {
- must_context_switch = true;
}
cores_pending_reschedule &= ~(1ULL << core);
}
@@ -408,6 +406,9 @@ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduli
} else {
RescheduleCores(kernel, cores_needing_scheduling);
}
+
+ // Special case to ensure dummy threads that are waiting block.
+ current_thread->IfDummyThreadTryWait();
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -741,6 +742,12 @@ void KScheduler::ScheduleImpl() {
next_thread = idle_thread;
}
+ // We never want to schedule a dummy thread, as these are only used by host threads for locking.
+ if (next_thread->GetThreadType() == ThreadType::Dummy) {
+ ASSERT_MSG(false, "Dummy threads should never be scheduled!");
+ next_thread = idle_thread;
+ }
+
// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
previous_thread->EnableDispatch();
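
Note: the ScheduleImpl guard reduces to a selection rule: if the thread picked to run turns out to be a dummy thread, report it and run the core's idle thread instead, since dummy threads exist only so host threads can participate in guest locking. A tiny compilable sketch of that rule (Thread and ThreadType here are simplified stand-ins):

#include <cassert>

enum class ThreadType { User, Dummy }; // simplified stand-in

struct Thread {
    ThreadType type{};
    ThreadType GetThreadType() const { return type; }
};

// Mirrors the guard added to KScheduler::ScheduleImpl().
Thread* SelectNextThread(Thread* next_thread, Thread* idle_thread) {
    if (next_thread == nullptr) {
        next_thread = idle_thread;
    }
    if (next_thread->GetThreadType() == ThreadType::Dummy) {
        assert(false && "Dummy threads should never be scheduled!");
        next_thread = idle_thread;
    }
    return next_thread;
}
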
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index d4e4a6b06..4d94eb9cf 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -8,7 +8,6 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
-#include "common/scope_exit.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h"
@@ -123,20 +122,10 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
- // In the event that something fails here, stub a result to prevent the game from crashing.
- // This is a work-around in the event that somehow we process a service request after the
- // session has been closed by the game. This has been observed to happen rarely in Pokemon
- // Sword/Shield and is likely a result of us using host threads/scheduling for services.
- // TODO(bunnei): Find a better solution here.
- auto error_guard = SCOPE_GUARD({ CompleteSyncRequest(*context); });
-
// Ensure we have a session request handler
if (manager->HasSessionRequestHandler(*context)) {
if (auto strong_ptr = manager->GetServiceThread().lock()) {
strong_ptr->QueueSyncRequest(*parent, std::move(context));
-
- // We succeeded.
- error_guard.Cancel();
} else {
ASSERT_MSG(false, "strong_ptr is nullptr!");
}
@@ -171,13 +160,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
convert_to_domain = false;
}
- // Some service requests require the thread to block
- {
- KScopedSchedulerLock lock(kernel);
- if (!context.IsThreadWaiting()) {
- context.GetThread().EndWait(result);
- }
- }
+ // The calling thread is waiting for this request to complete, so wake it up.
+ context.GetThread().EndWait(result);
return result;
}
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 71e029a3f..f42abb8a1 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -30,6 +30,7 @@
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
@@ -105,7 +106,7 @@ KThread::~KThread() = default;
ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
- ASSERT((type == ThreadType::Main) ||
+ ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
ASSERT((owner != nullptr) || (type != ThreadType::User));
ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
@@ -139,7 +140,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
- thread_type_for_debugging = type;
+ thread_type = type;
// Set the ideal core ID and affinity mask.
virtual_ideal_core_id = virt_core;
@@ -261,7 +262,7 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
}
ResultCode KThread::InitializeDummyThread(KThread* thread) {
- return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
+ return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
}
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -332,7 +333,7 @@ void KThread::Finalize() {
}
// Perform inherited finalization.
- KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>::Finalize();
+ KSynchronizationObject::Finalize();
}
bool KThread::IsSignaled() const {
@@ -376,11 +377,28 @@ void KThread::StartTermination() {
// Register terminated dpc flag.
RegisterDpc(DpcFlag::Terminated);
+}
+
+void KThread::FinishTermination() {
+ // Ensure that the thread is not executing on any core.
+ if (parent != nullptr) {
+ for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ KThread* core_thread{};
+ do {
+ core_thread = kernel.Scheduler(i).GetCurrentThread();
+ } while (core_thread == this);
+ }
+ }
// Close the thread.
this->Close();
}
+void KThread::DoWorkerTaskImpl() {
+ // Finish the termination that was begun by Exit().
+ this->FinishTermination();
+}
+
void KThread::Pin(s32 current_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
@@ -417,12 +435,7 @@ void KThread::Pin(s32 current_core) {
static_cast<u32>(ThreadState::SuspendShift)));
// Update our state.
- const ThreadState old_state = thread_state;
- thread_state = static_cast<ThreadState>(GetSuspendFlags() |
- static_cast<u32>(old_state & ThreadState::Mask));
- if (thread_state != old_state) {
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
- }
+ UpdateState();
}
// TODO(bunnei): Update our SVC access permissions.
@@ -463,20 +476,13 @@ void KThread::Unpin() {
}
// Allow performing thread suspension (if termination hasn't been requested).
- {
+ if (!IsTerminationRequested()) {
// Update our allow flags.
- if (!IsTerminationRequested()) {
- suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
- static_cast<u32>(ThreadState::SuspendShift)));
- }
+ suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
+ static_cast<u32>(ThreadState::SuspendShift)));
// Update our state.
- const ThreadState old_state = thread_state;
- thread_state = static_cast<ThreadState>(GetSuspendFlags() |
- static_cast<u32>(old_state & ThreadState::Mask));
- if (thread_state != old_state) {
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
- }
+ UpdateState();
}
// TODO(bunnei): Update our SVC access permissions.
@@ -689,12 +695,7 @@ void KThread::Resume(SuspendType type) {
~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
// Update our state.
- const ThreadState old_state = thread_state;
- thread_state = static_cast<ThreadState>(GetSuspendFlags() |
- static_cast<u32>(old_state & ThreadState::Mask));
- if (thread_state != old_state) {
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
- }
+ this->UpdateState();
}
void KThread::WaitCancel() {
@@ -721,19 +722,22 @@ void KThread::TrySuspend() {
ASSERT(GetNumKernelWaiters() == 0);
// Perform the suspend.
- Suspend();
+ this->UpdateState();
}
-void KThread::Suspend() {
+void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
- ASSERT(IsSuspendRequested());
// Set our suspend flags in state.
const auto old_state = thread_state;
- thread_state = static_cast<ThreadState>(GetSuspendFlags()) | (old_state & ThreadState::Mask);
+ const auto new_state =
+ static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
+ thread_state = new_state;
// Note the state change in scheduler.
- KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ if (new_state != old_state) {
+ KScheduler::OnThreadStateChanged(kernel, this, old_state);
+ }
}
void KThread::Continue() {
@@ -998,13 +1002,16 @@ ResultCode KThread::Run() {
// If the current thread has been asked to suspend, suspend it and retry.
if (GetCurrentThread(kernel).IsSuspended()) {
- GetCurrentThread(kernel).Suspend();
+ GetCurrentThread(kernel).UpdateState();
continue;
}
// If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
- if (IsUserThread() && IsSuspended()) {
- Suspend();
+ if (KProcess* owner = this->GetOwnerProcess(); owner != nullptr) {
+ if (IsUserThread() && IsSuspended()) {
+ this->UpdateState();
+ }
+ owner->IncrementThreadCount();
}
// Set our state and finish.
@@ -1031,9 +1038,16 @@ void KThread::Exit() {
// Disallow all suspension.
suspend_allowed_flags = 0;
+ this->UpdateState();
+
+ // Disallow all suspension.
+ suspend_allowed_flags = 0;
// Start termination.
StartTermination();
+
+ // Register the thread as a work task.
+ KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this);
}
}
@@ -1061,12 +1075,46 @@ ResultCode KThread::Sleep(s64 timeout) {
return ResultSuccess;
}
+void KThread::IfDummyThreadTryWait() {
+ if (!IsDummyThread()) {
+ return;
+ }
+
+ if (GetState() != ThreadState::Waiting) {
+ return;
+ }
+
+ // Block until we can grab the lock.
+ KScopedSpinLock lk{dummy_wait_lock};
+}
+
+void KThread::IfDummyThreadBeginWait() {
+ if (!IsDummyThread()) {
+ return;
+ }
+
+ // Ensure the thread will block when IfDummyThreadTryWait is called.
+ dummy_wait_lock.Lock();
+}
+
+void KThread::IfDummyThreadEndWait() {
+ if (!IsDummyThread()) {
+ return;
+ }
+
+ // Ensure the thread will no longer block.
+ dummy_wait_lock.Unlock();
+}
+
void KThread::BeginWait(KThreadQueue* queue) {
// Set our state as waiting.
SetState(ThreadState::Waiting);
// Set our wait queue.
wait_queue = queue;
+
+ // Special case for dummy threads to ensure they block.
+ IfDummyThreadBeginWait();
}
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1085,7 +1133,16 @@ void KThread::EndWait(ResultCode wait_result_) {
// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
+ if (wait_queue == nullptr) {
+ // This should never happen, but avoid a hard crash below to get this logged.
+ ASSERT_MSG(false, "wait_queue is nullptr!");
+ return;
+ }
+
wait_queue->EndWait(this, wait_result_);
+
+ // Special case for dummy threads to wakeup if necessary.
+ IfDummyThreadEndWait();
}
}
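
Note: the three IfDummyThread* helpers form a small hand-off around dummy_wait_lock: BeginWait arms the lock when a dummy thread enters the Waiting state, IfDummyThreadTryWait (called from the scheduler path on the host thread) blocks trying to acquire it, and EndWait releases it to wake the host thread. A simplified, self-contained sketch of the same hand-off, using a binary semaphore in place of the kernel's KSpinLock since it may legally be released from a different thread:

#include <semaphore>

// Models only the lock hand-off used by dummy (host) threads; the guest
// ThreadState checks of the real helpers are omitted.
class DummyThreadWait {
public:
    // KThread::IfDummyThreadBeginWait: arm the wait so a later TryWait blocks.
    void BeginWait() {
        wait_sema.acquire();
    }

    // KThread::IfDummyThreadTryWait: runs on the host thread from the
    // scheduling path and blocks here until EndWait releases the semaphore.
    void TryWait() {
        wait_sema.acquire();
        wait_sema.release();
    }

    // KThread::IfDummyThreadEndWait: wake the blocked host thread.
    void EndWait() {
        wait_sema.release();
    }

private:
    std::binary_semaphore wait_sema{1};
};
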
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 83dfde69b..d058db62c 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -19,6 +19,7 @@
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_types.h"
@@ -100,7 +101,7 @@ enum class ThreadWaitReasonForDebugging : u32 {
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
-class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>,
+class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>,
public boost::intrusive::list_base_hook<> {
KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
@@ -111,6 +112,7 @@ private:
public:
static constexpr s32 DefaultThreadPriority = 44;
static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
+ static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
explicit KThread(KernelCore& kernel_);
~KThread() override;
@@ -192,9 +194,9 @@ public:
void TrySuspend();
- void Continue();
+ void UpdateState();
- void Suspend();
+ void Continue();
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
@@ -385,6 +387,8 @@ public:
void OnTimer();
+ void DoWorkerTaskImpl();
+
static void PostDestroy(uintptr_t arg);
[[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@@ -550,8 +554,12 @@ public:
return wait_reason_for_debugging;
}
- [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
- return thread_type_for_debugging;
+ [[nodiscard]] ThreadType GetThreadType() const {
+ return thread_type;
+ }
+
+ [[nodiscard]] bool IsDummyThread() const {
+ return GetThreadType() == ThreadType::Dummy;
}
void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
@@ -628,6 +636,14 @@ public:
return condvar_key;
}
+ // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and
+ // therefore will not block on guest kernel synchronization primitives. These methods handle
+ // blocking as needed.
+
+ void IfDummyThreadTryWait();
+ void IfDummyThreadBeginWait();
+ void IfDummyThreadEndWait();
+
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
@@ -679,6 +695,8 @@ private:
void StartTermination();
+ void FinishTermination();
+
[[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
@@ -744,16 +762,17 @@ private:
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
KSpinLock context_guard{};
+ KSpinLock dummy_wait_lock{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
+ ThreadType thread_type{};
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{};
- ThreadType thread_type_for_debugging{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
diff --git a/src/core/hle/kernel/k_worker_task.h b/src/core/hle/kernel/k_worker_task.h
new file mode 100644
index 000000000..b7794c6a8
--- /dev/null
+++ b/src/core/hle/kernel/k_worker_task.h
@@ -0,0 +1,18 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/kernel/k_synchronization_object.h"
+
+namespace Kernel {
+
+class KWorkerTask : public KSynchronizationObject {
+public:
+ explicit KWorkerTask(KernelCore& kernel_);
+
+ void DoWorkerTask();
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp
new file mode 100644
index 000000000..785e08111
--- /dev/null
+++ b/src/core/hle/kernel/k_worker_task_manager.cpp
@@ -0,0 +1,42 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_worker_task.h"
+#include "core/hle/kernel/k_worker_task_manager.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+
+void KWorkerTask::DoWorkerTask() {
+ if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) {
+ return thread->DoWorkerTaskImpl();
+ } else {
+ auto* const process = this->DynamicCast<KProcess*>();
+ ASSERT(process != nullptr);
+
+ return process->DoWorkerTaskImpl();
+ }
+}
+
+KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "yuzu:KWorkerTaskManager") {}
+
+void KWorkerTaskManager::AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task) {
+ ASSERT(type <= WorkerType::Count);
+ kernel.WorkerTaskManager().AddTask(kernel, task);
+}
+
+void KWorkerTaskManager::AddTask(KernelCore& kernel, KWorkerTask* task) {
+ KScopedSchedulerLock sl(kernel);
+ m_waiting_thread.QueueWork([task]() {
+ // Do the task.
+ task->DoWorkerTask();
+ });
+}
+
+} // namespace Kernel
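
Note: KWorkerTaskManager is essentially a one-thread work queue: AddTask queues a callable that invokes DoWorkerTask, and DoWorkerTask routes to KThread or KProcess by runtime type. A simplified, self-contained model of that flow, with plain virtual dispatch and a hand-rolled queue standing in for DynamicCast and Common::ThreadWorker:

#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

// Stand-in for the worker-task hierarchy; virtual dispatch replaces the
// kernel's DynamicCast-based routing in KWorkerTask::DoWorkerTask.
struct WorkerTask {
    virtual ~WorkerTask() = default;
    virtual void DoWorkerTask() = 0;
};

struct ExitingThreadTask : WorkerTask {
    void DoWorkerTask() override {
        // In the real code this is KThread::DoWorkerTaskImpl, which finishes
        // the termination begun by Exit() (FinishTermination + Close).
        std::cout << "finishing thread termination\n";
    }
};

// Single background worker mirroring the role Common::ThreadWorker plays
// inside KWorkerTaskManager.
class WorkerTaskManager {
public:
    WorkerTaskManager() : worker{[this] { Run(); }} {}
    ~WorkerTaskManager() {
        {
            std::scoped_lock lk{mutex};
            stop = true;
        }
        cv.notify_one();
        worker.join();
    }

    void AddTask(std::shared_ptr<WorkerTask> task) {
        {
            std::scoped_lock lk{mutex};
            tasks.push(std::move(task));
        }
        cv.notify_one();
    }

private:
    void Run() {
        while (true) {
            std::shared_ptr<WorkerTask> task;
            {
                std::unique_lock lk{mutex};
                cv.wait(lk, [this] { return stop || !tasks.empty(); });
                if (stop && tasks.empty()) {
                    return;
                }
                task = std::move(tasks.front());
                tasks.pop();
            }
            task->DoWorkerTask();
        }
    }

    std::mutex mutex;
    std::condition_variable cv;
    std::queue<std::shared_ptr<WorkerTask>> tasks;
    bool stop{};
    std::thread worker; // declared last so it starts after the other members
};

int main() {
    WorkerTaskManager manager;
    manager.AddTask(std::make_shared<ExitingThreadTask>());
}
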
diff --git a/src/core/hle/kernel/k_worker_task_manager.h b/src/core/hle/kernel/k_worker_task_manager.h
new file mode 100644
index 000000000..43d1bfcec
--- /dev/null
+++ b/src/core/hle/kernel/k_worker_task_manager.h
@@ -0,0 +1,33 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "common/thread_worker.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KWorkerTask;
+
+class KWorkerTaskManager final {
+public:
+ enum class WorkerType : u32 {
+ Exit,
+ Count,
+ };
+
+ KWorkerTaskManager();
+
+ static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task);
+
+private:
+ void AddTask(KernelCore& kernel, KWorkerTask* task);
+
+private:
+ Common::ThreadWorker m_waiting_thread;
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 0b618fb46..49c0714ed 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -37,6 +37,7 @@
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/service_thread.h"
@@ -300,12 +301,10 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() {
auto make_thread = [this]() {
- std::lock_guard lk(dummy_thread_lock);
- auto& thread = dummy_threads.emplace_back(std::make_unique<KThread>(system.Kernel()));
- KAutoObject::Create(thread.get());
- ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
+ KThread* thread = KThread::Create(system.Kernel());
+ ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
- return thread.get();
+ return thread;
};
thread_local KThread* saved_thread = make_thread();
@@ -630,7 +629,7 @@ struct KernelCore::Impl {
const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();
// Initialize memory managers
- memory_manager = std::make_unique<KMemoryManager>();
+ memory_manager = std::make_unique<KMemoryManager>(system);
memory_manager->InitializeManager(KMemoryManager::Pool::Application,
application_pool.GetAddress(),
application_pool.GetEndAddress());
@@ -730,7 +729,6 @@ struct KernelCore::Impl {
std::mutex server_sessions_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
- std::mutex dummy_thread_lock;
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
@@ -787,9 +785,6 @@ struct KernelCore::Impl {
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
- // Specifically tracked to be automatically destroyed with kernel
- std::vector<std::unique_ptr<KThread>> dummy_threads;
-
bool is_multicore{};
std::atomic_bool is_shutting_down{};
bool is_phantom_mode_for_singlecore{};
@@ -797,6 +792,8 @@ struct KernelCore::Impl {
std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
+ KWorkerTaskManager worker_task_manager;
+
// System context
Core::System& system;
};
@@ -1137,6 +1134,14 @@ const Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() const {
return impl->slab_resource_counts;
}
+KWorkerTaskManager& KernelCore::WorkerTaskManager() {
+ return impl->worker_task_manager;
+}
+
+const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
+ return impl->worker_task_manager;
+}
+
bool KernelCore::IsPhantomModeForSingleCore() const {
return impl->IsPhantomModeForSingleCore();
}
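
Note: GetHostDummyThread now drops the mutex-guarded vector of dummy threads in favour of a thread_local pointer initialized once per host thread via KThread::Create. The shape of that lazy per-thread initialization, with a hypothetical DummyThread in place of KThread:

#include <atomic>
#include <cstdio>
#include <thread>

// Hypothetical stand-in for the per-host-thread dummy KThread.
struct DummyThread {
    int id;
};

std::atomic<int> next_id{0};

// The lambda runs exactly once per host thread; the thread_local keeps the
// result for every later call made from the same thread.
DummyThread* GetHostDummyThread() {
    const auto make_thread = [] {
        // Real code: KThread::Create + InitializeDummyThread + SetName.
        // Leaked here for brevity; the real dummy thread is Close()d when the
        // owning service thread exits (see the service_thread.cpp hunk below).
        return new DummyThread{next_id.fetch_add(1)};
    };
    thread_local DummyThread* saved_thread = make_thread();
    return saved_thread;
}

int main() {
    std::thread a{[] { std::printf("dummy id %d\n", GetHostDummyThread()->id); }};
    std::thread b{[] { std::printf("dummy id %d\n", GetHostDummyThread()->id); }};
    a.join();
    b.join();
}
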
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index b9b423908..0e04fc3bb 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -52,6 +52,7 @@ class KSharedMemory;
class KSharedMemoryInfo;
class KThread;
class KTransferMemory;
+class KWorkerTaskManager;
class KWritableEvent;
class KCodeMemory;
class PhysicalCore;
@@ -343,6 +344,12 @@ public:
/// Gets the current slab resource counts.
const Init::KSlabResourceCounts& SlabResourceCounts() const;
+ /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks.
+ KWorkerTaskManager& WorkerTaskManager();
+
+ /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks.
+ const KWorkerTaskManager& WorkerTaskManager() const;
+
private:
friend class KProcess;
friend class KThread;
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 03f3dec10..4eb3a5988 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -12,6 +12,7 @@
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/hle/kernel/k_session.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h"
@@ -50,6 +51,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
kernel.RegisterHostThread();
+ // Ensure the dummy thread allocated for this host thread is closed on exit.
+ auto* dummy_thread = kernel.GetCurrentEmuThread();
+ SCOPE_EXIT({ dummy_thread->Close(); });
+
while (true) {
std::function<void()> task;
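
Note: the SCOPE_EXIT above is the usual RAII scope-guard idiom: whatever happens in the worker loop, the dummy thread created for this host thread is closed when the guard goes out of scope. A small approximation of that idiom, assuming a hypothetical DummyThread resource:

#include <cstdio>
#include <utility>

// Minimal RAII scope guard approximating common/scope_exit.h's SCOPE_EXIT.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& func) : func_{std::forward<F>(func)} {}
    ~ScopeExit() { func_(); }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func_;
};

// Hypothetical per-thread resource standing in for the dummy KThread.
struct DummyThread {
    void Close() { std::puts("dummy thread closed"); }
};

void ServiceThreadBody(DummyThread* dummy_thread) {
    // Ensure the dummy thread allocated for this host thread is closed on
    // exit, no matter how the loop below terminates.
    ScopeExit guard{[&] { dummy_thread->Close(); }};

    // ... service loop would run here ...
}

int main() {
    DummyThread dummy;
    ServiceThreadBody(&dummy);
}
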