From 10738839ad7b9abbcf8ac64c6e58de63a9fbae76 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sun, 24 Jan 2021 22:55:08 -0800
Subject: yuzu: debugger: Ignore HLE threads.

---
 src/core/hle/kernel/k_thread.cpp | 15 ++++++++-------
 src/core/hle/kernel/k_thread.h   |  5 +++++
 src/yuzu/debugger/wait_tree.cpp  | 10 ++++++++--
 3 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index f57e98047..45ad589d9 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -101,11 +101,12 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
         break;
     }
+    thread_type_for_debugging = type;
 
     // Set the ideal core ID and affinity mask.
     virtual_ideal_core_id = virt_core;
     physical_ideal_core_id = phys_core;
-    virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
+    virtual_affinity_mask = 1ULL << virt_core;
     physical_affinity_mask.SetAffinity(phys_core, true);
 
     // Set the thread state.
@@ -353,7 +354,7 @@ void KThread::Unpin() {
     // Enable core migration.
     ASSERT(num_core_migration_disables == 1);
     {
-        --num_core_migration_disables;
+        num_core_migration_disables--;
 
         // Restore our original state.
         const KAffinityMask old_mask = physical_affinity_mask;
@@ -494,8 +495,8 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
 
     // Update the pinned waiter list.
     {
-        bool retry_update = false;
-        bool thread_is_pinned = false;
+        bool retry_update{};
+        bool thread_is_pinned{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
@@ -507,7 +508,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
             retry_update = false;
 
             // Check if the thread is currently running.
-            bool thread_is_current = false;
+            bool thread_is_current{};
             s32 thread_core;
             for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
                  ++thread_core) {
@@ -683,8 +684,8 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
 
     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        bool thread_is_pinned = false;
-        bool thread_is_current;
+        bool thread_is_pinned{};
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl{kernel};
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index eeddf5a65..c8ac656a4 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -534,6 +534,10 @@ public:
         return wait_reason_for_debugging;
     }
 
+    [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
+        return thread_type_for_debugging;
+    }
+
     void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
         wait_objects_for_debugging.clear();
         wait_objects_for_debugging.reserve(objects.size());
@@ -721,6 +725,7 @@ private:
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
     VAddr mutex_wait_address_for_debugging{};
     ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+    ThreadType thread_type_for_debugging{};
     std::string name;
 
 public:
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index cbec692f9..0e5156dcc 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -93,8 +93,10 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
     std::size_t row = 0;
     auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::KThread>>& threads) {
         for (std::size_t i = 0; i < threads.size(); ++i) {
-            item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
-            item_list.back()->row = row;
+            if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
+                item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
+                item_list.back()->row = row;
+            }
             ++row;
         }
     };
@@ -148,6 +150,10 @@ QString WaitTreeCallstack::GetText() const {
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
     std::vector<std::unique_ptr<WaitTreeItem>> list;
 
+    if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) {
+        return list;
+    }
+
     if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64BitProcess()) {
         return list;
     }
-- 
cgit v1.2.3