-rw-r--r--   src/core/CMakeLists.txt                 1
-rw-r--r--   src/core/hle/kernel/k_affinity_mask.h  62
-rw-r--r--   src/core/hle/kernel/scheduler.cpp      10
-rw-r--r--   src/core/hle/kernel/svc.cpp             2
-rw-r--r--   src/core/hle/kernel/thread.cpp         11
-rw-r--r--   src/core/hle/kernel/thread.h            6
-rw-r--r--   src/yuzu/debugger/wait_tree.cpp         4
7 files changed, 80 insertions, 16 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 66de33799..adabe8dbd 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -152,6 +152,7 @@ add_library(core STATIC
hle/kernel/handle_table.h
hle/kernel/hle_ipc.cpp
hle/kernel/hle_ipc.h
+ hle/kernel/k_affinity_mask.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory/address_space_info.cpp
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
new file mode 100644
index 000000000..fa2a720a4
--- /dev/null
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -0,0 +1,62 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hardware_properties.h"
+
+namespace Kernel {
+
+class KAffinityMask {
+private:
+    static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
+
+private:
+ u64 mask;
+
+private:
+ static constexpr u64 GetCoreBit(s32 core) {
+ ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ return (1ull << core);
+ }
+
+public:
+    constexpr KAffinityMask() : mask(0) {
+        // Note: `this` can never be null in well-formed C++, so no self-assert is needed.
+    }
+
+ constexpr u64 GetAffinityMask() const {
+ return this->mask;
+ }
+
+ constexpr void SetAffinityMask(u64 new_mask) {
+ ASSERT((new_mask & ~AllowedAffinityMask) == 0);
+ this->mask = new_mask;
+ }
+
+ constexpr bool GetAffinity(s32 core) const {
+        return (this->mask & GetCoreBit(core)) != 0;
+ }
+
+ constexpr void SetAffinity(s32 core, bool set) {
+ ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+
+ if (set) {
+ this->mask |= GetCoreBit(core);
+ } else {
+ this->mask &= ~GetCoreBit(core);
+ }
+ }
+
+ constexpr void SetAll() {
+ this->mask = AllowedAffinityMask;
+ }
+};
+
+} // namespace Kernel
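
The new class is a thin, assert-guarded wrapper over the raw u64 bitmask used previously. A minimal usage sketch follows (assumes the yuzu build tree; the function name and values are illustrative, not part of this change):

    #include "common/assert.h"
    #include "core/hle/kernel/k_affinity_mask.h"

    void AffinityMaskExample() {
        Kernel::KAffinityMask mask{};    // default-constructed: no cores allowed
        mask.SetAffinity(0, true);       // permit core 0
        mask.SetAffinity(3, true);       // permit core 3
        ASSERT(mask.GetAffinity(3));     // per-core query
        mask.SetAffinity(3, false);      // revoke core 3
        mask.SetAll();                   // permit every hardware core
        ASSERT(mask.GetAffinityMask() == 0b1111);  // the Switch has 4 CPU cores
    }
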
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5c63b0b4a..9a969fdb5 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -452,7 +452,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
+ thread->affinity_mask.GetAffinity(core)) {
Unsuggest(thread->current_priority, core, thread);
}
}
@@ -464,7 +464,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
+ thread->affinity_mask.GetAffinity(core)) {
Suggest(thread->current_priority, core, thread);
}
}
@@ -484,7 +484,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
+ thread->affinity_mask.GetAffinity(core)) {
Unsuggest(old_priority, core, thread);
}
}
@@ -500,7 +500,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
if (core != static_cast<u32>(thread->processor_id) &&
- ((thread->affinity_mask >> core) & 1) != 0) {
+ thread->affinity_mask.GetAffinity(core)) {
Suggest(thread->current_priority, core, thread);
}
}
@@ -527,7 +527,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit
}
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
- if (((thread->affinity_mask >> core) & 1) != 0) {
+ if (thread->affinity_mask.GetAffinity(core)) {
if (core == static_cast<u32>(thread->processor_id)) {
Schedule(thread->current_priority, core, thread);
} else {
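
Each scheduler hunk above is the same mechanical substitution: the raw shift-and-mask test becomes a call through the wrapper, which additionally asserts that the core index is in range. An equivalence sketch (the function name is illustrative; `mask` and `core` stand in for the thread's fields):

    void EquivalenceSketch(const Kernel::KAffinityMask& mask, u32 core) {
        const u64 raw = mask.GetAffinityMask();
        const bool old_way = ((raw >> core) & 1) != 0;              // pre-change bit test
        const bool new_way = mask.GetAffinity(static_cast<s32>(core));  // post-change, bounds-asserted
        ASSERT(old_way == new_way);
    }
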
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 95d6e2b4d..0cd712d09 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -2003,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
}
*core = thread->GetIdealCore();
- *mask = thread->GetAffinityMask();
+ *mask = thread->GetAffinityMask().GetAffinityMask();
return RESULT_SUCCESS;
}
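
The doubled call reads oddly but is intentional: Thread::GetAffinityMask() now returns the KAffinityMask wrapper, and the inner GetAffinityMask() extracts the raw u64 that the SVC writes back to the guest. As a sketch:

    const Kernel::KAffinityMask& affinity = thread->GetAffinityMask();  // wrapper
    *mask = affinity.GetAffinityMask();  // raw u64 bitmask handed back to the guest
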
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 7d1eb2c6e..38b4a0987 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -191,7 +191,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
thread->last_running_ticks = 0;
thread->processor_id = processor_id;
thread->ideal_core = processor_id;
- thread->affinity_mask = 1ULL << processor_id;
+ thread->affinity_mask.SetAffinity(processor_id, true);
thread->wait_objects = nullptr;
thread->mutex_wait_address = 0;
thread->condvar_wait_address = 0;
@@ -479,15 +479,16 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
}
if (use_override) {
ideal_core_override = new_core;
- affinity_mask_override = new_affinity_mask;
} else {
- const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
+ const auto old_affinity_mask = affinity_mask.GetAffinityMask();
+ affinity_mask.SetAffinityMask(new_affinity_mask);
ideal_core = new_core;
if (old_affinity_mask != new_affinity_mask) {
const s32 old_core = processor_id;
- if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+ if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
if (static_cast<s32>(ideal_core) < 0) {
- processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
+ processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
+ Core::Hardware::NUM_CPU_CORES);
} else {
processor_id = ideal_core;
}
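
The re-pinning path above depends on HighestSetCore, a pre-existing helper in thread.cpp that this commit leaves unchanged. A behavioral sketch (not the verbatim implementation):

    // Returns the index of the highest set bit among the first `num_cores`
    // bits of `mask`, or -1 when no allowed core remains.
    static s32 HighestSetCore(u64 mask, u32 num_cores) {
        for (s32 core = static_cast<s32>(num_cores) - 1; core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1;
    }
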
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index a75071e9b..5192ecff1 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -12,6 +12,7 @@
#include "common/common_types.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
+#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"
@@ -469,7 +470,7 @@ public:
return ideal_core;
}
- u64 GetAffinityMask() const {
+ constexpr const KAffinityMask& GetAffinityMask() const {
return affinity_mask;
}
@@ -649,10 +650,9 @@ private:
Scheduler* scheduler = nullptr;
u32 ideal_core{0xFFFFFFFF};
- u64 affinity_mask{0x1};
+ KAffinityMask affinity_mask{};
s32 ideal_core_override = -1;
- u64 affinity_mask_override = 0x1;
u32 affinity_override_count = 0;
u32 scheduling_state = 0;
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index a20824719..c4ae1d61f 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -349,8 +349,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
list.push_back(
std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore())));
- list.push_back(
- std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.GetAffinityMask())));
+ list.push_back(std::make_unique<WaitTreeText>(
+ tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
.arg(thread.GetPriority())