path: root/src/core/hle
author     Zach Hilman <zachhilman@gmail.com>  2018-11-19 05:44:19 +0100
committer  Zach Hilman <zachhilman@gmail.com>  2018-11-19 05:44:19 +0100
commit     409dcf0e0aecfdb676fd3b64223a25e47c1b1c1a (patch)
tree       ccb9eae7c7e8b93760f3087fb6e67a13cbb24f2c
parent     Merge pull request #1717 from FreddyFunk/swizzle-gob (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 18
-rw-r--r--  src/core/hle/kernel/scheduler.h   |  6
-rw-r--r--  src/core/hle/kernel/svc.cpp       | 27
-rw-r--r--  src/core/hle/kernel/thread.cpp    | 60
-rw-r--r--  src/core/hle/kernel/thread.h      |  5
5 files changed, 114 insertions(+), 2 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5a5f4cef1..fb5e14950 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -169,6 +169,16 @@ void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
ready_queue.remove(priority, thread);
}
+void Scheduler::RescheduleThread(Thread* thread, u32 priority) {
+ std::lock_guard<std::mutex> lock(scheduler_mutex);
+
+ // Thread is not in queue
+ ASSERT(ready_queue.contains(thread) != -1);
+
+ ready_queue.remove(priority, thread);
+ ready_queue.push_back(priority, thread);
+}
+
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
std::lock_guard<std::mutex> lock(scheduler_mutex);
@@ -179,4 +189,12 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
ready_queue.prepare(priority);
}
+Thread* Scheduler::GetNextSuggestedThread(u32 core) {
+ std::lock_guard<std::mutex> lock(scheduler_mutex);
+
+ const auto mask = 1 << core;
+ return ready_queue.get_first_filter(
+ [&mask](Thread* thread) { return (thread->GetAffinityMask() & mask) != 0; });
+}
+
} // namespace Kernel
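
For orientation, the sketch below reimplements the two new scheduler operations in isolation: RescheduleThread rotates a thread to the back of its priority level, and GetNextSuggestedThread returns the first ready thread whose affinity mask allows a given core. The ReadyQueue and FakeThread types here are hypothetical stand-ins for yuzu's actual ready_queue and Thread classes; only the rotation and affinity-filter logic mirrors the diff.

// NOTE: standalone illustration, not part of the commit. ReadyQueue and
// FakeThread are hypothetical stand-ins for yuzu's ready_queue and Thread.
#include <array>
#include <cstdint>
#include <deque>
#include <iostream>

struct FakeThread {
    std::uint32_t priority;      // 0 = highest, 63 = lowest
    std::uint64_t affinity_mask; // bit n set -> may run on core n
    int id;
};

class ReadyQueue {
public:
    void Push(FakeThread* t) {
        levels[t->priority].push_back(t);
    }

    // Spirit of Scheduler::RescheduleThread: move the thread to the back of
    // its priority level so other threads at that priority get a turn.
    void Rotate(FakeThread* t) {
        auto& level = levels[t->priority];
        for (auto it = level.begin(); it != level.end(); ++it) {
            if (*it == t) {
                level.erase(it);
                break;
            }
        }
        level.push_back(t);
    }

    // Spirit of Scheduler::GetNextSuggestedThread: the first ready thread
    // (highest priority first) whose affinity mask allows `core`.
    FakeThread* FirstAllowedOn(std::uint32_t core) const {
        const std::uint64_t mask = std::uint64_t{1} << core;
        for (const auto& level : levels) {
            for (FakeThread* t : level) {
                if ((t->affinity_mask & mask) != 0) {
                    return t;
                }
            }
        }
        return nullptr;
    }

private:
    std::array<std::deque<FakeThread*>, 64> levels{};
};

int main() {
    ReadyQueue queue;
    FakeThread a{44, 0b0001, 1}; // pinned to core 0
    FakeThread b{44, 0b1111, 2}; // may run anywhere
    queue.Push(&a);
    queue.Push(&b);

    queue.Rotate(&a); // a now sits behind b at priority 44

    FakeThread* suggested = queue.FirstAllowedOn(3); // only b may run on core 3
    std::cout << (suggested ? suggested->id : -1) << '\n'; // prints 2
}
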
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index c63032b7d..8444afdbc 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -48,9 +48,15 @@ public:
/// Unschedules a thread that was already scheduled
void UnscheduleThread(Thread* thread, u32 priority);
+ /// Moves a thread to the back of the current priority queue
+ void RescheduleThread(Thread* thread, u32 priority);
+
/// Sets the priority of a thread in the scheduler
void SetThreadPriority(Thread* thread, u32 priority);
+ /// Gets the next suggested thread for load balancing
+ Thread* GetNextSuggestedThread(u32 core);
+
/// Returns a list of all threads managed by the scheduler
const std::vector<SharedPtr<Thread>>& GetThreadList() const {
return thread_list;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 75dbfc31d..467575c93 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -962,16 +962,39 @@ static void SleepThread(s64 nanoseconds) {
// Don't attempt to yield execution if there are no available threads to run,
// this way we avoid a useless reschedule to the idle thread.
- if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
+ if (!Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
return;
+ if (nanoseconds <= 0) {
+ switch (nanoseconds) {
+ case 0:
+ GetCurrentThread()->YieldNormal();
+ break;
+ case -1:
+ GetCurrentThread()->YieldWithLoadBalancing();
+ break;
+ case -2:
+ GetCurrentThread()->YieldAndWaitForLoadBalancing();
+ break;
+ default:
+ UNREACHABLE_MSG(
+ "Unimplemented sleep yield type '{:016X}'! Falling back to forced reschedule...",
+ nanoseconds);
+ }
+
+ nanoseconds = 0;
+ }
+
// Sleep current thread and check for next thread to schedule
WaitCurrentThread_Sleep();
// Create an event to wake the thread up after the specified nanosecond delay has passed
GetCurrentThread()->WakeAfterDelay(nanoseconds);
- Core::System::GetInstance().PrepareReschedule();
+ Core::System::GetInstance().CpuCore(0).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(1).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(2).PrepareReschedule();
+ Core::System::GetInstance().CpuCore(3).PrepareReschedule();
}
/// Wait process wide key atomic
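
The non-positive values handled above are the svcSleepThread yield sentinels spelled out by the switch itself: 0 yields within the current core, -1 yields with load balancing, and -2 yields and waits for load balancing. Below is a minimal sketch of that classification in isolation; YieldType and ClassifySleep are hypothetical names for this example, not yuzu API.

// Standalone sketch of the sentinel handling in SleepThread above; only the
// 0 / -1 / -2 mapping follows the diff.
#include <cstdint>
#include <iostream>
#include <optional>

enum class YieldType {
    Normal,                  // nanoseconds == 0
    WithLoadBalancing,       // nanoseconds == -1
    AndWaitForLoadBalancing, // nanoseconds == -2
};

std::optional<YieldType> ClassifySleep(std::int64_t nanoseconds) {
    switch (nanoseconds) {
    case 0:
        return YieldType::Normal;
    case -1:
        return YieldType::WithLoadBalancing;
    case -2:
        return YieldType::AndWaitForLoadBalancing;
    default:
        // Positive values are ordinary timed sleeps; other negative values
        // hit the UNREACHABLE_MSG path in the diff above.
        return std::nullopt;
    }
}

int main() {
    std::cout << ClassifySleep(-1).has_value() << '\n';  // 1: a yield request
    std::cout << ClassifySleep(500).has_value() << '\n'; // 0: a real sleep
}
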
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 4ffb76818..ddc4da1c0 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -388,6 +388,66 @@ bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> t
return wakeup_callback(reason, std::move(thread), std::move(object), index);
}
+void Thread::YieldNormal() {
+ // Avoid yielding if the thread isn't even running.
+ if (status != ThreadStatus::Running) {
+ return;
+ }
+
+ if (nominal_priority < THREADPRIO_COUNT) {
+ scheduler->RescheduleThread(this, nominal_priority);
+ scheduler->Reschedule();
+ }
+}
+
+void Thread::YieldWithLoadBalancing() {
+ auto priority = nominal_priority;
+ auto core = processor_id;
+
+ // Avoid yielding if the thread isn't even running.
+ if (status != ThreadStatus::Running) {
+ Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
+ return;
+ }
+
+ SharedPtr<Thread> next;
+ const auto& threads = scheduler->GetThreadList();
+
+ if (priority < THREADPRIO_COUNT) {
+ // Reschedule thread to end of queue.
+ scheduler->RescheduleThread(this, priority);
+
+ const auto iter = std::find_if(threads.begin(), threads.end(),
+ [&priority](const SharedPtr<Thread>& thread) {
+ return thread->GetNominalPriority() == priority;
+ });
+
+ if (iter != threads.end())
+ next = iter->get();
+ }
+
+ Thread* suggested_thread = nullptr;
+
+ for (int i = 0; i < 4; ++i) {
+ if (i == core)
+ continue;
+
+ const auto res =
+ Core::System::GetInstance().CpuCore(i).Scheduler().GetNextSuggestedThread(core);
+ if (res != nullptr) {
+ suggested_thread = res;
+ break;
+ }
+ }
+
+ if (suggested_thread != nullptr)
+ suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
+}
+
+void Thread::YieldAndWaitForLoadBalancing() {
+ UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
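
YieldWithLoadBalancing above does two things: it rotates the current thread to the back of its priority level on its own core, then scans the other cores' schedulers for a ready thread whose affinity mask permits the current core and migrates it over with ChangeCore. The sketch below shows just that scan; SuggestFn and FakeThread are hypothetical stand-ins for Core::System::GetInstance().CpuCore(i).Scheduler().GetNextSuggestedThread(core) and Thread, and only the loop shape (skip the current core, take the first allowed candidate) follows the diff.

// Illustration only, not yuzu code.
#include <array>
#include <cstdint>
#include <functional>
#include <iostream>

struct FakeThread {
    int id;
};

// "Ask core i's scheduler for a thread that is allowed to run on `core`."
using SuggestFn = std::function<FakeThread*(std::uint32_t core)>;

FakeThread* PickThreadToMigrate(const std::array<SuggestFn, 4>& suggest_per_core,
                                std::uint32_t current_core) {
    for (std::uint32_t i = 0; i < suggest_per_core.size(); ++i) {
        if (i == current_core) {
            continue; // never scan the core we are already on
        }
        if (FakeThread* candidate = suggest_per_core[i](current_core)) {
            return candidate; // first ready thread that may run here
        }
    }
    return nullptr; // nothing suitable; the yield is just a local rotation
}

int main() {
    FakeThread waiting{7};
    std::array<SuggestFn, 4> cores{
        [](std::uint32_t) -> FakeThread* { return nullptr; },
        [](std::uint32_t) -> FakeThread* { return nullptr; },
        [&waiting](std::uint32_t) -> FakeThread* { return &waiting; },
        [](std::uint32_t) -> FakeThread* { return nullptr; },
    };
    FakeThread* picked = PickThreadToMigrate(cores, 0);
    std::cout << (picked ? picked->id : -1) << '\n'; // prints 7 (taken from core 2)
}
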
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index d384d50db..e97434dd8 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -26,6 +26,7 @@ enum ThreadPriority : u32 {
THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
THREADPRIO_LOWEST = 63, ///< Lowest thread priority
+ THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
};
enum ThreadProcessorId : s32 {
@@ -370,6 +371,10 @@ public:
return affinity_mask;
}
+ void YieldNormal();
+ void YieldWithLoadBalancing();
+ void YieldAndWaitForLoadBalancing();
+
private:
explicit Thread(KernelCore& kernel);
~Thread() override;
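
The new THREADPRIO_COUNT entry gives the yield paths in thread.cpp a bound for their nominal_priority checks (valid priorities run from 0 through 63). A small compile-time sketch of that invariant, using only the values visible in this hunk; the constants are copied out of the enum for the sake of a self-contained example.

// Standalone check mirroring the `nominal_priority < THREADPRIO_COUNT` guards
// added in thread.cpp.
#include <cstdint>

constexpr std::uint32_t THREADPRIO_USERLAND_MAX = 24;
constexpr std::uint32_t THREADPRIO_DEFAULT = 44;
constexpr std::uint32_t THREADPRIO_LOWEST = 63;
constexpr std::uint32_t THREADPRIO_COUNT = 64;

constexpr bool IsValidPriority(std::uint32_t priority) {
    return priority < THREADPRIO_COUNT; // 0..63 inclusive
}

static_assert(IsValidPriority(THREADPRIO_USERLAND_MAX));
static_assert(IsValidPriority(THREADPRIO_DEFAULT));
static_assert(IsValidPriority(THREADPRIO_LOWEST));
static_assert(!IsValidPriority(THREADPRIO_COUNT));
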