path: root/src/core/hle/kernel/scheduler.cpp
author    Fernando Sahmkow <fsahmkow27@gmail.com>  2020-03-08 16:25:50 +0100
committer Fernando Sahmkow <fsahmkow27@gmail.com>  2020-06-27 17:35:39 +0200
commit    4217e58a103049675df27f404171f73fa0be8537 (patch)
tree      707ae23ae29e346d21bc7febe987104801079326 /src/core/hle/kernel/scheduler.cpp
parent    Mutex: Revert workaround due to poor exclusive memory. (diff)
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp |  28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index affc2fbed..ab17204bb 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -147,9 +147,11 @@ bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
     const u32 priority = yielding_thread->GetPriority();
 
     // Yield the thread
-    const Thread* const winner = scheduled_queue[core_id].front(priority);
-    ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front");
-    scheduled_queue[core_id].yield(priority);
+    Reschedule(priority, core_id, yielding_thread);
+    const Thread* const winner = scheduled_queue[core_id].front();
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
 
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
@@ -162,9 +164,7 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     const u32 priority = yielding_thread->GetPriority();
 
     // Yield the thread
-    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
-               "Thread yielding without being in front");
-    scheduled_queue[core_id].yield(priority);
+    Reschedule(priority, core_id, yielding_thread);
 
     std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
     for (std::size_t i = 0; i < current_threads.size(); i++) {
@@ -200,6 +200,10 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         winner = next_thread;
     }
 
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
+
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
@@ -239,6 +243,12 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_threa
         } else {
            winner = yielding_thread;
         }
+    } else {
+        winner = scheduled_queue[i].front();
+    }
+
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
     }
 
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
@@ -687,7 +697,11 @@ void Scheduler::SwitchToCurrent() {
         while (!is_context_switch_pending) {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                 current_thread->context_guard.lock();
-                if (current_thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
+                if (!current_thread->IsRunnable()) {
+                    current_thread->context_guard.unlock();
+                    break;
+                }
+                if (current_thread->GetProcessorID() != core_id) {
                     current_thread->context_guard.unlock();
                     break;
                 }
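
Note: the recurring pattern in these hunks is that whenever a yield is processed on a host thread other than the one that owns core_id, the scheduler flags a pending reselection with release ordering so the owning core re-runs thread selection later. Below is a minimal sketch of that pattern, assuming hypothetical stand-in names (KernelStub, GlobalSchedulerSketch, MarkReselectionIfCrossCore) rather than yuzu's own classes; it is an illustration, not code from the commit.

// Minimal sketch of the cross-core yield pattern shown in the hunks above.
// KernelStub and GlobalSchedulerSketch are hypothetical stand-ins, not yuzu types.
#include <atomic>
#include <cstddef>

struct KernelStub {
    std::size_t host_thread_id = 0;
    // Identifies which host thread (and thus which emulated core) is executing.
    std::size_t GetCurrentHostThreadID() const {
        return host_thread_id;
    }
};

class GlobalSchedulerSketch {
public:
    explicit GlobalSchedulerSketch(KernelStub& kernel_) : kernel{kernel_} {}

    // Called after a yielding thread has been requeued on `core_id`. If the
    // yield was processed on a different host thread, the owning core cannot
    // react immediately, so flag a pending reselection with release ordering
    // (mirroring the is_reselection_pending stores added by the patch).
    void MarkReselectionIfCrossCore(std::size_t core_id) {
        if (kernel.GetCurrentHostThreadID() != core_id) {
            is_reselection_pending.store(true, std::memory_order_release);
        }
    }

    // The owning core polls this with acquire ordering before picking a thread.
    bool IsReselectionPending() const {
        return is_reselection_pending.load(std::memory_order_acquire);
    }

private:
    KernelStub& kernel;
    std::atomic<bool> is_reselection_pending{false};
};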