Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp            |  4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                | 32
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                  |  4
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp   |  6
-rw-r--r--  src/core/hle/kernel/kernel.cpp                     |  2
-rw-r--r--  src/core/hle/kernel/mutex.cpp                      |  2
-rw-r--r--  src/core/hle/kernel/process.cpp                    |  4
-rw-r--r--  src/core/hle/kernel/svc.cpp                        | 11
-rw-r--r--  src/core/hle/kernel/thread.cpp                     | 81
-rw-r--r--  src/core/hle/kernel/thread.h                       | 75
-rw-r--r--  src/core/hle/kernel/time_manager.cpp               |  3
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp                    | 59
12 files changed, 111 insertions, 172 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 23e1ef032..fe8675186 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -201,7 +201,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
         current_thread->SetArbiterWaitAddress(address);
         InsertThread(SharedFrom(current_thread));
-        current_thread->SetState(ThreadStatus::WaitArb);
+        current_thread->SetState(ThreadState::Waiting);
         current_thread->WaitForArbitration(true);
     }
@@ -256,7 +256,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
         current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
         current_thread->SetArbiterWaitAddress(address);
         InsertThread(SharedFrom(current_thread));
-        current_thread->SetState(ThreadStatus::WaitArb);
+        current_thread->SetState(ThreadState::Waiting);
         current_thread->WaitForArbitration(true);
     }
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index f44d31992..cdcb89f68 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -180,22 +180,22 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
     return cores_needing_scheduling;
 }
 
-void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Check if the state has changed, because if it hasn't there's nothing to do.
-    const auto cur_state = thread->scheduling_state;
+    const auto cur_state = thread->GetRawState();
     if (cur_state == old_state) {
         return;
     }
 
     // Update the priority queues.
-    if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    if (old_state == ThreadState::Runnable) {
         // If we were previously runnable, then we're not runnable now, and we should remove.
         GetPriorityQueue(kernel).Remove(thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
-    } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    } else if (cur_state == ThreadState::Runnable) {
         // If we're now runnable, then we weren't previously, and we should add.
         GetPriorityQueue(kernel).PushBack(thread);
         IncrementScheduledCount(thread);
@@ -204,12 +204,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol
 }
 
 void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
-                                         u32 old_priority) {
+                                         s32 old_priority) {
 
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // If the thread is runnable, we want to change its priority in the queue.
-    if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    if (thread->GetRawState() == ThreadState::Runnable) {
         GetPriorityQueue(kernel).ChangePriority(
             old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
         IncrementScheduledCount(thread);
@@ -222,7 +222,7 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // If the thread is runnable, we want to change its affinity in the queue.
-    if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    if (thread->GetRawState() == ThreadState::Runnable) {
         GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
@@ -395,8 +395,8 @@ void KScheduler::YieldWithoutCoreMigration() {
     {
         KScopedSchedulerLock lock(kernel);
 
-        const auto cur_state = cur_thread.scheduling_state;
-        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        const auto cur_state = cur_thread.GetRawState();
+        if (cur_state == ThreadState::Runnable) {
            // Put the current thread at the back of the queue.
            Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
            IncrementScheduledCount(std::addressof(cur_thread));
@@ -436,8 +436,8 @@ void KScheduler::YieldWithCoreMigration() {
     {
         KScopedSchedulerLock lock(kernel);
 
-        const auto cur_state = cur_thread.scheduling_state;
-        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        const auto cur_state = cur_thread.GetRawState();
+        if (cur_state == ThreadState::Runnable) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();
@@ -526,8 +526,8 @@ void KScheduler::YieldToAnyThread() {
     {
         KScopedSchedulerLock lock(kernel);
 
-        const auto cur_state = cur_thread.scheduling_state;
-        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        const auto cur_state = cur_thread.GetRawState();
+        if (cur_state == ThreadState::Runnable) {
            // Get the current active core.
            const s32 core_id = cur_thread.GetActiveCore();
@@ -645,7 +645,7 @@ void KScheduler::Unload(Thread* thread) {
 
 void KScheduler::Reload(Thread* thread) {
     if (thread) {
-        ASSERT_MSG(thread->GetState() == ThreadSchedStatus::Runnable, "Thread must be runnable.");
+        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
         // Cancel any outstanding wakeup events for this thread
         thread->SetIsRunning(true);
@@ -724,7 +724,7 @@ void KScheduler::SwitchToCurrent() {
         do {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                 current_thread->context_guard.lock();
-                if (!current_thread->IsRunnable()) {
+                if (current_thread->GetRawState() != ThreadState::Runnable) {
                     current_thread->context_guard.unlock();
                     break;
                 }
@@ -771,7 +771,7 @@ void KScheduler::Initialize() {
     {
         KScopedSchedulerLock lock{system.Kernel()};
-        idle_thread->SetState(ThreadStatus::Ready);
+        idle_thread->SetState(ThreadState::Runnable);
     }
 }
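The queue maintenance above boils down to comparing the thread's previous and current raw states against ThreadState::Runnable. The following standalone sketch mirrors only that decision; the stand-in enum and the QueueAction/DecideQueueAction names are assumptions for illustration, not part of this commit.

#include <cstdint>
#include <iostream>

// Stand-in for the kernel's ThreadState base states (see thread.h later in this diff).
enum class ThreadState : std::uint16_t { Initialized = 0, Waiting = 1, Runnable = 2, Terminated = 3 };

enum class QueueAction { None, Remove, PushBack };

// Mirrors the branch structure of KScheduler::OnThreadStateChanged: the priority queue
// only changes when a thread enters or leaves the Runnable state.
QueueAction DecideQueueAction(ThreadState old_state, ThreadState cur_state) {
    if (cur_state == old_state) {
        return QueueAction::None; // Nothing changed; nothing to do.
    }
    if (old_state == ThreadState::Runnable) {
        return QueueAction::Remove; // Was runnable, no longer is: take it off the queue.
    }
    if (cur_state == ThreadState::Runnable) {
        return QueueAction::PushBack; // Became runnable: append it to the queue.
    }
    return QueueAction::None; // e.g. Waiting -> Terminated never touches the queue.
}

int main() {
    // A thread waking from a wait gets pushed to the back of its priority queue.
    std::cout << (DecideQueueAction(ThreadState::Waiting, ThreadState::Runnable) ==
                  QueueAction::PushBack)
              << '\n'; // prints 1
}

In the real code the comparison is made on the raw state (base state plus suspend bits), which is why the scheduler callers snapshot GetRawState() before mutating the thread.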
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index e84abc84c..677375d1a 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -100,11 +100,11 @@ public:
     void YieldToAnyThread();
 
     /// Notify the scheduler a thread's status has changed.
-    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
+    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state);
 
     /// Notify the scheduler a thread's priority has changed.
     static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
-                                        u32 old_priority);
+                                        s32 old_priority);
 
     /// Notify the scheduler a thread's core and/or affinity mask has changed.
     static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index e7fd119d8..64c566caa 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -77,7 +77,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
             // Mark the thread as waiting.
             thread->SetCancellable();
             thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
-            thread->SetState(ThreadState::WaitSynch);
+            thread->SetState(ThreadState::Waiting);
         }
 
         // The lock/sleep is done, so we should be able to get our result.
@@ -148,9 +148,9 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
     // Iterate over each thread.
     for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
         Thread* thread = cur_node->thread;
-        if (thread->GetState() == ThreadSchedStatus::Paused) {
+        if (thread->GetState() == ThreadState::Waiting) {
             thread->SetSyncedObject(this, result);
-            thread->SetState(ThreadStatus::Ready);
+            thread->SetState(ThreadState::Runnable);
         }
     }
 }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index f1dcbe2eb..af4a5e33d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -602,7 +602,7 @@ void KernelCore::Suspend(bool in_suspention) {
     const bool should_suspend = exception_exited || in_suspention;
     {
         KScopedSchedulerLock lock(*this);
-        ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
+        const auto status = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             impl->suspend_threads[i]->SetState(status);
         }
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index badd883aa..8a0faacf8 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -107,7 +107,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
         current_thread->SetMutexWaitAddress(address);
         current_thread->SetWaitHandle(requesting_thread_handle);
-        current_thread->SetState(ThreadStatus::WaitMutex);
+        current_thread->SetState(ThreadState::Waiting);
 
         // Update the lock holder thread's priority to prevent priority inversion.
         holding_thread->AddMutexWaiter(current_thread);
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 92e877c3e..a306c7c73 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -55,7 +55,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
     {
         KScopedSchedulerLock lock{kernel};
-        thread->SetState(ThreadStatus::Ready);
+        thread->SetState(ThreadState::Runnable);
     }
 }
 } // Anonymous namespace
@@ -318,7 +318,7 @@ void Process::PrepareForTermination() {
                 continue;
 
             // TODO(Subv): When are the other running/ready threads terminated?
-            ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch,
+            ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
                        "Exiting processes with non-waiting threads is currently unimplemented");
 
            thread->Stop();
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 0a3064c7d..304b8727d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -343,7 +343,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
-        thread->SetState(ThreadStatus::WaitIPC);
+        thread->SetState(ThreadState::Waiting);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
     }
@@ -1546,7 +1546,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
 
-    ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
+    ASSERT(thread->GetState() == ThreadState::Initialized);
 
     return thread->Start();
 }
@@ -1661,7 +1661,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
         current_thread->SetCondVarWaitAddress(condition_variable_addr);
         current_thread->SetMutexWaitAddress(mutex_addr);
         current_thread->SetWaitHandle(thread_handle);
-        current_thread->SetState(ThreadStatus::WaitCondVar);
+        current_thread->SetState(ThreadState::Waiting);
+        current_thread->SetWaitingCondVar(true);
         current_process->InsertConditionVariableThread(SharedFrom(current_thread));
     }
@@ -1755,9 +1756,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
                 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
                 auto owner = handle_table.Get<Thread>(owner_handle);
                 ASSERT(owner);
-                if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-                    thread->SetState(ThreadStatus::WaitMutex);
-                }
+                thread->SetWaitingCondVar(false);
 
                 owner->AddMutexWaiter(thread);
             }
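With the dedicated WaitCondVar and WaitMutex statuses gone, the condition-variable path above parks the thread in the single Waiting state and records the extra detail in a separate flag, which SignalProcessWideKey simply clears once the thread moves on to contend for the mutex. The snippet below is a rough standalone illustration of that bookkeeping; FakeThread and the two helper functions are invented for the example and are not in the commit.

#include <cassert>
#include <cstdint>

enum class ThreadState : std::uint16_t { Initialized, Waiting, Runnable, Terminated };

// Invented stand-in for the relevant Thread fields after this change.
struct FakeThread {
    ThreadState state{ThreadState::Runnable};
    bool is_waiting_on_condvar{false};
};

// WaitProcessWideKeyAtomic: waiting on the condvar is the generic Waiting state plus a flag.
void BeginCondVarWait(FakeThread& t) {
    t.state = ThreadState::Waiting;
    t.is_waiting_on_condvar = true;
}

// SignalProcessWideKey: the thread now contends on the mutex instead; it stays Waiting,
// only the condvar flag is cleared (previously this was a WaitCondVar -> WaitMutex move).
void MoveToMutexWait(FakeThread& t) {
    t.is_waiting_on_condvar = false;
}

int main() {
    FakeThread t;
    BeginCondVarWait(t);
    assert(t.state == ThreadState::Waiting && t.is_waiting_on_condvar);
    MoveToMutexWait(t);
    assert(t.state == ThreadState::Waiting && !t.is_waiting_on_condvar);
}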
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ac19e2997..33a4e1fa3 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -44,7 +44,7 @@ Thread::~Thread() = default;
 void Thread::Stop() {
     {
         KScopedSchedulerLock lock(kernel);
-        SetState(ThreadStatus::Dead);
+        SetState(ThreadState::Terminated);
         signaled = true;
         NotifyAvailable();
         kernel.GlobalHandleTable().Close(global_handle);
@@ -62,54 +62,43 @@ void Thread::Stop() {
 void Thread::Wakeup() {
     KScopedSchedulerLock lock(kernel);
-    switch (status) {
-    case ThreadStatus::Paused:
-    case ThreadStatus::WaitSynch:
-    case ThreadStatus::WaitHLEEvent:
-    case ThreadStatus::WaitSleep:
-    case ThreadStatus::WaitIPC:
-    case ThreadStatus::WaitMutex:
-    case ThreadStatus::WaitCondVar:
-    case ThreadStatus::WaitArb:
-    case ThreadStatus::Dormant:
-        break;
-
-    case ThreadStatus::Ready:
+    switch (thread_state) {
+    case ThreadState::Runnable:
         // If the thread is waiting on multiple wait objects, it might be awoken more than once
         // before actually resuming. We can ignore subsequent wakeups if the thread status has
         // already been set to ThreadStatus::Ready.
         return;
-    case ThreadStatus::Dead:
+    case ThreadState::Terminated:
         // This should never happen, as threads must complete before being stopped.
         DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
                          GetObjectId());
         return;
     }
-    SetState(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
 }
 
 void Thread::OnWakeUp() {
     KScopedSchedulerLock lock(kernel);
-    SetState(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
 }
 
 ResultCode Thread::Start() {
     KScopedSchedulerLock lock(kernel);
-    SetState(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
     return RESULT_SUCCESS;
 }
 
 void Thread::CancelWait() {
     KScopedSchedulerLock lock(kernel);
-    if (GetState() != ThreadSchedStatus::Paused || !is_cancellable) {
+    if (GetState() != ThreadState::Waiting || !is_cancellable) {
         is_sync_cancelled = true;
         return;
     }
     // TODO(Blinkhawk): Implement cancel of server session
     is_sync_cancelled = false;
     SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
-    SetState(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
 }
 
 static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -173,7 +162,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
 
     thread->thread_id = kernel.CreateNewThreadID();
-    thread->status = ThreadStatus::Dormant;
+    thread->thread_state = ThreadState::Initialized;
     thread->entry_point = entry_point;
     thread->stack_top = stack_top;
     thread->disable_count = 1;
@@ -235,27 +224,18 @@ VAddr Thread::GetCommandBufferAddress() const {
     return GetTLSAddress() + command_header_offset;
 }
 
-void Thread::SetState(ThreadStatus new_status) {
-    if (new_status == status) {
+void Thread::SetState(ThreadState new_status) {
+    if (new_status == thread_state) {
         return;
     }
 
-    switch (new_status) {
-    case ThreadStatus::Ready:
-        SetSchedulingStatus(ThreadSchedStatus::Runnable);
-        break;
-    case ThreadStatus::Dormant:
-        SetSchedulingStatus(ThreadSchedStatus::None);
-        break;
-    case ThreadStatus::Dead:
-        SetSchedulingStatus(ThreadSchedStatus::Exited);
-        break;
-    default:
-        SetSchedulingStatus(ThreadSchedStatus::Paused);
-        break;
+    if (new_status != ThreadState::Waiting) {
+        SetWaitingCondVar(false);
     }
 
-    status = new_status;
+    SetSchedulingStatus(new_status);
+
+    thread_state = new_status;
 }
 
 void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
@@ -312,13 +292,13 @@ void Thread::UpdatePriority() {
         return;
     }
 
-    if (GetStatus() == ThreadStatus::WaitCondVar) {
+    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
         owner_process->RemoveConditionVariableThread(SharedFrom(this));
     }
 
     SetCurrentPriority(new_priority);
 
-    if (GetStatus() == ThreadStatus::WaitCondVar) {
+    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
         owner_process->InsertConditionVariableThread(SharedFrom(this));
     }
@@ -340,7 +320,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
 
     auto sched_status = GetState();
 
-    if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
+    if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
         return ERR_INVALID_STATE;
     }
@@ -366,7 +346,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
         KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
-        SetState(ThreadStatus::WaitSleep);
+        SetState(ThreadState::Waiting);
     }
 
     if (event_handle != InvalidHandle) {
@@ -377,25 +357,24 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
 }
 
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
-    const u32 old_state = scheduling_state;
+    const auto old_state = GetRawState();
     pausing_state |= static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetState());
-    scheduling_state = base_scheduling | pausing_state;
+    const auto base_scheduling = GetState();
+    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
-    const u32 old_state = scheduling_state;
+    const auto old_state = GetRawState();
     pausing_state &= ~static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetState());
-    scheduling_state = base_scheduling | pausing_state;
+    const auto base_scheduling = GetState();
+    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
-    const u32 old_state = scheduling_state;
-    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
-                       static_cast<u32>(new_status);
+void Thread::SetSchedulingStatus(ThreadState new_status) {
+    const auto old_state = GetRawState();
+    thread_state = (thread_state & ThreadState::HighMask) | new_status;
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 69458548b..06dd2ef2d 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -73,19 +73,26 @@ enum ThreadProcessorId : s32 {
     (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
 };
 
-enum class ThreadStatus {
-    Ready,        ///< Ready to run
-    Paused,       ///< Paused by SetThreadActivity or debug
-    WaitHLEEvent, ///< Waiting for hle event to finish
-    WaitSleep,    ///< Waiting due to a SleepThread SVC
-    WaitIPC,      ///< Waiting for the reply from an IPC request
-    WaitSynch,    ///< Waiting due to WaitSynchronization
-    WaitMutex,    ///< Waiting due to an ArbitrateLock svc
-    WaitCondVar,  ///< Waiting due to an WaitProcessWideKey svc
-    WaitArb,      ///< Waiting due to a SignalToAddress/WaitForAddress svc
-    Dormant,      ///< Created but not yet made ready
-    Dead          ///< Run to completion, or forcefully terminated
+enum class ThreadState : u16 {
+    Initialized = 0,
+    Waiting = 1,
+    Runnable = 2,
+    Terminated = 3,
+
+    SuspendShift = 4,
+    Mask = (1 << SuspendShift) - 1,
+
+    ProcessSuspended = (1 << (0 + SuspendShift)),
+    ThreadSuspended = (1 << (1 + SuspendShift)),
+    DebugSuspended = (1 << (2 + SuspendShift)),
+    BacktraceSuspended = (1 << (3 + SuspendShift)),
+    InitSuspended = (1 << (4 + SuspendShift)),
+
+    SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
+
+    HighMask = 0xfff0,
 };
+DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
 
 enum class ThreadWakeupReason {
     Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
@@ -97,13 +104,6 @@ enum class ThreadActivity : u32 {
     Paused = 1,
 };
 
-enum class ThreadSchedStatus : u32 {
-    None = 0,
-    Paused = 1,
-    Runnable = 2,
-    Exited = 3,
-};
-
 enum class ThreadSchedFlags : u32 {
     ProcessPauseFlag = 1 << 4,
     ThreadPauseFlag = 1 << 5,
@@ -111,12 +111,6 @@ enum class ThreadSchedFlags : u32 {
     KernelInitPauseFlag = 1 << 8,
 };
 
-enum class ThreadSchedMasks : u32 {
-    LowMask = 0x000f,
-    HighMask = 0xfff0,
-    ForcePauseMask = 0x0070,
-};
-
 class Thread final : public KSynchronizationObject {
 public:
     explicit Thread(KernelCore& kernel);
@@ -326,11 +320,19 @@ public:
 
     std::shared_ptr<Common::Fiber>& GetHostContext();
 
-    ThreadStatus GetStatus() const {
-        return status;
+    ThreadState GetState() const {
+        return thread_state & ThreadState::Mask;
     }
 
-    void SetState(ThreadStatus new_status);
+    ThreadState GetRawState() const {
+        return thread_state;
+    }
+
+    void SetState(ThreadState new_state);
+
+    void SetWaitingCondVar(bool value) {
+        is_waiting_on_condvar = value;
+    }
 
     s64 GetLastScheduledTick() const {
         return this->last_scheduled_tick;
@@ -447,15 +449,6 @@ public:
         this->schedule_count = count;
     }
 
-    ThreadSchedStatus GetState() const {
-        return static_cast<ThreadSchedStatus>(scheduling_state &
-                                              static_cast<u32>(ThreadSchedMasks::LowMask));
-    }
-
-    bool IsRunnable() const {
-        return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
-    }
-
     bool IsRunning() const {
         return is_running;
     }
@@ -497,7 +490,7 @@ public:
     }
 
     bool IsTerminationRequested() const {
-        return will_be_terminated || GetState() == ThreadSchedStatus::Exited;
+        return will_be_terminated || GetState() == ThreadState::Terminated;
     }
 
     bool IsPaused() const {
@@ -590,7 +583,7 @@ private:
     friend class KScheduler;
     friend class Process;
 
-    void SetSchedulingStatus(ThreadSchedStatus new_status);
+    void SetSchedulingStatus(ThreadState new_status);
     void AddSchedulingFlag(ThreadSchedFlags flag);
     void RemoveSchedulingFlag(ThreadSchedFlags flag);
     void SetCurrentPriority(u32 new_priority);
@@ -600,8 +593,7 @@ private:
     ThreadContext64 context_64{};
     std::shared_ptr<Common::Fiber> host_context{};
 
-    ThreadStatus status = ThreadStatus::Dormant;
-    u32 scheduling_state = 0;
+    ThreadState thread_state = ThreadState::Initialized;
 
     u64 thread_id = 0;
@@ -647,6 +639,7 @@ private:
     /// If waiting on a ConditionVariable, this is the ConditionVariable address
     VAddr condvar_wait_address = 0;
+    bool is_waiting_on_condvar{};
     /// If waiting on a Mutex, this is the mutex address
    VAddr mutex_wait_address = 0;
    /// The handle used to wait for the mutex.
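The replacement enum packs the scheduling state into the low four bits and the suspend flags into the bits at and above SuspendShift, so GetState() is a mask with ThreadState::Mask, GetRawState() returns the whole value, and AddSchedulingFlag/SetSchedulingStatus splice the two halves back together. Below is a self-contained sketch of that layout; only the enumerators shown in the diff are taken from it, the operators stand in for what DECLARE_ENUM_FLAG_OPERATORS generates, and main() is purely illustrative.

#include <cstdint>
#include <iostream>

enum class ThreadState : std::uint16_t {
    Initialized = 0,
    Waiting = 1,
    Runnable = 2,
    Terminated = 3,

    SuspendShift = 4,
    Mask = (1 << SuspendShift) - 1,             // 0x000f: selects the base scheduling state
    ProcessSuspended = 1 << (0 + SuspendShift), // one of the suspend/pause flags
    HighMask = 0xfff0,                          // everything above the base state
};

// Equivalent of what DECLARE_ENUM_FLAG_OPERATORS(ThreadState) provides in the codebase.
constexpr ThreadState operator|(ThreadState a, ThreadState b) {
    return static_cast<ThreadState>(static_cast<std::uint16_t>(a) | static_cast<std::uint16_t>(b));
}
constexpr ThreadState operator&(ThreadState a, ThreadState b) {
    return static_cast<ThreadState>(static_cast<std::uint16_t>(a) & static_cast<std::uint16_t>(b));
}

int main() {
    // A runnable thread that has also been suspended by its process: the raw state carries both.
    const ThreadState raw = ThreadState::Runnable | ThreadState::ProcessSuspended;

    // GetState(): mask away the suspend bits, leaving only the base scheduling state.
    const ThreadState base = raw & ThreadState::Mask;

    std::cout << (base == ThreadState::Runnable) << '\n';                                   // 1
    std::cout << ((raw & ThreadState::HighMask) == ThreadState::ProcessSuspended) << '\n';  // 1
}

This is why the scheduler compares raw states for exact equality with Runnable: a suspended-but-otherwise-runnable thread has extra high bits set and is deliberately not treated as schedulable.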
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 79628e2b4..b58a76dba 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -42,8 +42,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
     event_handle = timetask->GetGlobalHandle();
     if (nanoseconds > 0) {
         ASSERT(timetask);
-        ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
-        ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
+        ASSERT(timetask->GetState() != ThreadState::Runnable);
         system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
                                           time_manager_event_type, event_handle);
     } else {
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 8d91d600a..acf6b7ab5 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -238,8 +238,8 @@ WaitTreeThread::~WaitTreeThread() = default;
 QString WaitTreeThread::GetText() const {
     const auto& thread = static_cast<const Kernel::Thread&>(object);
     QString status;
-    switch (thread.GetStatus()) {
-    case Kernel::ThreadStatus::Ready:
+    switch (thread.GetState()) {
+    case Kernel::ThreadState::Runnable:
         if (!thread.IsPaused()) {
             if (thread.WasRunning()) {
                 status = tr("running");
@@ -250,35 +250,14 @@ QString WaitTreeThread::GetText() const {
             status = tr("paused");
         }
         break;
-    case Kernel::ThreadStatus::Paused:
-        status = tr("paused");
+    case Kernel::ThreadState::Waiting:
+        status = tr("waiting");
         break;
-    case Kernel::ThreadStatus::WaitHLEEvent:
-        status = tr("waiting for HLE return");
+    case Kernel::ThreadState::Initialized:
+        status = tr("initialized");
         break;
-    case Kernel::ThreadStatus::WaitSleep:
-        status = tr("sleeping");
-        break;
-    case Kernel::ThreadStatus::WaitIPC:
-        status = tr("waiting for IPC reply");
-        break;
-    case Kernel::ThreadStatus::WaitSynch:
-        status = tr("waiting for objects");
-        break;
-    case Kernel::ThreadStatus::WaitMutex:
-        status = tr("waiting for mutex");
-        break;
-    case Kernel::ThreadStatus::WaitCondVar:
-        status = tr("waiting for condition variable");
-        break;
-    case Kernel::ThreadStatus::WaitArb:
-        status = tr("waiting for address arbiter");
-        break;
-    case Kernel::ThreadStatus::Dormant:
-        status = tr("dormant");
-        break;
-    case Kernel::ThreadStatus::Dead:
-        status = tr("dead");
+    case Kernel::ThreadState::Terminated:
+        status = tr("terminated");
         break;
     }
@@ -294,8 +273,8 @@ QColor WaitTreeThread::GetColor() const {
     const std::size_t color_index = IsDarkTheme() ? 1 : 0;
 
     const auto& thread = static_cast<const Kernel::Thread&>(object);
-    switch (thread.GetStatus()) {
-    case Kernel::ThreadStatus::Ready:
+    switch (thread.GetState()) {
+    case Kernel::ThreadState::Runnable:
         if (!thread.IsPaused()) {
             if (thread.WasRunning()) {
                 return QColor(WaitTreeColors[0][color_index]);
@@ -305,21 +284,11 @@ QColor WaitTreeThread::GetColor() const {
         } else {
             return QColor(WaitTreeColors[2][color_index]);
         }
-    case Kernel::ThreadStatus::Paused:
+    case Kernel::ThreadState::Waiting:
         return QColor(WaitTreeColors[3][color_index]);
-    case Kernel::ThreadStatus::WaitHLEEvent:
-    case Kernel::ThreadStatus::WaitIPC:
-        return QColor(WaitTreeColors[4][color_index]);
-    case Kernel::ThreadStatus::WaitSleep:
-        return QColor(WaitTreeColors[5][color_index]);
-    case Kernel::ThreadStatus::WaitSynch:
-    case Kernel::ThreadStatus::WaitMutex:
-    case Kernel::ThreadStatus::WaitCondVar:
-    case Kernel::ThreadStatus::WaitArb:
-        return QColor(WaitTreeColors[6][color_index]);
-    case Kernel::ThreadStatus::Dormant:
+    case Kernel::ThreadState::Initialized:
         return QColor(WaitTreeColors[7][color_index]);
-    case Kernel::ThreadStatus::Dead:
+    case Kernel::ThreadState::Terminated:
         return QColor(WaitTreeColors[8][color_index]);
     default:
         return WaitTreeItem::GetColor();
@@ -367,7 +336,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
         list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
     }
 
-    if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) {
+    if (thread.GetState() == Kernel::ThreadState::Waiting) {
         list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjectsForDebugging(),
                                                             thread.IsCancellable()));
     }
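For consumers such as the debugger's wait tree above, the collapsed enum reduces status display to an exhaustive switch over four base states; the finer "why is it waiting" detail now has to come from other thread fields (mutex/condvar wait addresses and the like). The following is a hedged sketch of that kind of mapping, not the Qt code from this commit; the StateLabel helper is invented for the example.

#include <cstdint>
#include <iostream>
#include <string_view>

enum class ThreadState : std::uint16_t { Initialized = 0, Waiting = 1, Runnable = 2, Terminated = 3 };

// Maps the four base states to display text, roughly the shape of WaitTreeThread::GetText()
// after this change (the real code also distinguishes running/paused runnable threads).
std::string_view StateLabel(ThreadState state) {
    switch (state) {
    case ThreadState::Runnable:
        return "runnable";
    case ThreadState::Waiting:
        return "waiting";
    case ThreadState::Initialized:
        return "initialized";
    case ThreadState::Terminated:
        return "terminated";
    }
    return "unknown"; // unreachable for masked base states, kept to silence warnings
}

int main() {
    std::cout << StateLabel(ThreadState::Waiting) << '\n'; // prints "waiting"
}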