Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
 src/core/hle/kernel/thread.cpp | 102 +++++++++++++++++++++++---------------------
 1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 962530d2d..e84e5ce0d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -50,7 +50,7 @@ void Thread::Stop() {
// Clean up any dangling references in objects that this thread was waiting for
for (auto& wait_object : wait_objects) {
- wait_object->RemoveWaitingThread(this);
+ wait_object->RemoveWaitingThread(SharedFrom(this));
}
wait_objects.clear();
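
The change above follows from the migration, visible throughout this diff, from the kernel's intrusive SharedPtr to std::shared_ptr: WaitObject::RemoveWaitingThread now takes a std::shared_ptr<Thread>, so the thread must produce an owning pointer to itself. A minimal sketch of how a SharedFrom-style helper can be built on std::enable_shared_from_this; only the name SharedFrom comes from the diff, the rest is an assumption rather than yuzu's actual implementation:

    #include <memory>

    // Hypothetical base: an object that hands out shared_ptrs to itself must derive
    // from std::enable_shared_from_this and must itself be owned by a shared_ptr
    // (hence std::make_shared in Thread::Create further down in this diff).
    class Object : public std::enable_shared_from_this<Object> {
    public:
        virtual ~Object() = default;
    };

    // SharedFrom-style helper (assumed shape): recover an owning std::shared_ptr<T>
    // from a raw pointer, preserving the derived type via a static cast.
    template <typename T>
    std::shared_ptr<T> SharedFrom(T* raw) {
        return std::static_pointer_cast<T>(raw->shared_from_this());
    }

With that shape, wait_object->RemoveWaitingThread(SharedFrom(this)) hands the wait object a real owning reference instead of a raw pointer, matching the std::shared_ptr signatures introduced elsewhere in this diff.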
@@ -77,18 +77,6 @@ void Thread::CancelWakeupTimer() {
callback_handle);
}
-static std::optional<s32> GetNextProcessorId(u64 mask) {
- for (s32 index = 0; index < Core::NUM_CPU_CORES; ++index) {
- if (mask & (1ULL << index)) {
- if (!Core::System::GetInstance().Scheduler(index).GetCurrentThread()) {
- // Core is enabled and not running any threads, use this one
- return index;
- }
- }
- }
- return {};
-}
-
void Thread::ResumeFromWait() {
ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects");
@@ -132,8 +120,11 @@ void Thread::ResumeFromWait() {
}
void Thread::CancelWait() {
- ASSERT(GetStatus() == ThreadStatus::WaitSynch);
- ClearWaitObjects();
+ if (GetSchedulingStatus() != ThreadSchedStatus::Paused) {
+ is_sync_cancelled = true;
+ return;
+ }
+ is_sync_cancelled = false;
SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED);
ResumeFromWait();
}
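
The rewritten CancelWait drops the WaitSynch assertion: if the thread is not currently in the Paused scheduling state it only records the request in is_sync_cancelled and returns; only when the thread is paused does it clear the flag, set ERR_SYNCHRONIZATION_CANCELED, and wake the thread. A minimal sketch of this deferred-cancellation pattern, assuming (not shown in this diff) that the pause path re-checks the flag:

    // Sketch only; not yuzu's actual code.
    struct CancellableWaiter {
        bool is_sync_cancelled = false;
        bool paused = false;

        void CancelWait() {
            if (!paused) {
                // Not safe to wake yet; remember the request for later.
                is_sync_cancelled = true;
                return;
            }
            is_sync_cancelled = false;
            Wake(-1); // stand-in for ERR_SYNCHRONIZATION_CANCELED
        }

        void OnPaused() {
            paused = true;
            if (is_sync_cancelled) {
                CancelWait(); // consume the pending cancellation once it is safe
            }
        }

        void Wake(int /*result*/) { /* set the wait result and resume the thread */ }
    };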
@@ -156,9 +147,10 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd
context.fpcr = 0x03C00000;
}
-ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point,
- u32 priority, u64 arg, s32 processor_id,
- VAddr stack_top, Process& owner_process) {
+ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name,
+ VAddr entry_point, u32 priority, u64 arg,
+ s32 processor_id, VAddr stack_top,
+ Process& owner_process) {
// Check if the priority is in range. Lowest priority -> highest priority id.
if (priority > THREADPRIO_LOWEST) {
LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
@@ -170,14 +162,14 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
return ERR_INVALID_PROCESSOR_ID;
}
- if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) {
+ auto& system = Core::System::GetInstance();
+ if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) {
LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
// TODO (bunnei): Find the correct error code to use here
- return ResultCode(-1);
+ return RESULT_UNKNOWN;
}
- auto& system = Core::System::GetInstance();
- SharedPtr<Thread> thread(new Thread(kernel));
+ std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
thread->thread_id = kernel.CreateNewThreadID();
thread->status = ThreadStatus::Dormant;
@@ -206,7 +198,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
// to initialize the context
ResetThreadContext(thread->context, stack_top, entry_point, arg);
- return MakeResult<SharedPtr<Thread>>(std::move(thread));
+ return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}
void Thread::SetPriority(u32 priority) {
@@ -224,7 +216,7 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
context.cpu_registers[1] = output;
}
-s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
+s32 Thread::GetWaitObjectIndex(std::shared_ptr<WaitObject> object) const {
ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
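
GetWaitObjectIndex now takes a std::shared_ptr<WaitObject> but keeps the same lookup: it searches the wait list from the back and converts the reverse-iterator position into a forward index. A self-contained sketch of that reverse-search trick, using int values in place of wait objects:

    #include <algorithm>
    #include <vector>

    // Find the last occurrence of `value` and report its zero-based forward index.
    // Mirrors the std::distance(match, rend()) - 1 conversion in GetWaitObjectIndex;
    // like the real code, it assumes the value is present.
    int LastIndexOf(const std::vector<int>& items, int value) {
        const auto match = std::find(items.rbegin(), items.rend(), value);
        return static_cast<int>(std::distance(match, items.rend()) - 1);
    }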
@@ -264,8 +256,8 @@ void Thread::SetStatus(ThreadStatus new_status) {
status = new_status;
}
-void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
- if (thread->lock_owner == this) {
+void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
+ if (thread->lock_owner.get() == this) {
// If the thread is already waiting for this thread to release the mutex, ensure that the
// waiters list is consistent and return without doing anything.
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
@@ -285,13 +277,13 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
wait_mutex_threads.begin(), wait_mutex_threads.end(),
[&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
wait_mutex_threads.insert(insertion_point, thread);
- thread->lock_owner = this;
+ thread->lock_owner = SharedFrom(this);
UpdatePriority();
}
-void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
- ASSERT(thread->lock_owner == this);
+void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
+ ASSERT(thread->lock_owner.get() == this);
// Ensure that the thread is in the list of mutex waiters
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
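
AddMutexWaiter and RemoveMutexWaiter now traffic in std::shared_ptr<Thread>, with lock_owner.get() used to compare against this, but the ordering logic is unchanged: waiters are inserted so the list stays sorted by priority. A small self-contained sketch of that ordered insert (the Waiter type here is illustrative, not a kernel type):

    #include <algorithm>
    #include <vector>

    struct Waiter {
        unsigned priority; // lower value = higher priority, as with thread priorities here
    };

    // Insert `incoming` before the first entry with a lower priority (larger value),
    // keeping the waiter list sorted so the highest-priority waiter is found first.
    void InsertSorted(std::vector<Waiter>& waiters, const Waiter& incoming) {
        const auto insertion_point = std::find_if(
            waiters.begin(), waiters.end(),
            [&incoming](const Waiter& entry) { return entry.priority > incoming.priority; });
        waiters.insert(insertion_point, incoming);
    }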
@@ -318,16 +310,24 @@ void Thread::UpdatePriority() {
return;
}
+ if (GetStatus() == ThreadStatus::WaitCondVar) {
+ owner_process->RemoveConditionVariableThread(SharedFrom(this));
+ }
+
SetCurrentPriority(new_priority);
+ if (GetStatus() == ThreadStatus::WaitCondVar) {
+ owner_process->InsertConditionVariableThread(SharedFrom(this));
+ }
+
if (!lock_owner) {
return;
}
// Ensure that the thread is within the correct location in the waiting list.
auto old_owner = lock_owner;
- lock_owner->RemoveMutexWaiter(this);
- old_owner->AddMutexWaiter(this);
+ lock_owner->RemoveMutexWaiter(SharedFrom(this));
+ old_owner->AddMutexWaiter(SharedFrom(this));
// Recursively update the priority of the thread that depends on the priority of this one.
lock_owner->UpdatePriority();
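
UpdatePriority now removes the thread from the owning process's condition-variable wait list before SetCurrentPriority and reinserts it afterwards, which makes sense if that list is keyed by priority: changing the key in place would break the container's ordering. A minimal sketch of that remove-then-reinsert pattern using a priority-keyed multimap; the container choice is an assumption, since the Process side is not part of this diff:

    #include <map>
    #include <memory>

    struct Thread;
    using CondVarWaiters = std::multimap<unsigned /*priority*/, std::shared_ptr<Thread>>;

    void Reprioritize(CondVarWaiters& waiters, const std::shared_ptr<Thread>& thread,
                      unsigned old_priority, unsigned new_priority) {
        // Erase the entry under its old key before changing priority; mutating the
        // key in place would silently violate the container's ordering invariant.
        auto range = waiters.equal_range(old_priority);
        for (auto it = range.first; it != range.second; ++it) {
            if (it->second == thread) {
                waiters.erase(it);
                break;
            }
        }
        waiters.emplace(new_priority, thread);
    }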
@@ -340,11 +340,11 @@ void Thread::ChangeCore(u32 core, u64 mask) {
bool Thread::AllWaitObjectsReady() const {
return std::none_of(
wait_objects.begin(), wait_objects.end(),
- [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
+ [this](const std::shared_ptr<WaitObject>& object) { return object->ShouldWait(this); });
}
-bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
- SharedPtr<WaitObject> object, std::size_t index) {
+bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
+ std::shared_ptr<WaitObject> object, std::size_t index) {
ASSERT(wakeup_callback);
return wakeup_callback(reason, std::move(thread), std::move(object), index);
}
@@ -401,7 +401,7 @@ void Thread::SetCurrentPriority(u32 new_priority) {
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
- for (s32 core = max_cores - 1; core >= 0; core--) {
+ for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
if (((mask >> core) & 1) != 0) {
return core;
}
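
The only change to the HighestSetCore lambda is the explicit cast, which keeps the loop counter signed so the core >= 0 termination test is well defined even though max_cores is unsigned. A standalone sketch of the helper as this hunk leaves it, using <cstdint> types in place of the project's s32/u32/u64 aliases and returning -1 when no bit is set (the real sentinel value is not visible in this diff):

    #include <cstdint>

    // Scan from the top core downwards and return the highest core index whose
    // bit is set in the affinity mask, or -1 when none is.
    std::int32_t HighestSetCore(std::uint64_t mask, std::uint32_t max_cores) {
        for (std::int32_t core = static_cast<std::int32_t>(max_cores - 1); core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1;
    }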
@@ -425,7 +425,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
if (old_affinity_mask != new_affinity_mask) {
const s32 old_core = processor_id;
if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
- if (ideal_core < 0) {
+ if (static_cast<s32>(ideal_core) < 0) {
processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
} else {
processor_id = ideal_core;
@@ -447,23 +447,23 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
ThreadSchedStatus::Runnable) {
// In this case the thread was running, now it's pausing/exiting
if (processor_id >= 0) {
- scheduler.Unschedule(current_priority, processor_id, this);
+ scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
}
- for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
- if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
- scheduler.Unsuggest(current_priority, static_cast<u32>(core), this);
+ for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
+ scheduler.Unsuggest(current_priority, core, this);
}
}
} else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
// The thread is now set to running from being stopped
if (processor_id >= 0) {
- scheduler.Schedule(current_priority, processor_id, this);
+ scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
}
- for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
- if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
- scheduler.Suggest(current_priority, static_cast<u32>(core), this);
+ for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
+ scheduler.Suggest(current_priority, core, this);
}
}
}
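
The scheduling hunks above (and the ones that follow) all tighten the same signed/unsigned mismatch: the per-core loops now use u32 and compare against static_cast<u32>(processor_id), so a processor_id of -1 can never match a core index. The underlying Schedule/Suggest split is unchanged; a generic sketch of it, assuming Schedule queues the thread on its assigned core and Suggest marks it as migratable to the other cores allowed by the affinity mask:

    #include <cstdint>

    constexpr std::uint32_t NUM_CPU_CORES = 4;

    template <typename Scheduler, typename Thread>
    void ScheduleOnCores(Scheduler& scheduler, Thread* thread, std::uint32_t priority,
                         std::int32_t processor_id, std::uint64_t affinity_mask) {
        if (processor_id >= 0) {
            // Queue the thread on the core it is assigned to.
            scheduler.Schedule(priority, static_cast<std::uint32_t>(processor_id), thread);
        }
        for (std::uint32_t core = 0; core < NUM_CPU_CORES; core++) {
            // Every other core allowed by the affinity mask only gets a suggestion.
            if (core != static_cast<std::uint32_t>(processor_id) &&
                ((affinity_mask >> core) & 1) != 0) {
                scheduler.Suggest(priority, core, thread);
            }
        }
    }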
@@ -477,11 +477,11 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
}
auto& scheduler = Core::System::GetInstance().GlobalScheduler();
if (processor_id >= 0) {
- scheduler.Unschedule(old_priority, processor_id, this);
+ scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
}
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
- if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+ if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(old_priority, core, this);
}
}
@@ -491,14 +491,14 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
if (processor_id >= 0) {
if (current_thread == this) {
- scheduler.SchedulePrepend(current_priority, processor_id, this);
+ scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this);
} else {
- scheduler.Schedule(current_priority, processor_id, this);
+ scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
}
}
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
- if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+ if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, core, this);
}
}
@@ -515,7 +515,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (((old_affinity_mask >> core) & 1) != 0) {
- if (core == old_core) {
+ if (core == static_cast<u32>(old_core)) {
scheduler.Unschedule(current_priority, core, this);
} else {
scheduler.Unsuggest(current_priority, core, this);
@@ -525,7 +525,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (((affinity_mask >> core) & 1) != 0) {
- if (core == processor_id) {
+ if (core == static_cast<u32>(processor_id)) {
scheduler.Schedule(current_priority, core, this);
} else {
scheduler.Suggest(current_priority, core, this);