Diffstat:
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp  | 24
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h    | 27
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp         | 14
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h           | 20
-rw-r--r--  src/core/hle/kernel/physical_core.cpp             | 41
-rw-r--r--  src/core/hle/kernel/physical_core.h               | 36
-rw-r--r--  src/core/hle/kernel/slab_helpers.h                | 13
7 files changed, 81 insertions(+), 94 deletions(-)
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 40fd0c038..dd912a82d 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -71,26 +71,26 @@ void KSynchronizationObject::Finalize() {
KAutoObject::Finalize();
}
-Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
KSynchronizationObject** objects, const s32 num_objects,
s64 timeout) {
// Allocate space on stack for thread nodes.
std::vector<ThreadListNode> thread_nodes(num_objects);
// Prepare for wait.
- KThread* thread = GetCurrentThreadPointer(kernel_ctx);
+ KThread* thread = GetCurrentThreadPointer(kernel);
KHardwareTimer* timer{};
- ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
- thread_nodes.data(), num_objects);
+ ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel, objects, thread_nodes.data(),
+ num_objects);
{
// Setup the scheduling lock and sleep.
- KScopedSchedulerLockAndSleep slp(kernel_ctx, std::addressof(timer), thread, timeout);
+ KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread, timeout);
// Check if the thread should terminate.
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
- return ResultTerminationRequested;
+ R_THROW(ResultTerminationRequested);
}
// Check if any of the objects are already signaled.
@@ -100,21 +100,21 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
if (objects[i]->IsSignaled()) {
*out_index = i;
slp.CancelSleep();
- return ResultSuccess;
+ R_THROW(ResultSuccess);
}
}
// Check if the timeout is zero.
if (timeout == 0) {
slp.CancelSleep();
- return ResultTimedOut;
+ R_THROW(ResultTimedOut);
}
// Check if waiting was canceled.
if (thread->IsWaitCancelled()) {
slp.CancelSleep();
thread->ClearWaitCancelled();
- return ResultCancelled;
+ R_THROW(ResultCancelled);
}
// Add the waiters.
@@ -141,7 +141,7 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
*out_index = thread->GetSyncedIndex();
// Get the wait result.
- return thread->GetWaitResult();
+ R_RETURN(thread->GetWaitResult());
}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
@@ -158,7 +158,7 @@ void KSynchronizationObject::NotifyAvailable(Result result) {
}
// Iterate over each thread.
- for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
cur_node->thread->NotifyAvailable(this, result);
}
}
@@ -169,7 +169,7 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
// If debugging, dump the list of waiters.
{
KScopedSchedulerLock lock(kernel);
- for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+ for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
threads.emplace_back(cur_node->thread);
}
}
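
Note: the hunks above swap bare return statements for the project's result macros (R_THROW, R_RETURN, R_SUCCEED). As a reading aid only, here is a minimal sketch of what they reduce to; the macro names come from the diff, but these simplified definitions are an assumption -- the project's real macros also run failure-scope hooks around the return.

// Simplified stand-ins for the result macros used in the hunks above.
// Assumption for illustration only; real definitions add failure-scope hooks.
#include <cstdint>

struct Result {
    uint32_t raw{};
    constexpr bool IsSuccess() const { return raw == 0; }
};
inline constexpr Result ResultSuccess{0};

#define R_SUCCEED()     return ResultSuccess    // return success from a Result function
#define R_THROW(res_)   return (res_)           // propagate a specific failure code
#define R_RETURN(expr_) return (expr_)          // forward another call's Result unchanged
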
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 8d8122ab7..d55a2673d 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -24,31 +24,30 @@ public:
KThread* thread{};
};
- [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout);
+ static Result Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects,
+ const s32 num_objects, s64 timeout);
void Finalize() override;
- [[nodiscard]] virtual bool IsSignaled() const = 0;
+ virtual bool IsSignaled() const = 0;
- [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
+ std::vector<KThread*> GetWaitingThreadsForDebugging() const;
void LinkNode(ThreadListNode* node_) {
// Link the node to the list.
- if (thread_list_tail == nullptr) {
- thread_list_head = node_;
+ if (m_thread_list_tail == nullptr) {
+ m_thread_list_head = node_;
} else {
- thread_list_tail->next = node_;
+ m_thread_list_tail->next = node_;
}
- thread_list_tail = node_;
+ m_thread_list_tail = node_;
}
void UnlinkNode(ThreadListNode* node_) {
// Unlink the node from the list.
ThreadListNode* prev_ptr =
- reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
+ reinterpret_cast<ThreadListNode*>(std::addressof(m_thread_list_head));
ThreadListNode* prev_val = nullptr;
ThreadListNode *prev, *tail_prev;
@@ -59,8 +58,8 @@ public:
prev_val = prev_ptr;
} while (prev_ptr != node_);
- if (thread_list_tail == node_) {
- thread_list_tail = tail_prev;
+ if (m_thread_list_tail == node_) {
+ m_thread_list_tail = tail_prev;
}
prev->next = node_->next;
@@ -78,8 +77,8 @@ protected:
}
private:
- ThreadListNode* thread_list_head{};
- ThreadListNode* thread_list_tail{};
+ ThreadListNode* m_thread_list_head{};
+ ThreadListNode* m_thread_list_tail{};
};
} // namespace Kernel
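
Note: the header above keeps waiters in an intrusive singly-linked list with head and tail pointers, and UnlinkNode starts its walk from the address of the head pointer reinterpreted as a node, so the head pointer doubles as the next field of a sentinel. A standalone sketch of that pattern follows; Node and IntrusiveTailList are illustrative names, not from the project, and the cast is only valid while next stays the first member.

// Standalone sketch of the tail-linked intrusive list pattern used above.
// Illustrative names; the pseudo-head cast requires next to be the first member.
#include <cassert>

struct Node {
    Node* next{};   // must remain the first member for the pseudo-head cast
    int payload{};
};

struct IntrusiveTailList {
    Node* head{};
    Node* tail{};

    void Link(Node* node) {
        if (tail == nullptr) {
            head = node;
        } else {
            tail->next = node;
        }
        tail = node;
    }

    void Unlink(Node* node) {
        // Treat the address of head as a sentinel node whose next is the head.
        Node* prev = reinterpret_cast<Node*>(&head);
        while (prev->next != node) {
            assert(prev->next != nullptr);
            prev = prev->next;
        }
        if (tail == node) {
            tail = (prev == reinterpret_cast<Node*>(&head)) ? nullptr : prev;
        }
        prev->next = node->next;
        node->next = nullptr;
    }
};
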
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index faa5c73b5..c25cc2e39 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -16,18 +16,18 @@ KTransferMemory::~KTransferMemory() = default;
Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
Svc::MemoryPermission owner_perm_) {
// Set members.
- owner = GetCurrentProcessPointer(kernel);
+ m_owner = GetCurrentProcessPointer(kernel);
// TODO(bunnei): Lock for transfer memory
// Set remaining tracking members.
- owner->Open();
- owner_perm = owner_perm_;
- address = address_;
- size = size_;
- is_initialized = true;
+ m_owner->Open();
+ m_owner_perm = owner_perm_;
+ m_address = address_;
+ m_size = size_;
+ m_is_initialized = true;
- return ResultSuccess;
+ R_SUCCEED();
}
void KTransferMemory::Finalize() {
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 85d508ee7..9a37bd903 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -31,33 +31,33 @@ public:
void Finalize() override;
bool IsInitialized() const override {
- return is_initialized;
+ return m_is_initialized;
}
uintptr_t GetPostDestroyArgument() const override {
- return reinterpret_cast<uintptr_t>(owner);
+ return reinterpret_cast<uintptr_t>(m_owner);
}
static void PostDestroy(uintptr_t arg);
KProcess* GetOwner() const override {
- return owner;
+ return m_owner;
}
VAddr GetSourceAddress() const {
- return address;
+ return m_address;
}
size_t GetSize() const {
- return is_initialized ? size : 0;
+ return m_is_initialized ? m_size : 0;
}
private:
- KProcess* owner{};
- VAddr address{};
- Svc::MemoryPermission owner_perm{};
- size_t size{};
- bool is_initialized{};
+ KProcess* m_owner{};
+ VAddr m_address{};
+ Svc::MemoryPermission m_owner_perm{};
+ size_t m_size{};
+ bool m_is_initialized{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 3044922ac..2e0c36129 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -10,14 +10,14 @@
namespace Kernel {
-PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_)
- : core_index{core_index_}, system{system_}, scheduler{scheduler_} {
+PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KScheduler& scheduler)
+ : m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
auto& kernel = system.Kernel();
- arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
- system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+ system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
#else
#error Platform not supported yet.
#endif
@@ -25,13 +25,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche
PhysicalCore::~PhysicalCore() = default;
-void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
+void PhysicalCore::Initialize(bool is_64_bit) {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
- auto& kernel = system.Kernel();
+ auto& kernel = m_system.Kernel();
if (!is_64_bit) {
// We already initialized a 64-bit core, replace with a 32-bit one.
- arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
- system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ m_arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+ m_system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
}
#else
#error Platform not supported yet.
@@ -39,31 +39,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
}
void PhysicalCore::Run() {
- arm_interface->Run();
- arm_interface->ClearExclusiveState();
+ m_arm_interface->Run();
+ m_arm_interface->ClearExclusiveState();
}
void PhysicalCore::Idle() {
- std::unique_lock lk{guard};
- on_interrupt.wait(lk, [this] { return is_interrupted; });
+ std::unique_lock lk{m_guard};
+ m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
}
bool PhysicalCore::IsInterrupted() const {
- return is_interrupted;
+ return m_is_interrupted;
}
void PhysicalCore::Interrupt() {
- std::unique_lock lk{guard};
- is_interrupted = true;
- arm_interface->SignalInterrupt();
- on_interrupt.notify_all();
+ std::unique_lock lk{m_guard};
+ m_is_interrupted = true;
+ m_arm_interface->SignalInterrupt();
+ m_on_interrupt.notify_all();
}
void PhysicalCore::ClearInterrupt() {
- std::unique_lock lk{guard};
- is_interrupted = false;
- arm_interface->ClearInterrupt();
- on_interrupt.notify_all();
+ std::unique_lock lk{m_guard};
+ m_is_interrupted = false;
+ m_arm_interface->ClearInterrupt();
}
} // namespace Kernel
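
Note: Idle and Interrupt above form a condition-variable latch: Idle blocks until the interrupted flag becomes true, and Interrupt sets the flag and notifies under the same mutex. A minimal sketch of just that handshake, with illustrative names (InterruptLatch is not from the project, and the real methods also signal the ARM interface):

// Minimal condition-variable latch mirroring the Idle/Interrupt handshake above.
// Illustrative class name; the real PhysicalCore also talks to the ARM interface.
#include <condition_variable>
#include <mutex>

class InterruptLatch {
public:
    void Idle() {
        std::unique_lock lk{m_guard};
        m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
    }

    void Interrupt() {
        std::unique_lock lk{m_guard};
        m_is_interrupted = true;
        m_on_interrupt.notify_all();
    }

    void Clear() {
        // Waiters only wake when the flag turns true, so clearing needs no notify;
        // this matches the hunk above dropping notify_all from ClearInterrupt.
        std::unique_lock lk{m_guard};
        m_is_interrupted = false;
    }

private:
    std::mutex m_guard;
    std::condition_variable m_on_interrupt;
    bool m_is_interrupted{};
};
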
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index fb8e7933e..5cb398fdc 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -47,46 +47,38 @@ public:
bool IsInterrupted() const;
bool IsInitialized() const {
- return arm_interface != nullptr;
+ return m_arm_interface != nullptr;
}
Core::ARM_Interface& ArmInterface() {
- return *arm_interface;
+ return *m_arm_interface;
}
const Core::ARM_Interface& ArmInterface() const {
- return *arm_interface;
- }
-
- bool IsMainCore() const {
- return core_index == 0;
- }
-
- bool IsSystemCore() const {
- return core_index == 3;
+ return *m_arm_interface;
}
std::size_t CoreIndex() const {
- return core_index;
+ return m_core_index;
}
Kernel::KScheduler& Scheduler() {
- return scheduler;
+ return m_scheduler;
}
const Kernel::KScheduler& Scheduler() const {
- return scheduler;
+ return m_scheduler;
}
private:
- const std::size_t core_index;
- Core::System& system;
- Kernel::KScheduler& scheduler;
-
- std::mutex guard;
- std::condition_variable on_interrupt;
- std::unique_ptr<Core::ARM_Interface> arm_interface;
- bool is_interrupted{};
+ const std::size_t m_core_index;
+ Core::System& m_system;
+ Kernel::KScheduler& m_scheduler;
+
+ std::mutex m_guard;
+ std::condition_variable m_on_interrupt;
+ std::unique_ptr<Core::ARM_Interface> m_arm_interface;
+ bool m_is_interrupted{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 0228ce188..b9f5066de 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -132,7 +132,7 @@ protected:
template <typename Derived, typename Base>
class KAutoObjectWithSlabHeapAndContainer : public Base {
- static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
+ static_assert(std::is_base_of_v<KAutoObjectWithList, Base>);
private:
static Derived* Allocate(KernelCore& kernel) {
@@ -144,18 +144,18 @@ private:
}
public:
- KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
+ KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_) {}
virtual ~KAutoObjectWithSlabHeapAndContainer() {}
virtual void Destroy() override {
const bool is_initialized = this->IsInitialized();
uintptr_t arg = 0;
if (is_initialized) {
- kernel.ObjectListContainer().Unregister(this);
+ Base::kernel.ObjectListContainer().Unregister(this);
arg = this->GetPostDestroyArgument();
this->Finalize();
}
- Free(kernel, static_cast<Derived*>(this));
+ Free(Base::kernel, static_cast<Derived*>(this));
if (is_initialized) {
Derived::PostDestroy(arg);
}
@@ -169,7 +169,7 @@ public:
}
size_t GetSlabIndex() const {
- return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
+ return SlabHeap<Derived>(Base::kernel).GetObjectIndex(static_cast<const Derived*>(this));
}
public:
@@ -209,9 +209,6 @@ public:
static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
-
-protected:
- KernelCore& kernel;
};
} // namespace Kernel
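
Note: the last hunks drop the duplicated KernelCore reference and instead reach the one inherited from the base, spelled Base::kernel. That qualification is required here: inside a class template whose base depends on a template parameter, unqualified names are not looked up in the dependent base. A minimal sketch of the rule, with illustrative names (KernelCore is from the project; WithKernel, Derived, and Get are not):

// Why the hunk qualifies the inherited reference as Base::kernel.
// Illustrative types; only the name-lookup rule is the point here.
struct KernelCore {};

struct WithKernel {
protected:
    explicit WithKernel(KernelCore& k) : kernel(k) {}
    KernelCore& kernel;
};

template <typename Base>
struct Derived : public Base {
    explicit Derived(KernelCore& k) : Base(k) {}

    KernelCore& Get() {
        // return kernel;     // error: dependent base classes are not searched
        return Base::kernel;  // or equivalently: return this->kernel;
    }
};

// Usage: KernelCore core; Derived<WithKernel> obj{core}; obj.Get();
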