Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 6
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 13
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 17
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 19
-rw-r--r--  src/core/hle/kernel/k_client_port.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_client_port.h | 4
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_client_session.h | 6
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_code_memory.h | 14
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 18
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 6
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_handle_table.h | 8
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.cpp | 3
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 3
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 16
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 16
-rw-r--r--  src/core/hle/kernel/k_page_group.h (renamed from src/core/hle/kernel/k_page_linked_list.h) | 10
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 177
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 173
-rw-r--r--  src/core/hle/kernel/k_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_port.h | 2
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 45
-rw-r--r--  src/core/hle/kernel/k_process.h | 31
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 6
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 32
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 10
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_server_session.h | 12
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 16
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 18
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 13
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 8
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 73
-rw-r--r--  src/core/hle/kernel/k_thread.h | 61
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 4
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 9
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h | 4
-rw-r--r--  src/core/hle/kernel/k_writable_event.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_writable_event.h | 4
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 13
-rw-r--r--  src/core/hle/kernel/kernel.h | 3
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 43
-rw-r--r--  src/core/hle/kernel/process_capability.h | 38
-rw-r--r--  src/core/hle/kernel/svc.cpp | 331
-rw-r--r--  src/core/hle/kernel/svc_results.h | 58
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 124
55 files changed, 773 insertions, 772 deletions
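
Note: the hunks below apply two mechanical renames across the kernel HLE layer: the ResultCode union becomes Result, and KPageLinkedList becomes KPageGroup (k_page_linked_list.h is renamed to k_page_group.h accordingly). As a minimal illustrative sketch of the post-rename call pattern, not part of the diff itself, using only identifiers that appear in the hunks (Result, ResultSuccess, ResultInvalidSize) and the R_UNLESS guard, which is assumed here to come from core/hle/result.h:

// Illustrative sketch only; the function and its check are hypothetical.
#include <cstddef>

#include "core/hle/kernel/svc_results.h" // ResultInvalidSize
#include "core/hle/result.h"             // Result, ResultSuccess, R_UNLESS (assumed location)

namespace Kernel {

// Validate a size argument and report success, returning the renamed Result
// type exactly as the updated kernel functions below do.
Result ValidateExampleSize(std::size_t size) {
    // Reject zero-sized requests, mirroring the R_UNLESS guards in k_code_memory.cpp.
    R_UNLESS(size != 0, ResultInvalidSize);
    return ResultSuccess;
}

} // namespace Kernel
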
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 4650d25b0..45135a07f 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -188,8 +188,8 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
}
-ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf) {
+Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
+ u32_le* src_cmdbuf) {
ParseCommandBuffer(handle_table, src_cmdbuf, true);
if (command_header->IsCloseCommand()) {
@@ -202,7 +202,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTab
return ResultSuccess;
}
-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
+Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
auto current_offset = handles_offset;
auto& owner_process = *requesting_thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 0ddc8df9e..d3abeee85 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -18,7 +18,7 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/svc_common.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -71,10 +71,10 @@ public:
* it should be used to differentiate which client (As in ClientSession) we're answering to.
* TODO(Subv): Use a wrapper structure to hold all the information relevant to
* this request (ServerSession, Originator thread, Translated command buffer, etc).
- * @returns ResultCode the result code of the translate operation.
+ * @returns Result the result code of the translate operation.
*/
- virtual ResultCode HandleSyncRequest(Kernel::KServerSession& session,
- Kernel::HLERequestContext& context) = 0;
+ virtual Result HandleSyncRequest(Kernel::KServerSession& session,
+ Kernel::HLERequestContext& context) = 0;
/**
* Signals that a client has just connected to this HLE handler and keeps the
@@ -212,11 +212,10 @@ public:
}
/// Populates this context with data from the requesting process/thread.
- ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf);
+ Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
- ResultCode WriteToOutgoingCommandBuffer(KThread& requesting_thread);
+ Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);
u32_le GetHipcCommand() const {
return command;
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 04cf86d52..f85b11557 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -90,8 +90,7 @@ public:
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// If the thread is waiting on an address arbiter, remove it from the tree.
if (waiting_thread->IsWaitingForAddressArbiter()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
@@ -108,7 +107,7 @@ private:
} // namespace
-ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
+Result KAddressArbiter::Signal(VAddr addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -131,7 +130,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
return ResultSuccess;
}
-ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -164,7 +163,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
return ResultSuccess;
}
-ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -232,9 +231,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
return ResultSuccess;
}
-ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
@@ -285,9 +284,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
return cur_thread->GetWaitResult();
}
-ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index 5fa19d386..e4085ae22 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"
-union ResultCode;
+union Result;
namespace Core {
class System;
@@ -25,8 +25,7 @@ public:
explicit KAddressArbiter(Core::System& system_);
~KAddressArbiter();
- [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
- s32 count) {
+ [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
return Signal(addr, count);
@@ -39,8 +38,8 @@ public:
return ResultUnknown;
}
- [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
- s64 timeout) {
+ [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
+ s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
return WaitIfLessThan(addr, value, false, timeout);
@@ -54,11 +53,11 @@ public:
}
private:
- [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
- [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
- [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+ [[nodiscard]] Result Signal(VAddr addr, s32 count);
+ [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+ [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
ThreadTree thread_tree;
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index ef168fe87..d63e77d15 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -59,8 +59,8 @@ bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}
-ResultCode KClientPort::CreateSession(KClientSession** out,
- std::shared_ptr<SessionRequestManager> session_manager) {
+Result KClientPort::CreateSession(KClientSession** out,
+ std::shared_ptr<SessionRequestManager> session_manager) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
index 54bb05e20..ef8583efc 100644
--- a/src/core/hle/kernel/k_client_port.h
+++ b/src/core/hle/kernel/k_client_port.h
@@ -53,8 +53,8 @@ public:
void Destroy() override;
bool IsSignaled() const override;
- ResultCode CreateSession(KClientSession** out,
- std::shared_ptr<SessionRequestManager> session_manager = nullptr);
+ Result CreateSession(KClientSession** out,
+ std::shared_ptr<SessionRequestManager> session_manager = nullptr);
private:
std::atomic<s32> num_sessions{};
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 731af079c..b2a887b14 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -21,8 +21,8 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}
-ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
+Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
// Signal the server session that new data is available
return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
}
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index 7a7ec8450..0c750d756 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -9,7 +9,7 @@
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -46,8 +46,8 @@ public:
return parent;
}
- ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
void OnServerClosed();
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4ae40ec8e..da57ceb21 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -7,7 +7,7 @@
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -19,7 +19,7 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
-ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
// Set members.
m_owner = kernel.CurrentProcess();
@@ -62,7 +62,7 @@ void KCodeMemory::Finalize() {
m_owner->Close();
}
-ResultCode KCodeMemory::Map(VAddr address, size_t size) {
+Result KCodeMemory::Map(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -82,7 +82,7 @@ ResultCode KCodeMemory::Map(VAddr address, size_t size) {
return ResultSuccess;
}
-ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
+Result KCodeMemory::Unmap(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -99,7 +99,7 @@ ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
return ResultSuccess;
}
-ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -133,7 +133,7 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
return ResultSuccess;
}
-ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index ab06b6f29..2410f74a3 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -7,7 +7,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
@@ -29,13 +29,13 @@ class KCodeMemory final
public:
explicit KCodeMemory(KernelCore& kernel_);
- ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+ Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
void Finalize();
- ResultCode Map(VAddr address, size_t size);
- ResultCode Unmap(VAddr address, size_t size);
- ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
- ResultCode UnmapFromOwner(VAddr address, size_t size);
+ Result Map(VAddr address, size_t size);
+ Result Unmap(VAddr address, size_t size);
+ Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
+ Result UnmapFromOwner(VAddr address, size_t size);
bool IsInitialized() const {
return m_is_initialized;
@@ -53,7 +53,7 @@ public:
}
private:
- KPageLinkedList m_page_group{};
+ KPageGroup m_page_group{};
KProcess* m_owner{};
VAddr m_address{};
KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 43bcd253d..124149697 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -61,8 +61,7 @@ public:
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
: KThreadQueue(kernel_) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
@@ -80,8 +79,7 @@ public:
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
@@ -105,8 +103,8 @@ KConditionVariable::KConditionVariable(Core::System& system_)
KConditionVariable::~KConditionVariable() = default;
-ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
- KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+Result KConditionVariable::SignalToAddress(VAddr addr) {
+ KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
@@ -126,7 +124,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
// Write the value to userspace.
- ResultCode result{ResultSuccess};
+ Result result{ResultSuccess};
if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
@@ -146,8 +144,8 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
}
-ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
@@ -261,7 +259,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}
}
-ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 7bc749d98..fad4ed011 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -25,12 +25,12 @@ public:
~KConditionVariable();
// Arbitration
- [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
- [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
+ [[nodiscard]] Result SignalToAddress(VAddr addr);
+ [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
- [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+ [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
private:
void SignalImpl(KThread* thread);
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index c453927ad..e830ca46e 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -8,7 +8,7 @@ namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;
-ResultCode KHandleTable::Finalize() {
+Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
@@ -62,7 +62,7 @@ bool KHandleTable::Remove(Handle handle) {
return true;
}
-ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
+Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -85,7 +85,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
return ResultSuccess;
}
-ResultCode KHandleTable::Reserve(Handle* out_handle) {
+Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index befdb2ec9..0864a737c 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -30,7 +30,7 @@ public:
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
- ResultCode Initialize(s32 size) {
+ Result Initialize(s32 size) {
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
// Initialize all fields.
@@ -60,7 +60,7 @@ public:
return m_max_count;
}
- ResultCode Finalize();
+ Result Finalize();
bool Remove(Handle handle);
template <typename T = KAutoObject>
@@ -100,10 +100,10 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
- ResultCode Reserve(Handle* out_handle);
+ Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);
- ResultCode Add(Handle* out_handle, KAutoObject* obj);
+ Result Add(Handle* out_handle, KAutoObject* obj);
void Register(Handle handle, KAutoObject* obj);
template <typename T>
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index cf9ed80d0..d606a7f86 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -15,8 +15,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}
- auto& scheduler = kernel.Scheduler(core_id);
- auto& current_thread = *scheduler.GetCurrentThread();
+ auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -26,7 +25,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
- scheduler.GetCurrentThread()->SetInterruptFlag();
+ GetCurrentThread(kernel).SetInterruptFlag();
}
}
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
index a40f35f45..cade99cfd 100644
--- a/src/core/hle/kernel/k_light_condition_variable.cpp
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -17,8 +17,7 @@ public:
bool term)
: KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Only process waits if we're allowed to.
if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
return;
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 0225734b4..43185320d 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -15,8 +15,7 @@ class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
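
Note: several of the preceding hunks collapse the CancelWait override onto one line while changing its result parameter from ResultCode to Result. A minimal sketch of a queue implementing the new signature follows; the class is hypothetical, and it assumes (as the overrides above suggest) that KThreadQueue exposes a KernelCore& constructor and a base CancelWait that ends the wait:

// Illustrative sketch only; not part of the diff.
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/result.h"

namespace Kernel {

// Hypothetical thread queue using the post-rename CancelWait signature.
class ThreadQueueImplForExample final : public KThreadQueue {
public:
    explicit ThreadQueueImplForExample(KernelCore& kernel_) : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Queue-specific cleanup would go here (e.g. removing the thread from a
        // wait list, as ThreadQueueImplForKLightLock does above), after which the
        // base class is assumed to end the wait with the supplied result.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};

} // namespace Kernel
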
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 58e540f31..5b0a9963a 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -11,7 +11,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -208,8 +208,8 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
return allocated_block;
}
-ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
- Direction dir, bool random) {
+Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
+ Direction dir, bool random) {
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, ResultOutOfMemory);
@@ -257,7 +257,7 @@ ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t nu
return ResultSuccess;
}
-ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) {
+Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);
@@ -293,8 +293,8 @@ ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_page
return ResultSuccess;
}
-ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages,
- u32 option, u64 process_id, u8 fill_pattern) {
+Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
+ u64 process_id, u8 fill_pattern) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);
@@ -370,12 +370,12 @@ void KMemoryManager::Close(PAddr address, size_t num_pages) {
}
}
-void KMemoryManager::Close(const KPageLinkedList& pg) {
+void KMemoryManager::Close(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Close(node.GetAddress(), node.GetNumPages());
}
}
-void KMemoryManager::Open(const KPageLinkedList& pg) {
+void KMemoryManager::Open(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Open(node.GetAddress(), node.GetNumPages());
}
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index c7923cb82..dcb9b6348 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -19,7 +19,7 @@ class System;
namespace Kernel {
-class KPageLinkedList;
+class KPageGroup;
class KMemoryManager final {
public:
@@ -65,17 +65,17 @@ public:
}
PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
- ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option);
- ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option,
- u64 process_id, u8 fill_pattern);
+ Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+ Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+ u8 fill_pattern);
static constexpr size_t MaxManagerCount = 10;
void Close(PAddr address, size_t num_pages);
- void Close(const KPageLinkedList& pg);
+ void Close(const KPageGroup& pg);
void Open(PAddr address, size_t num_pages);
- void Open(const KPageLinkedList& pg);
+ void Open(const KPageGroup& pg);
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -262,8 +262,8 @@ private:
}
}
- ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
- Direction dir, bool random);
+ Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
+ bool random);
private:
Core::System& system;
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_group.h
index 1f79c8330..968753992 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -12,7 +12,7 @@
namespace Kernel {
-class KPageLinkedList final {
+class KPageGroup final {
public:
class Node final {
public:
@@ -36,8 +36,8 @@ public:
};
public:
- KPageLinkedList() = default;
- KPageLinkedList(u64 address, u64 num_pages) {
+ KPageGroup() = default;
+ KPageGroup(u64 address, u64 num_pages) {
ASSERT(AddBlock(address, num_pages).IsSuccess());
}
@@ -57,7 +57,7 @@ public:
return num_pages;
}
- bool IsEqual(KPageLinkedList& other) const {
+ bool IsEqual(KPageGroup& other) const {
auto this_node = nodes.begin();
auto other_node = other.nodes.begin();
while (this_node != nodes.end() && other_node != other.nodes.end()) {
@@ -72,7 +72,7 @@ public:
return this_node == nodes.end() && other_node == other.nodes.end();
}
- ResultCode AddBlock(u64 address, u64 num_pages) {
+ Result AddBlock(u64 address, u64 num_pages) {
if (!num_pages) {
return ResultSuccess;
}
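
Note: for reference, a brief usage sketch of the renamed KPageGroup interface shown above (AddBlock, Nodes, GetNumPages); the helper function and its arguments are hypothetical, and ASSERT is assumed to come from common/assert.h as used elsewhere in the kernel sources:

// Illustrative sketch only; not part of the diff.
#include <cstddef>

#include "common/assert.h"
#include "core/hle/kernel/k_page_group.h"

namespace Kernel {

// Hypothetical helper: record a block of pages, then verify that the per-node
// page counts add up to the group's total. This is the same walk over
// pg.Nodes() that KMemoryManager::Open and Close perform in the hunks above.
void ExamplePageGroupWalk(KPageGroup& pg, u64 address, u64 num_pages) {
    ASSERT(pg.AddBlock(address, num_pages).IsSuccess());

    std::size_t total_pages = 0;
    for (const auto& node : pg.Nodes()) {
        total_pages += node.GetNumPages();
    }
    ASSERT(total_pages == pg.GetNumPages());
}

} // namespace Kernel
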
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 39b89da53..d975de844 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -9,7 +9,7 @@
#include "core/hle/kernel/k_address_space_info.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -47,9 +47,9 @@ KPageTable::KPageTable(Core::System& system_)
KPageTable::~KPageTable() = default;
-ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
- bool enable_aslr, VAddr code_addr,
- std::size_t code_size, KMemoryManager::Pool pool) {
+Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+ VAddr code_addr, std::size_t code_size,
+ KMemoryManager::Pool pool) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
@@ -257,8 +257,8 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
return InitializeMemoryLayout(start, end);
}
-ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
+Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
const u64 size{num_pages * PageSize};
// Validate the mapping request.
@@ -271,7 +271,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
&pg, num_pages,
KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
@@ -283,7 +283,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
return ResultSuccess;
}
-ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -313,7 +313,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
const std::size_t num_pages = size / PageSize;
// Create page groups for the memory being mapped.
- KPageLinkedList pg;
+ KPageGroup pg;
AddRegionToPages(src_address, num_pages, pg);
// Reprotect the source as kernel-read/not mapped.
@@ -344,8 +344,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
return ResultSuccess;
}
-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy) {
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -489,7 +489,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
return address;
}
-ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
+Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -541,7 +541,7 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
return ResultSuccess;
}
-bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
@@ -630,8 +630,8 @@ bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
-ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
- KPageTable& src_page_table, VAddr src_addr) {
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ VAddr src_addr) {
KScopedLightLock lk(general_lock);
const std::size_t num_pages{size / PageSize};
@@ -660,7 +660,7 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -721,7 +721,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the new memory.
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&pg, (size - mapped_size) / PageSize,
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -903,7 +903,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
}
}
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -972,7 +972,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
}
// Make a page group for the unmap region.
- KPageLinkedList pg;
+ KPageGroup pg;
{
auto& impl = this->PageTableImpl();
@@ -1134,7 +1134,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState src_state{};
@@ -1147,7 +1147,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultInvalidCurrentMemory;
}
- KPageLinkedList page_linked_list;
+ KPageGroup page_linked_list;
const std::size_t num_pages{size / PageSize};
AddRegionToPages(src_addr, num_pages, page_linked_list);
@@ -1173,7 +1173,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultSuccess;
}
-ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState src_state{};
@@ -1188,8 +1188,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
- KPageLinkedList src_pages;
- KPageLinkedList dst_pages;
+ KPageGroup src_pages;
+ KPageGroup dst_pages;
const std::size_t num_pages{size / PageSize};
AddRegionToPages(src_addr, num_pages, src_pages);
@@ -1215,8 +1215,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
- KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
+ KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
@@ -1239,8 +1239,8 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
- KMemoryState state, KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
+ KMemoryPermission perm) {
// Check that the map is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -1263,10 +1263,10 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, bool is_pa_valid, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
+ PAddr phys_addr, bool is_pa_valid, VAddr region_start,
+ std::size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
// Ensure this is a valid map request.
@@ -1303,7 +1303,7 @@ ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::siz
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
+Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
@@ -1321,8 +1321,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
- KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -1345,7 +1344,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1369,10 +1368,10 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
return ResultSuccess;
}
-ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
// Ensure that the page group isn't null.
ASSERT(out != nullptr);
@@ -1394,8 +1393,8 @@ ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address,
return ResultSuccess;
}
-ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -1467,7 +1466,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
return QueryInfoImpl(addr);
}
-ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
+Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -1485,7 +1484,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
return ResultSuccess;
}
-ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
+Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -1500,8 +1499,8 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -1528,7 +1527,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
@@ -1563,7 +1562,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
return ResultSuccess;
}
-ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(std::size_t size) {
// Lock the table.
KScopedLightLock lk(general_lock);
@@ -1575,7 +1574,7 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Lock the physical memory mutex.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -1642,7 +1641,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the heap extension.
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
&pg, allocation_size / PageSize,
KMemoryManager::EncodeOption(memory_pool, allocation_option)));
@@ -1717,7 +1716,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
if (is_map_only) {
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
- KPageLinkedList page_group;
+ KPageGroup page_group;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&page_group, needed_num_pages,
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -1729,11 +1728,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
return addr;
}
-ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
- if (const ResultCode result{CheckMemoryState(
+ if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1752,11 +1751,11 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
- if (const ResultCode result{CheckMemoryState(
+ if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1775,7 +1774,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}
-ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
return this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -1785,15 +1784,14 @@ ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::
KMemoryAttribute::Locked);
}
-ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
- const KPageLinkedList& pg) {
+Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
return this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}
-ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
+Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<KMemoryBlockManager>(start, end);
return ResultSuccess;
@@ -1818,7 +1816,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
}
void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
- KPageLinkedList& page_linked_list) {
+ KPageGroup& page_linked_list) {
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1837,8 +1835,8 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
IsKernel() ? 1 : 4);
}
-ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
- OperationType operation) {
+Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(Common::IsAligned(addr, PageSize));
@@ -1862,8 +1860,8 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
return ResultSuccess;
}
-ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr) {
+Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+ OperationType operation, PAddr map_addr) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
@@ -2005,10 +2003,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
}
}
-ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
// Validate the states match expectation.
R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
@@ -2017,12 +2015,11 @@ ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState st
return ResultSuccess;
}
-ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm,
- KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
+ std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
@@ -2060,12 +2057,12 @@ ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed
return ResultSuccess;
}
-ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
+ VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
@@ -2122,11 +2119,11 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}
-ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
- size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
// Validate basic preconditions.
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
@@ -2180,11 +2177,11 @@ ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_pad
return ResultSuccess;
}
-ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageGroup* pg) {
// Validate basic preconditions.
ASSERT((attr_mask & lock_attr) == lock_attr);
ASSERT((attr & lock_attr) == lock_attr);
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 6312eb682..25774f232 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -33,51 +33,49 @@ public:
explicit KPageTable(Core::System& system_);
~KPageTable();
- ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, std::size_t code_size,
- KMemoryManager::Pool pool);
- ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
- KMemoryPermission perm);
- ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
- ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy);
- ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
- VAddr src_addr);
- ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
- KMemoryPermission perm);
- ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
+ Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+ VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
+ Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+ Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy);
+ Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ VAddr src_addr);
+ Result MapPhysicalMemory(VAddr addr, std::size_t size);
+ Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
+ Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ KMemoryState state, KMemoryPermission perm) {
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
state, perm);
}
- ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
- ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
- ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm);
+ Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
+ Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
+ Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
- ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
- ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
- ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
- ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
- ResultCode SetMaxHeapSize(std::size_t size);
- ResultCode SetHeapSize(VAddr* out, std::size_t size);
+ Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
+ Result ResetTransferMemory(VAddr addr, std::size_t size);
+ Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
+ Result SetMaxHeapSize(std::size_t size);
+ Result SetHeapSize(VAddr* out, std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr = 0);
- ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
- ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
- ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
- ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
- ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr);
+ Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
+ Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
+ Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
+ Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+ Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -102,83 +100,78 @@ private:
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared;
- ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
- ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
- KMemoryPermission perm);
- ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
- PAddr phys_addr, bool is_pa_valid, VAddr region_start,
- std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
- ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
+ Result InitializeMemoryLayout(VAddr start, VAddr end);
+ Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
+ Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm);
+ Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
- void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
+ void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
std::size_t align);
- ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
- OperationType operation);
- ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr = 0);
+ Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ OperationType operation);
+ Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+ OperationType operation, PAddr map_addr = 0);
VAddr GetRegionAddress(KMemoryState state) const;
std::size_t GetRegionSize(KMemoryState state) const;
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t alignment, std::size_t offset, std::size_t guard_pages);
- ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const;
- ResultCode CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+ Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr);
}
- ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const;
- ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- ResultCode CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
+ std::size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
}
- ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr);
}
- ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr);
- ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr,
- const KPageLinkedList* pg);
+ Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageGroup* pg);
- ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
- bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
+ Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
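The k_page_table.h hunks above keep pairing a fully-parameterised CheckMemoryState/CheckMemoryStateContiguous with thin overloads that simply forward nullptr for the out-parameters the caller does not want. A minimal standalone sketch of that delegation idiom, with illustrative names and a trivial check in place of the kernel's real state/permission/attribute logic:

#include <cstddef>
#include <cstdint>

struct Result {
    std::uint32_t raw{};
    bool IsError() const { return raw != 0; }
};

class PageTableSketch {
public:
    // Full form: optionally reports how many blocks the range spans.
    Result CheckRange(std::size_t* out_blocks_needed, [[maybe_unused]] std::uintptr_t addr,
                      std::size_t size) const {
        if (size == 0) {
            return Result{1}; // illustrative error value only
        }
        if (out_blocks_needed != nullptr) {
            *out_blocks_needed = (size + page_size - 1) / page_size;
        }
        return Result{};
    }

    // Convenience form: same validation, caller does not need the block count.
    Result CheckRange(std::uintptr_t addr, std::size_t size) const {
        return this->CheckRange(nullptr, addr, size);
    }

private:
    static constexpr std::size_t page_size = 0x1000;
};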
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index 51c2cd1ef..7a5a9dc2a 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -50,7 +50,7 @@ bool KPort::IsServerClosed() const {
return state == State::ServerClosed;
}
-ResultCode KPort::EnqueueSession(KServerSession* session) {
+Result KPort::EnqueueSession(KServerSession* session) {
KScopedSchedulerLock sl{kernel};
R_UNLESS(state == State::Normal, ResultPortClosed);
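EnqueueSession above leans on the kernel's R_UNLESS early-return macro, which, like R_TRY later in this diff, now operates on Result. A rough, self-contained approximation of those helpers under that assumption (the real definitions live in core/hle/result.h, and the error value below is made up):

// Standalone approximation; not the real macros or result codes.
struct Result {
    unsigned raw{};
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{};
constexpr Result ResultPortClosed{0xE601}; // illustrative value only

#define R_UNLESS(cond, res)                                                    \
    do {                                                                       \
        if (!(cond)) {                                                         \
            return (res);                                                      \
        }                                                                      \
    } while (0)

#define R_TRY(expr)                                                            \
    do {                                                                       \
        if (const Result r_try_result_ = (expr); r_try_result_.IsError()) {    \
            return r_try_result_;                                              \
        }                                                                      \
    } while (0)

// Usage mirroring KPort::EnqueueSession: bail out with a specific error
// unless the port is still in its normal state.
Result Enqueue(bool port_is_normal) {
    R_UNLESS(port_is_normal, ResultPortClosed);
    return ResultSuccess;
}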
diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
index 1bfecf8c3..0cfc16dab 100644
--- a/src/core/hle/kernel/k_port.h
+++ b/src/core/hle/kernel/k_port.h
@@ -34,7 +34,7 @@ public:
bool IsServerClosed() const;
- ResultCode EnqueueSession(KServerSession* session);
+ Result EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
return client;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 77356e592..b662788b3 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -67,8 +67,8 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
}
} // Anonymous namespace
-ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit) {
+Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit) {
auto& kernel = system.Kernel();
process->name = std::move(process_name);
@@ -176,7 +176,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
@@ -193,7 +194,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -217,8 +219,8 @@ void KProcess::UnpinThread(KThread* thread) {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
- [[maybe_unused]] size_t size) {
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+ [[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(state_lock);
@@ -282,7 +284,7 @@ void KProcess::UnregisterThread(KThread* thread) {
thread_list.remove(thread);
}
-ResultCode KProcess::Reset() {
+Result KProcess::Reset() {
// Lock the process and the scheduler.
KScopedLightLock lk(state_lock);
KScopedSchedulerLock sl{kernel};
@@ -296,7 +298,7 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}
-ResultCode KProcess::SetActivity(ProcessActivity activity) {
+Result KProcess::SetActivity(ProcessActivity activity) {
// Lock ourselves and the scheduler.
KScopedLightLock lk{state_lock};
KScopedLightLock list_lk{list_lock};
@@ -340,8 +342,7 @@ ResultCode KProcess::SetActivity(ProcessActivity activity) {
return ResultSuccess;
}
-ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
- std::size_t code_size) {
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
@@ -356,24 +357,24 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
return ResultLimitReached;
}
// Initialize process address space
- if (const ResultCode result{
- page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
- code_size, KMemoryManager::Pool::Application)};
+ if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+ 0x8000000, code_size,
+ KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
}
// Map process code region
- if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
- code_size / PageSize, KMemoryState::Code,
- KMemoryPermission::None)};
+ if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+ code_size / PageSize, KMemoryState::Code,
+ KMemoryPermission::None)};
result.IsError()) {
return result;
}
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
- if (const ResultCode result{
+ if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
result.IsError()) {
return result;
@@ -420,11 +421,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto& thread : in_thread_list) {
+ for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+ if (thread == GetCurrentThreadPointer(kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -480,7 +481,7 @@ void KProcess::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
-ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(VAddr* out) {
KThreadLocalPage* tlp = nullptr;
VAddr tlr = 0;
@@ -531,7 +532,7 @@ ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
return ResultSuccess;
}
-ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage* page_to_free = nullptr;
// Release the region.
@@ -662,7 +663,7 @@ void KProcess::ChangeStatus(ProcessStatus new_status) {
NotifyAvailable();
}
-ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);
// The kernel always ensures that the given stack size is page aligned.
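The LoadFromMetadata hunks above keep the `if (const Result result{...}; result.IsError())` shape, only reflowed for the shorter type name. A small standalone reminder of why that C++17 form is convenient: each result stays scoped to its own check, so the name can be reused step after step (the two step functions below are stand-ins):

struct Result {
    unsigned raw{};
    bool IsError() const { return raw != 0; }
};

Result InitializeAddressSpace() { return Result{}; } // stand-in for step 1
Result MapCodeRegion()          { return Result{}; } // stand-in for step 2

Result LoadSketch() {
    // C++17 if-with-initializer: `result` is scoped to this statement only.
    if (const Result result{InitializeAddressSpace()}; result.IsError()) {
        return result;
    }
    // The name can be reused for the next step without shadowing.
    if (const Result result{MapCodeRegion()}; result.IsError()) {
        return result;
    }
    return Result{};
}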
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index c2086e5ba..5e3e22ad8 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -110,8 +110,8 @@ public:
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
- static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit);
+ static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit);
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
@@ -133,11 +133,11 @@ public:
return handle_table;
}
- ResultCode SignalToAddress(VAddr address) {
+ Result SignalToAddress(VAddr address) {
return condition_var.SignalToAddress(address);
}
- ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+ Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
return condition_var.WaitForAddress(handle, address, tag);
}
@@ -145,17 +145,16 @@ public:
return condition_var.Signal(cv_key, count);
}
- ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+ Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns);
}
- ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
- s32 count) {
+ Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count);
}
- ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
- s64 timeout) {
+ Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
}
@@ -322,7 +321,7 @@ public:
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ERR_INVALID_STATE will be
/// returned.
- ResultCode Reset();
+ Result Reset();
/**
* Loads process-specifics configuration info with metadata provided
@@ -333,7 +332,7 @@ public:
* @returns ResultSuccess if all relevant metadata was able to be
* loaded and parsed. Otherwise, an error code is returned.
*/
- ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
+ Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
/**
* Starts the main application thread for this process.
@@ -367,7 +366,7 @@ public:
void DoWorkerTaskImpl();
- ResultCode SetActivity(ProcessActivity activity);
+ Result SetActivity(ProcessActivity activity);
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
@@ -377,17 +376,17 @@ public:
return state_lock;
}
- ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+ Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);
+ [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
// Frees a used TLS slot identified by the given address
- ResultCode DeleteThreadLocalRegion(VAddr addr);
+ Result DeleteThreadLocalRegion(VAddr addr);
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
@@ -423,7 +422,7 @@ private:
void ChangeStatus(ProcessStatus new_status);
/// Allocates the main thread stack for the process, given the stack size in bytes.
- ResultCode AllocateMainThreadStack(std::size_t stack_size);
+ Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
std::unique_ptr<KPageTable> page_table;
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index dddba554d..94c5464fe 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -27,7 +27,7 @@ void KReadableEvent::Destroy() {
}
}
-ResultCode KReadableEvent::Signal() {
+Result KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
@@ -38,13 +38,13 @@ ResultCode KReadableEvent::Signal() {
return ResultSuccess;
}
-ResultCode KReadableEvent::Clear() {
+Result KReadableEvent::Clear() {
Reset();
return ResultSuccess;
}
-ResultCode KReadableEvent::Reset() {
+Result KReadableEvent::Reset() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 5065c7cc0..18dcad289 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -33,9 +33,9 @@ public:
bool IsSignaled() const override;
void Destroy() override;
- ResultCode Signal();
- ResultCode Clear();
- ResultCode Reset();
+ Result Signal();
+ Result Clear();
+ Result Reset();
private:
bool is_signaled{};
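Signal and Reset above run under the scheduler lock, and Signal only notifies waiters on the unsignaled-to-signaled transition. A loose standalone analogue using a mutex and condition variable in place of the kernel's scheduler lock and wait queues:

#include <condition_variable>
#include <mutex>

struct Result {
    unsigned raw{};
};
constexpr Result ResultSuccess{};

// Stand-in for the readable event: Signal only notifies on the transition
// from unsignaled to signaled, Clear just resets the flag.
class ReadableEventSketch {
public:
    Result Signal() {
        std::scoped_lock lk{mutex_};
        if (!is_signaled_) {
            is_signaled_ = true;
            cv_.notify_all(); // analogous to NotifyAvailable()
        }
        return ResultSuccess;
    }

    Result Clear() {
        std::scoped_lock lk{mutex_};
        is_signaled_ = false;
        return ResultSuccess;
    }

    void Wait() {
        std::unique_lock lk{mutex_};
        cv_.wait(lk, [this] { return is_signaled_; });
    }

private:
    std::mutex mutex_;
    std::condition_variable cv_;
    bool is_signaled_{false};
};

int main() {
    ReadableEventSketch ev;
    ev.Signal();
    ev.Wait(); // returns immediately: the event is already signaled
    ev.Clear();
}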
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index 3e0ecffdb..010dcf99e 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -73,7 +73,7 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
return value;
}
-ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
R_UNLESS(current_values[index] <= value, ResultInvalidState);
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index 43bf74b8d..65c98c979 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"
-union ResultCode;
+union Result;
namespace Core::Timing {
class CoreTiming;
@@ -46,7 +46,7 @@ public:
s64 GetPeakValue(LimitableResource which) const;
s64 GetFreeValue(LimitableResource which) const;
- ResultCode SetLimitValue(LimitableResource which, s64 value);
+ Result SetLimitValue(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value, s64 timeout);
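Both this header and k_transfer_memory.h further down switch the forward declaration to `union Result;` — a forward declaration is all a header needs in order to declare functions taking or returning the type by value; only code that defines or calls them needs the full definition. A self-contained illustration (the single-member union below is not the real Result layout):

// "Header" part: an incomplete type is fine in declarations.
union Result;
Result SetLimitValueSketch(long long value);

// "Source" part: the full definition must be visible here.
union Result {
    unsigned raw;
};

Result SetLimitValueSketch(long long value) {
    return Result{value < 0 ? 1u : 0u};
}

int main() {
    return static_cast<int>(SetLimitValueSketch(64).raw);
}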
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index fb3b84f3d..d599d2bcb 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -317,7 +317,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
{
KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
- if (best_thread == GetCurrentThread()) {
+ if (best_thread == GetCurrentThreadPointer(kernel)) {
best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
}
@@ -424,7 +424,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -463,7 +463,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -551,7 +551,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -622,7 +622,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
}
KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} {
- switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
+ switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); });
state.needs_scheduling.store(true);
state.interrupt_task_thread_runnable = false;
state.should_count_idle = false;
@@ -642,7 +642,7 @@ KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}
-KThread* KScheduler::GetCurrentThread() const {
+KThread* KScheduler::GetSchedulerCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
}
@@ -654,7 +654,7 @@ u64 KScheduler::GetLastContextSwitchTicks() const {
}
void KScheduler::RescheduleCurrentCore() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+ ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
auto& phys_core = system.Kernel().PhysicalCore(core_id);
if (phys_core.IsInterrupted()) {
@@ -665,7 +665,7 @@ void KScheduler::RescheduleCurrentCore() {
if (state.needs_scheduling.load()) {
Schedule();
} else {
- GetCurrentThread()->EnableDispatch();
+ GetCurrentThread(system.Kernel()).EnableDispatch();
guard.Unlock();
}
}
@@ -718,13 +718,18 @@ void KScheduler::Reload(KThread* thread) {
void KScheduler::SwitchContextStep2() {
// Load context of new thread
- Reload(GetCurrentThread());
+ Reload(GetCurrentThreadPointer(system.Kernel()));
RescheduleCurrentCore();
}
+void KScheduler::Schedule() {
+ ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
+ this->ScheduleImpl();
+}
+
void KScheduler::ScheduleImpl() {
- KThread* previous_thread = GetCurrentThread();
+ KThread* previous_thread = GetCurrentThreadPointer(system.Kernel());
KThread* next_thread = state.highest_priority_thread;
state.needs_scheduling.store(false);
@@ -762,6 +767,7 @@ void KScheduler::ScheduleImpl() {
old_context = &previous_thread->GetHostContext();
// Set the new thread.
+ SetCurrentThread(system.Kernel(), next_thread);
current_thread.store(next_thread);
guard.Unlock();
@@ -772,11 +778,6 @@ void KScheduler::ScheduleImpl() {
next_scheduler.SwitchContextStep2();
}
-void KScheduler::OnSwitch(void* this_scheduler) {
- KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
- sched->SwitchToCurrent();
-}
-
void KScheduler::SwitchToCurrent() {
while (true) {
{
@@ -805,6 +806,7 @@ void KScheduler::SwitchToCurrent() {
}
}
auto thread = next_thread ? next_thread : idle_thread;
+ SetCurrentThread(system.Kernel(), thread);
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
} while (!is_switch_pending());
}
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 729e006f2..6a4760eca 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -48,7 +48,7 @@ public:
void Reload(KThread* thread);
/// Gets the current running thread
- [[nodiscard]] KThread* GetCurrentThread() const;
+ [[nodiscard]] KThread* GetSchedulerCurrentThread() const;
/// Gets the idle thread
[[nodiscard]] KThread* GetIdleThread() const {
@@ -57,7 +57,7 @@ public:
/// Returns true if the scheduler is idle
[[nodiscard]] bool IsIdle() const {
- return GetCurrentThread() == idle_thread;
+ return GetSchedulerCurrentThread() == idle_thread;
}
/// Gets the timestamp for the last context switch in ticks.
@@ -149,10 +149,7 @@ private:
void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
- void Schedule() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
- this->ScheduleImpl();
- }
+ void Schedule();
/// Switches the CPU's active thread context to that of the specified thread
void ScheduleImpl();
@@ -173,7 +170,6 @@ private:
*/
void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
- static void OnSwitch(void* this_scheduler);
void SwitchToCurrent();
KThread* prev_thread{};
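Removing OnSwitch works because Common::Fiber is constructed here from a callable, so the capturing lambda `[this] { SwitchToCurrent(); }` replaces the static-trampoline-plus-void* pattern. The same simplification in miniature, with an illustrative Fiber stand-in:

#include <functional>
#include <iostream>

// Stand-in for a fiber/entry-point holder that accepts any callable.
class FiberSketch {
public:
    explicit FiberSketch(std::function<void()> entry) : entry_{std::move(entry)} {}
    void Run() { entry_(); }

private:
    std::function<void()> entry_;
};

class SchedulerSketch {
public:
    SchedulerSketch() : switch_fiber_{[this] { SwitchToCurrent(); }} {}
    void Run() { switch_fiber_.Run(); }

private:
    // Previously this would have been a static void OnSwitch(void*) trampoline
    // that cast its argument back to SchedulerSketch*.
    void SwitchToCurrent() { std::cout << "switching on core " << core_ << '\n'; }

    int core_{0};
    FiberSketch switch_fiber_;
};

int main() {
    SchedulerSketch{}.Run();
}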
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 60f8ed470..802c646a6 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -79,7 +79,7 @@ std::size_t KServerSession::NumDomainRequestHandlers() const {
return manager->DomainHandlerCount();
}
-ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
+Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}
@@ -123,7 +123,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
return ResultSuccess;
}
-ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
+Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
@@ -143,8 +143,8 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
return ResultSuccess;
}
-ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
- ResultCode result = ResultSuccess;
+Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
+ Result result = ResultSuccess;
// If the session has been converted to a domain, handle the domain request
if (manager->HasSessionRequestHandler(context)) {
@@ -173,8 +173,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result;
}
-ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
+Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(thread, memory);
}
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index b628a843f..6d0821945 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -73,10 +73,10 @@ public:
* @param memory Memory context to handle the sync request under.
* @param core_timing Core timing context to schedule the request event under.
*
- * @returns ResultCode from the operation.
+ * @returns Result from the operation.
*/
- ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
/// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance.
@@ -103,14 +103,14 @@ public:
private:
/// Queues a sync request from the emulated application.
- ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
+ Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
- ResultCode CompleteSyncRequest(HLERequestContext& context);
+ Result CompleteSyncRequest(HLERequestContext& context);
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
- ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
+ Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
/// This session's HLE request handlers
std::shared_ptr<SessionRequestManager> manager;
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 51d7538ca..b77735736 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -18,12 +18,10 @@ KSharedMemory::~KSharedMemory() {
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}
-ResultCode KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- KPageLinkedList&& page_list_,
- Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_,
- PAddr physical_address_, std::size_t size_,
- std::string name_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+ KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+ Svc::MemoryPermission user_permission_, PAddr physical_address_,
+ std::size_t size_, std::string name_) {
// Set members.
owner_process = owner_process_;
device_memory = &device_memory_;
@@ -67,8 +65,8 @@ void KSharedMemory::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
}
-ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
- Svc::MemoryPermission permissions) {
+Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Svc::MemoryPermission permissions) {
const u64 page_count{(map_size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
@@ -86,7 +84,7 @@ ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size
ConvertToKMemoryPermission(permissions));
}
-ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
const u64 page_count{(unmap_size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
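Map and Unmap above both derive the page count with the usual round-up division `(size + PageSize - 1) / PageSize` and reject sizes that disagree with the block's page list. The arithmetic in isolation, assuming the kernel's 4 KiB page size:

#include <cassert>
#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000; // 4 KiB

constexpr std::uint64_t PageCount(std::uint64_t size) {
    // Round up so a partial trailing page still counts as a full page.
    return (size + PageSize - 1) / PageSize;
}

static_assert(PageCount(0x1000) == 1);
static_assert(PageCount(0x1001) == 2);
static_assert(PageCount(0x3FFF) == 4);

int main() {
    assert(PageCount(0) == 0);
}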
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 81de36136..2c1db0e70 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -9,7 +9,7 @@
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -26,10 +26,10 @@ public:
explicit KSharedMemory(KernelCore& kernel_);
~KSharedMemory() override;
- ResultCode Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_, PAddr physical_address_,
- std::size_t size_, std::string name_);
+ Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+ KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+ Svc::MemoryPermission user_permission_, PAddr physical_address_,
+ std::size_t size_, std::string name_);
/**
* Maps a shared memory block to an address in the target process' address space
@@ -38,8 +38,8 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
- ResultCode Map(KProcess& target_process, VAddr address, std::size_t map_size,
- Svc::MemoryPermission permissions);
+ Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Svc::MemoryPermission permissions);
/**
* Unmaps a shared memory block from an address in the target process' address space
@@ -47,7 +47,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
- ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+ Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
/**
* Gets a pointer to the shared memory block
@@ -77,7 +77,7 @@ public:
private:
Core::DeviceMemory* device_memory;
KProcess* owner_process{};
- KPageLinkedList page_list;
+ KPageGroup page_list;
Svc::MemoryPermission owner_permission{};
Svc::MemoryPermission user_permission{};
PAddr physical_address{};
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 8554144d5..802dca046 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -22,7 +22,7 @@ public:
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
- ResultCode wait_result) override {
+ Result wait_result) override {
// Determine the sync index, and unlink all nodes.
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
@@ -45,8 +45,7 @@ public:
KThreadQueue::EndWait(waiting_thread, wait_result);
}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove all nodes from our list.
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
@@ -72,9 +71,9 @@ void KSynchronizationObject::Finalize() {
KAutoObject::Finalize();
}
-ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout) {
+Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout) {
// Allocate space on stack for thread nodes.
std::vector<ThreadListNode> thread_nodes(num_objects);
@@ -148,7 +147,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
KSynchronizationObject::~KSynchronizationObject() = default;
-void KSynchronizationObject::NotifyAvailable(ResultCode result) {
+void KSynchronizationObject::NotifyAvailable(Result result) {
KScopedSchedulerLock sl(kernel);
// If we're not signaled, we've nothing to notify.
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index d7540d6c7..8d8122ab7 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -24,9 +24,9 @@ public:
KThread* thread{};
};
- [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout);
+ [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout);
void Finalize() override;
@@ -72,7 +72,7 @@ protected:
virtual void OnFinalizeSynchronizationObject() {}
- void NotifyAvailable(ResultCode result);
+ void NotifyAvailable(Result result);
void NotifyAvailable() {
return this->NotifyAvailable(ResultSuccess);
}
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index e1bdd6c2c..50cb5fc90 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -80,8 +80,7 @@ public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread from the wait list.
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
@@ -99,8 +98,8 @@ KThread::KThread(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
KThread::~KThread() = default;
-ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner, ThreadType type) {
+Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -245,46 +244,40 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
return ResultSuccess;
}
-ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
- ThreadType type, std::function<void(void*)>&& init_func,
- void* init_func_parameter) {
+Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
+ ThreadType type, std::function<void()>&& init_func) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize emulation parameters.
- thread->host_context =
- std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+ thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
return ResultSuccess;
}
-ResultCode KThread::InitializeDummyThread(KThread* thread) {
+Result KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
}
-ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
+Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
- Core::CpuManager::GetIdleThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParameter());
+ system.GetCpuManager().GetIdleThreadStartFunc());
}
-ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- s32 virt_core) {
+Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg, s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
- Core::CpuManager::GetShutdownThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParameter());
+ system.GetCpuManager().GetShutdownThreadStartFunc());
}
-ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner) {
+Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
+ KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
- ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParameter());
+ ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc());
}
void KThread::PostDestroy(uintptr_t arg) {
@@ -382,7 +375,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetCurrentThread();
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -521,7 +514,7 @@ void KThread::ClearInterruptFlag() {
memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}
-ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
// Get the virtual mask.
@@ -531,7 +524,7 @@ ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
return ResultSuccess;
}
-ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
ASSERT(num_core_migration_disables >= 0);
@@ -547,7 +540,7 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
return ResultSuccess;
}
-ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
+Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
ASSERT(parent != nullptr);
ASSERT(v_affinity_mask != 0);
KScopedLightLock lk(activity_pause_lock);
@@ -629,7 +622,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
- if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+ if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -754,12 +747,12 @@ void KThread::WaitUntilSuspended() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetCurrentThread();
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
-ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
+Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);
@@ -820,7 +813,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (kernel.Scheduler(i).GetCurrentThread() == this) {
+ if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -832,7 +825,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
return ResultSuccess;
}
-ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
+Result KThread::GetThreadContext3(std::vector<u8>& out) {
// Lock ourselves.
KScopedLightLock lk{activity_pause_lock};
@@ -1000,7 +993,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
return next_lock_owner;
}
-ResultCode KThread::Run() {
+Result KThread::Run() {
while (true) {
KScopedSchedulerLock lk{kernel};
@@ -1061,7 +1054,7 @@ void KThread::Exit() {
}
}
-ResultCode KThread::Sleep(s64 timeout) {
+Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel));
ASSERT(timeout > 0);
@@ -1117,7 +1110,7 @@ void KThread::BeginWait(KThreadQueue* queue) {
wait_queue = queue;
}
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1127,7 +1120,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCod
}
}
-void KThread::EndWait(ResultCode wait_result_) {
+void KThread::EndWait(Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1146,7 +1139,7 @@ void KThread::EndWait(ResultCode wait_result_) {
}
}
-void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
+void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1176,6 +1169,10 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}
+void SetCurrentThread(KernelCore& kernel, KThread* thread) {
+ kernel.SetCurrentEmuThread(thread);
+}
+
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}
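The InitializeThread changes above drop the separate void* start-function parameter because the CpuManager start functions are now obtained per instance and already capture whatever state they need; the thread just stores a std::function<void()>. A self-contained sketch of that shape, with stand-in types:

#include <functional>
#include <iostream>

// Stand-in for CpuManager: the start function is produced per instance, so
// the callable already knows its manager and no void* parameter is needed.
class CpuManagerSketch {
public:
    std::function<void()> GetGuestThreadStartFunc() {
        return [this] { RunGuestThread(); };
    }

private:
    void RunGuestThread() { std::cout << "guest thread loop, core " << core_ << '\n'; }
    int core_{0};
};

// Stand-in for the thread initializer: it just stores the callable it is given.
struct ThreadSketch {
    std::function<void()> host_entry;
};

void InitializeThreadSketch(ThreadSketch& thread, std::function<void()>&& init_func) {
    thread.host_entry = std::move(init_func);
}

int main() {
    CpuManagerSketch manager;
    ThreadSketch thread;
    InitializeThreadSketch(thread, manager.GetGuestThreadStartFunc());
    thread.host_entry();
}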
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 8c1f8a344..28cd7ecb0 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -106,6 +106,7 @@ enum class StepState : u32 {
StepPerformed, ///< Thread has stepped, waiting to be scheduled again
};
+void SetCurrentThread(KernelCore& kernel, KThread* thread);
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
@@ -175,7 +176,7 @@ public:
void SetBasePriority(s32 value);
- [[nodiscard]] ResultCode Run();
+ [[nodiscard]] Result Run();
void Exit();
@@ -217,11 +218,11 @@ public:
return synced_index;
}
- constexpr void SetWaitResult(ResultCode wait_res) {
+ constexpr void SetWaitResult(Result wait_res) {
wait_result = wait_res;
}
- [[nodiscard]] constexpr ResultCode GetWaitResult() const {
+ [[nodiscard]] constexpr Result GetWaitResult() const {
return wait_result;
}
@@ -344,15 +345,15 @@ public:
return physical_affinity_mask;
}
- [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] ResultCode SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
+ [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
- [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
+ [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity);
- [[nodiscard]] ResultCode Sleep(s64 timeout);
+ [[nodiscard]] Result Sleep(s64 timeout);
[[nodiscard]] s64 GetYieldScheduleCount() const {
return schedule_count;
@@ -410,20 +411,19 @@ public:
static void PostDestroy(uintptr_t arg);
- [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
+ [[nodiscard]] static Result InitializeDummyThread(KThread* thread);
- [[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
- s32 virt_core);
+ [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
- KThread* thread,
- KThreadFunction func,
- uintptr_t arg, s32 virt_core);
+ [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner);
+ [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 virt_core,
+ KProcess* owner);
public:
struct StackParameters {
@@ -609,7 +609,7 @@ public:
void RemoveWaiter(KThread* thread);
- [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
+ [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
[[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
@@ -635,9 +635,9 @@ public:
}
void BeginWait(KThreadQueue* queue);
- void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
- void EndWait(ResultCode wait_result_);
- void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
+ void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_);
+ void EndWait(Result wait_result_);
+ void CancelWait(Result wait_result_, bool cancel_timer_task);
[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
@@ -723,14 +723,13 @@ private:
void FinishTermination();
- [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
+ [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
+ s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
- [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 core, KProcess* owner, ThreadType type,
- std::function<void(void*)>&& init_func,
- void* init_func_parameter);
+ [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 core, KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func);
static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
@@ -767,7 +766,7 @@ private:
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
s32 synced_index{};
- ResultCode wait_result{ResultSuccess};
+ Result wait_result{ResultSuccess};
s32 base_priority{};
s32 physical_ideal_core_id{};
s32 virtual_ideal_core_id{};
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index fbdc40b3a..563560114 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -13,7 +13,7 @@
namespace Kernel {
-ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
+Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
// Set that this process owns us.
m_owner = process;
m_kernel = &kernel;
@@ -35,7 +35,7 @@ ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
return ResultSuccess;
}
-ResultCode KThreadLocalPage::Finalize() {
+Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
index a4fe43ee5..0a7f22680 100644
--- a/src/core/hle/kernel/k_thread_local_page.h
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -34,8 +34,8 @@ public:
return m_virt_addr;
}
- ResultCode Initialize(KernelCore& kernel, KProcess* process);
- ResultCode Finalize();
+ Result Initialize(KernelCore& kernel, KProcess* process);
+ Result Finalize();
VAddr Reserve();
void Release(VAddr addr);
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index 1c338904a..9f4e081ba 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -9,9 +9,9 @@ namespace Kernel {
void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] KSynchronizationObject* signaled_object,
- [[maybe_unused]] ResultCode wait_result) {}
+ [[maybe_unused]] Result wait_result) {}
-void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
+void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
@@ -25,8 +25,7 @@ void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}
-void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) {
+void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
@@ -43,6 +42,6 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
}
void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
- [[maybe_unused]] ResultCode wait_result) {}
+ [[maybe_unused]] Result wait_result) {}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 4a7dbdd47..8d76ece81 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -14,10 +14,9 @@ public:
virtual ~KThreadQueue() = default;
virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
- ResultCode wait_result);
- virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
- virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task);
+ Result wait_result);
+ virtual void EndWait(KThread* waiting_thread, Result wait_result);
+ virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
private:
KernelCore& kernel;
@@ -28,7 +27,7 @@ class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
- void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
+ void EndWait(KThread* waiting_thread, Result wait_result) override final;
};
} // namespace Kernel
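The queue interface above is what the ThreadQueueImpl subclasses earlier in this diff override: EndWait/CancelWait now take a Result, store it on the thread, and specialised queues layer on their own bookkeeping such as unlinking the thread from a wait list. A compact standalone sketch of that override pattern (the real EndWait also requeues the thread and cancels its timeout event):

#include <list>

struct Result {
    unsigned raw{};
};
constexpr Result ResultSuccess{};

struct ThreadSketch {
    Result wait_result{};
};

// Base queue: finishing a wait records the result on the thread.
class ThreadQueueSketch {
public:
    virtual ~ThreadQueueSketch() = default;

    virtual void EndWait(ThreadSketch* thread, Result wait_result) {
        thread->wait_result = wait_result;
    }
    virtual void CancelWait(ThreadSketch* thread, Result wait_result,
                            bool /*cancel_timer_task*/) {
        thread->wait_result = wait_result;
    }
};

// Specialised queue: a cancelled wait must also drop the thread from the
// queue's own wait list, mirroring the SetProperty/synchronization queues.
class WaitListQueueSketch final : public ThreadQueueSketch {
public:
    void CancelWait(ThreadSketch* thread, Result wait_result,
                    bool cancel_timer_task) override {
        wait_list_.remove(thread);
        ThreadQueueSketch::CancelWait(thread, wait_result, cancel_timer_task);
    }

    void Add(ThreadSketch* thread) { wait_list_.push_back(thread); }

private:
    std::list<ThreadSketch*> wait_list_;
};

int main() {
    ThreadSketch t;
    WaitListQueueSketch q;
    q.Add(&t);
    q.CancelWait(&t, ResultSuccess, true);
}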
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index 1ed4b0f6f..b0320eb73 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -13,8 +13,8 @@ KTransferMemory::KTransferMemory(KernelCore& kernel_)
KTransferMemory::~KTransferMemory() = default;
-ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_,
- Svc::MemoryPermission owner_perm_) {
+Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
+ Svc::MemoryPermission owner_perm_) {
// Set members.
owner = kernel.CurrentProcess();
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 9ad80ba30..85d508ee7 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -7,7 +7,7 @@
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -26,7 +26,7 @@ public:
explicit KTransferMemory(KernelCore& kernel_);
~KTransferMemory() override;
- ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
+ Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
void Finalize() override;
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
index 26c8489ad..ff88c5acd 100644
--- a/src/core/hle/kernel/k_writable_event.cpp
+++ b/src/core/hle/kernel/k_writable_event.cpp
@@ -18,11 +18,11 @@ void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) {
parent->GetReadableEvent().Open();
}
-ResultCode KWritableEvent::Signal() {
+Result KWritableEvent::Signal() {
return parent->GetReadableEvent().Signal();
}
-ResultCode KWritableEvent::Clear() {
+Result KWritableEvent::Clear() {
return parent->GetReadableEvent().Clear();
}
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
index e289e80c4..3fd0c7d0a 100644
--- a/src/core/hle/kernel/k_writable_event.h
+++ b/src/core/hle/kernel/k_writable_event.h
@@ -25,8 +25,8 @@ public:
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
void Initialize(KEvent* parent_, std::string&& name_);
- ResultCode Signal();
- ResultCode Clear();
+ Result Signal();
+ Result Clear();
KEvent* GetParent() const {
return parent;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 94953e257..0009193be 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -331,6 +331,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
+ static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -341,7 +343,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
- return schedulers[thread_id]->GetCurrentThread();
+
+ return current_thread;
+ }
+
+ void SetCurrentEmuThread(KThread* thread) {
+ current_thread = thread;
}
void DeriveInitialMemoryLayout() {
@@ -1024,6 +1031,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+ impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4e7beab0e..aa0ebaa02 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -226,6 +226,9 @@ public:
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
+ /// Sets the current guest_thread pointer.
+ void SetCurrentEmuThread(KThread* thread);
+
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
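
The kernel.cpp/kernel.h hunks above replace the per-scheduler current-thread lookup with a host thread_local pointer plus an explicit SetCurrentEmuThread setter. A minimal standalone sketch of that pattern, using hypothetical names rather than yuzu's classes:

    #include <cassert>
    #include <thread>

    struct GuestThread {};  // stand-in for KThread

    // One slot per host thread; the scheduler records which guest thread is
    // currently running on this host thread before resuming it.
    thread_local GuestThread* g_current_guest_thread = nullptr;

    void SetCurrentGuestThread(GuestThread* thread) {
        g_current_guest_thread = thread;
    }

    GuestThread* GetCurrentGuestThread() {
        return g_current_guest_thread;
    }

    int main() {
        GuestThread a, b;
        std::thread t1([&] { SetCurrentGuestThread(&a); assert(GetCurrentGuestThread() == &a); });
        std::thread t2([&] { SetCurrentGuestThread(&b); assert(GetCurrentGuestThread() == &b); });
        t1.join();
        t2.join();
        // Each host thread only ever sees the guest thread it registered itself.
    }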
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 54872626e..773319ad8 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -68,9 +68,9 @@ u32 GetFlagBitOffset(CapabilityType type) {
} // Anonymous namespace
-ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
+ std::size_t num_capabilities,
+ KPageTable& page_table) {
Clear();
// Allow all cores and priorities.
@@ -81,9 +81,9 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
return ParseCapabilities(capabilities, num_capabilities, page_table);
}
-ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
+ std::size_t num_capabilities,
+ KPageTable& page_table) {
Clear();
return ParseCapabilities(capabilities, num_capabilities, page_table);
@@ -107,9 +107,8 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
can_force_debug = true;
}
-ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table) {
u32 set_flags = 0;
u32 set_svc_bits = 0;
@@ -155,8 +154,8 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
return ResultSuccess;
}
-ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
- u32 flag, KPageTable& page_table) {
+Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+ KPageTable& page_table) {
const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) {
@@ -224,7 +223,7 @@ void ProcessCapabilities::Clear() {
can_force_debug = false;
}
-ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
+Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
@@ -266,7 +265,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
+Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
const u32 index = flags >> 29;
const u32 svc_bit = 1U << index;
@@ -290,23 +289,23 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
- KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
+ KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
+Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
constexpr u32 interrupt_ignore_value = 0x3FF;
const u32 interrupt0 = (flags >> 12) & 0x3FF;
const u32 interrupt1 = (flags >> 22) & 0x3FF;
@@ -333,7 +332,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
+Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -344,7 +343,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
+Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
// Yes, the internal member variable is checked in the actual kernel here.
// This might look odd for options that are only allowed to be initialized
// just once, however the kernel has a separate initialization function for
@@ -364,7 +363,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
+Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -375,7 +374,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
+Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
index 7f3a2339d..ff05dc5ff 100644
--- a/src/core/hle/kernel/process_capability.h
+++ b/src/core/hle/kernel/process_capability.h
@@ -7,7 +7,7 @@
#include "common/common_types.h"
-union ResultCode;
+union Result;
namespace Kernel {
@@ -86,8 +86,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
- ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Initializes this process capabilities instance for a userland process.
///
@@ -99,8 +99,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
- ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Initializes this process capabilities instance for a process that does not
/// have any metadata to parse.
@@ -185,8 +185,8 @@ private:
///
/// @return ResultSuccess if no errors occur, otherwise an error code.
///
- ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Attempts to parse a capability descriptor that is only represented by a
/// single flag set.
@@ -200,8 +200,8 @@ private:
///
/// @return ResultSuccess if no errors occurred, otherwise an error code.
///
- ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table);
+ Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+ KPageTable& page_table);
/// Clears the internal state of this process capability instance. Necessary,
/// to have a sane starting point due to us allowing running executables without
@@ -219,34 +219,34 @@ private:
void Clear();
/// Handles flags related to the priority and core number capability flags.
- ResultCode HandlePriorityCoreNumFlags(u32 flags);
+ Result HandlePriorityCoreNumFlags(u32 flags);
/// Handles flags related to determining the allowable SVC mask.
- ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
+ Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
/// Handles flags related to mapping physical memory pages.
- ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
+ Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
/// Handles flags related to mapping IO pages.
- ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);
+ Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
/// Handles flags related to mapping physical memory regions.
- ResultCode HandleMapRegionFlags(u32 flags, KPageTable& page_table);
+ Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
/// Handles flags related to the interrupt capability flags.
- ResultCode HandleInterruptFlags(u32 flags);
+ Result HandleInterruptFlags(u32 flags);
/// Handles flags related to the program type.
- ResultCode HandleProgramTypeFlags(u32 flags);
+ Result HandleProgramTypeFlags(u32 flags);
/// Handles flags related to the handle table size.
- ResultCode HandleHandleTableFlags(u32 flags);
+ Result HandleHandleTableFlags(u32 flags);
/// Handles flags related to the kernel version capability flags.
- ResultCode HandleKernelVersionFlags(u32 flags);
+ Result HandleKernelVersionFlags(u32 flags);
/// Handles flags related to debug-specific capabilities.
- ResultCode HandleDebugFlags(u32 flags);
+ Result HandleDebugFlags(u32 flags);
SyscallCapabilities svc_capabilities;
InterruptCapabilities interrupt_capabilities;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2ff6d5fa6..8655506b0 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -58,8 +58,8 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
+ u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
return ResultInvalidAddress;
@@ -135,7 +135,7 @@ enum class ResourceLimitValueType {
} // Anonymous namespace
/// Set the process heap to a given Size. It can both extend and shrink the heap.
-static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+static Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
// Validate size.
@@ -148,9 +148,9 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size
return ResultSuccess;
}
-static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
+static Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
VAddr temp_heap_addr{};
- const ResultCode result{SetHeapSize(system, &temp_heap_addr, heap_size)};
+ const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
*heap_addr = static_cast<u32>(temp_heap_addr);
return result;
}
@@ -166,8 +166,8 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
}
}
-static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
- MemoryPermission perm) {
+static Result SetMemoryPermission(Core::System& system, VAddr address, u64 size,
+ MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
perm);
@@ -188,8 +188,8 @@ static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 s
return page_table.SetMemoryPermission(address, size, perm);
}
-static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
- u32 attr) {
+static Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
+ u32 attr) {
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attr);
@@ -213,19 +213,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
return page_table.SetMemoryAttribute(address, size, mask, attr);
}
-static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
- u32 attr) {
+static Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
+ u32 attr) {
return SetMemoryAttribute(system, address, size, mask, attr);
}
/// Maps a memory range into a different range.
-static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+static Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
@@ -233,18 +233,18 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
return page_table.MapMemory(dst_addr, src_addr, size);
}
-static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+static Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return MapMemory(system, dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
-static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+static Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
@@ -252,12 +252,12 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return page_table.UnmapMemory(dst_addr, src_addr, size);
}
-static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return UnmapMemory(system, dst_addr, src_addr, size);
}
/// Connect to an OS service given the port name, returns the handle to the port to out
-static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
+static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
auto& memory = system.Memory();
if (!memory.IsValidVirtualAddress(port_name_address)) {
LOG_ERROR(Kernel_SVC,
@@ -307,14 +307,14 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr po
return ResultSuccess;
}
-static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
- u32 port_name_address) {
+static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
+ u32 port_name_address) {
return ConnectToNamedPort(system, out_handle, port_name_address);
}
/// Makes a blocking IPC call to an OS service.
-static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
+static Result SendSyncRequest(Core::System& system, Handle handle) {
auto& kernel = system.Kernel();
// Create the wait queue.
@@ -327,7 +327,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLock lock(kernel);
@@ -337,15 +336,15 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
}
- return thread->GetWaitResult();
+ return GetCurrentThread(kernel).GetWaitResult();
}
-static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
+static Result SendSyncRequest32(Core::System& system, Handle handle) {
return SendSyncRequest(system, handle);
}
/// Get the ID for the specified thread.
-static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
+static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
// Get the thread from its handle.
KScopedAutoObject thread =
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
@@ -356,10 +355,10 @@ static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle t
return ResultSuccess;
}
-static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
- u32* out_thread_id_high, Handle thread_handle) {
+static Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
+ Handle thread_handle) {
u64 out_thread_id{};
- const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
+ const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
*out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
*out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
@@ -368,7 +367,7 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
}
/// Gets the ID of the specified process or a specified thread's owning process.
-static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
+static Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
// Get the object from the handle table.
@@ -399,8 +398,8 @@ static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle
return ResultSuccess;
}
-static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
- u32* out_process_id_high, Handle handle) {
+static Result GetProcessId32(Core::System& system, u32* out_process_id_low,
+ u32* out_process_id_high, Handle handle) {
u64 out_process_id{};
const auto result = GetProcessId(system, &out_process_id, handle);
*out_process_id_low = static_cast<u32>(out_process_id);
@@ -409,8 +408,8 @@ static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
}
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
- s32 num_handles, s64 nano_seconds) {
+static Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
+ s32 num_handles, s64 nano_seconds) {
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
handles_address, num_handles, nano_seconds);
@@ -445,14 +444,14 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha
nano_seconds);
}
-static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 num_handles, u32 timeout_high, s32* index) {
+static Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
+ s32 num_handles, u32 timeout_high, s32* index) {
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
}
/// Resumes a thread waiting on WaitSynchronization
-static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
+static Result CancelSynchronization(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
// Get the thread from its handle.
@@ -465,13 +464,12 @@ static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
return ResultSuccess;
}
-static ResultCode CancelSynchronization32(Core::System& system, Handle handle) {
+static Result CancelSynchronization32(Core::System& system, Handle handle) {
return CancelSynchronization(system, handle);
}
/// Attempts to lock a mutex
-static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
- u32 tag) {
+static Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
thread_handle, address, tag);
@@ -489,13 +487,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAdd
return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}
-static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
- u32 tag) {
+static Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
return ArbitrateLock(system, thread_handle, address, tag);
}
/// Unlock a mutex
-static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
+static Result ArbitrateUnlock(Core::System& system, VAddr address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
// Validate the input address.
@@ -513,7 +510,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
return system.Kernel().CurrentProcess()->SignalToAddress(address);
}
-static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
+static Result ArbitrateUnlock32(Core::System& system, u32 address) {
return ArbitrateUnlock(system, address);
}
@@ -624,7 +621,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
@@ -656,8 +653,8 @@ static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
}
/// Gets system/memory information for the current process
-static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
- u64 info_sub_id) {
+static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
+ u64 info_sub_id) {
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
@@ -692,6 +689,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
// 6.0.0+
TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
TotalPhysicalMemoryUsedWithoutSystemResource = 22,
+
+ // Homebrew only
+ MesosphereCurrentProcess = 65001,
};
const auto info_id_type = static_cast<GetInfoType>(info_id);
@@ -884,7 +884,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const auto& core_timing = system.CoreTiming();
const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const current_thread = scheduler.GetCurrentThread();
+ const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
@@ -914,18 +914,39 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
*result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
return ResultSuccess;
}
+ case GetInfoType::MesosphereCurrentProcess: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the sub-type is valid.
+ R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+
+ // Get the handle table.
+ KProcess* current_process = system.Kernel().CurrentProcess();
+ KHandleTable& handle_table = current_process->GetHandleTable();
+
+ // Get a new handle for the current process.
+ Handle tmp;
+ R_TRY(handle_table.Add(&tmp, current_process));
+
+ // Set the output.
+ *result = tmp;
+
+ // We succeeded.
+ return ResultSuccess;
+ }
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ResultInvalidEnumValue;
}
}
-static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
- u32 info_id, u32 handle, u32 sub_id_high) {
+static Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
+ u32 info_id, u32 handle, u32 sub_id_high) {
const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
u64 res_value{};
- const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
+ const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
*result_high = static_cast<u32>(res_value >> 32);
*result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
@@ -933,7 +954,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
}
/// Maps memory at a desired address
-static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+static Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -981,12 +1002,12 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
return page_table.MapPhysicalMemory(addr, size);
}
-static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+static Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
return MapPhysicalMemory(system, addr, size);
}
/// Unmaps memory previously mapped via MapPhysicalMemory
-static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+static Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -1034,13 +1055,13 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
return page_table.UnmapPhysicalMemory(addr, size);
}
-static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+static Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
return UnmapPhysicalMemory(system, addr, size);
}
/// Sets the thread activity
-static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
- ThreadActivity thread_activity) {
+static Result SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
thread_activity);
@@ -1065,13 +1086,13 @@ static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
- Svc::ThreadActivity thread_activity) {
+static Result SetThreadActivity32(Core::System& system, Handle thread_handle,
+ Svc::ThreadActivity thread_activity) {
return SetThreadActivity(system, thread_handle, thread_activity);
}
/// Gets the thread context
-static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+static Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
thread_handle);
@@ -1103,7 +1124,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
if (thread->GetRawState() != ThreadState::Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
+ if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
@@ -1128,12 +1149,12 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
return ResultSuccess;
}
-static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+static Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
return GetThreadContext(system, out_context, thread_handle);
}
/// Gets the priority for the specified thread
-static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
+static Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
LOG_TRACE(Kernel_SVC, "called");
// Get the thread from its handle.
@@ -1146,12 +1167,12 @@ static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Han
return ResultSuccess;
}
-static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+static Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
return GetThreadPriority(system, out_priority, handle);
}
/// Sets the priority for the specified thread
-static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
+static Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
// Get the current process.
KProcess& process = *system.Kernel().CurrentProcess();
@@ -1169,7 +1190,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
+static Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
return SetThreadPriority(system, thread_handle, priority);
}
@@ -1229,8 +1250,8 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission p
} // Anonymous namespace
-static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size, Svc::MemoryPermission map_perm) {
+static Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+ Svc::MemoryPermission map_perm) {
LOG_TRACE(Kernel_SVC,
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shmem_handle, address, size, map_perm);
@@ -1270,13 +1291,13 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAd
return ResultSuccess;
}
-static ResultCode MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size, Svc::MemoryPermission map_perm) {
+static Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
+ Svc::MemoryPermission map_perm) {
return MapSharedMemory(system, shmem_handle, address, size, map_perm);
}
-static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size) {
+static Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
+ u64 size) {
// Validate the address/size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
@@ -1303,13 +1324,13 @@ static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, V
return ResultSuccess;
}
-static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size) {
+static Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
+ u32 size) {
return UnmapSharedMemory(system, shmem_handle, address, size);
}
-static ResultCode SetProcessMemoryPermission(Core::System& system, Handle process_handle,
- VAddr address, u64 size, Svc::MemoryPermission perm) {
+static Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+ u64 size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
process_handle, address, size, perm);
@@ -1338,8 +1359,8 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
return page_table.SetProcessMemoryPermission(address, size, perm);
}
-static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -1368,7 +1389,7 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
ResultInvalidMemoryRegion);
// Create a new page group.
- KPageLinkedList pg;
+ KPageGroup pg;
R_TRY(src_pt.MakeAndOpenPageGroup(
std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
@@ -1381,8 +1402,8 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
return ResultSuccess;
}
-static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+static Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -1416,7 +1437,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
return ResultSuccess;
}
-static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
+static Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
// Get kernel instance.
@@ -1451,12 +1472,12 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
return ResultSuccess;
}
-static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
+static Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
return CreateCodeMemory(system, out, address, size);
}
-static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
- VAddr address, size_t size, Svc::MemoryPermission perm) {
+static Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
+ VAddr address, size_t size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
@@ -1534,15 +1555,13 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
return ResultSuccess;
}
-static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle,
- u32 operation, u64 address, u64 size,
- Svc::MemoryPermission perm) {
+static Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
+ u64 address, u64 size, Svc::MemoryPermission perm) {
return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
}
-static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, Handle process_handle,
- VAddr address) {
+static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address,
+ VAddr page_info_address, Handle process_handle, VAddr address) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
@@ -1570,8 +1589,8 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
return ResultSuccess;
}
-static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, VAddr query_address) {
+static Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ VAddr query_address) {
LOG_TRACE(Kernel_SVC,
"called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
"query_address=0x{:016X}",
@@ -1581,13 +1600,13 @@ static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
query_address);
}
-static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
- u32 page_info_address, u32 query_address) {
+static Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
+ u32 query_address) {
return QueryMemory(system, memory_info_address, page_info_address, query_address);
}
-static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
+static Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
LOG_DEBUG(Kernel_SVC,
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
"src_address=0x{:016X}, size=0x{:016X}",
@@ -1654,8 +1673,8 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
return page_table.MapCodeMemory(dst_address, src_address, size);
}
-static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
- u64 dst_address, u64 src_address, u64 size) {
+static Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
LOG_DEBUG(Kernel_SVC,
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
"size=0x{:016X}",
@@ -1747,8 +1766,8 @@ constexpr bool IsValidVirtualCoreId(int32_t core_id) {
} // Anonymous namespace
/// Creates a new thread
-static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_bottom, u32 priority, s32 core_id) {
+static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
+ VAddr stack_bottom, u32 priority, s32 core_id) {
LOG_DEBUG(Kernel_SVC,
"called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
"priority=0x{:08X}, core_id=0x{:08X}",
@@ -1819,13 +1838,13 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
return ResultSuccess;
}
-static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
- u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
+static Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
+ u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
}
/// Starts the thread for the provided handle
-static ResultCode StartThread(Core::System& system, Handle thread_handle) {
+static Result StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
// Get the thread from its handle.
@@ -1843,7 +1862,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
return ResultSuccess;
}
-static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
+static Result StartThread32(Core::System& system, Handle thread_handle) {
return StartThread(system, thread_handle);
}
@@ -1851,7 +1870,7 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
system.Kernel().UnregisterInUseObject(current_thread);
@@ -1894,8 +1913,8 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
}
/// Wait process wide key atomic
-static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
- u32 tag, s64 timeout_ns) {
+static Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
+ s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
cv_key, tag, timeout_ns);
@@ -1930,8 +1949,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address,
address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}
-static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
- u32 timeout_ns_low, u32 timeout_ns_high) {
+static Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}
@@ -1976,8 +1995,8 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
} // namespace
// Wait for an address (via Address Arbiter)
-static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
- s32 value, s64 timeout_ns) {
+static Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
+ s32 value, s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
address, arb_type, value, timeout_ns);
@@ -2014,15 +2033,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::Arbit
return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}
-static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
- s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
+static Result WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
+ s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitForAddress(system, address, arb_type, value, timeout);
}
// Signals to an address (via Address Arbiter)
-static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
- s32 value, s32 count) {
+static Result SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
address, signal_type, value, count);
@@ -2063,8 +2082,8 @@ static void SynchronizePreemptionState(Core::System& system) {
}
}
-static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
- s32 value, s32 count) {
+static Result SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
return SignalToAddress(system, address, signal_type, value, count);
}
@@ -2102,7 +2121,7 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high)
}
/// Close a handle
-static ResultCode CloseHandle(Core::System& system, Handle handle) {
+static Result CloseHandle(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
// Remove the handle.
@@ -2112,12 +2131,12 @@ static ResultCode CloseHandle(Core::System& system, Handle handle) {
return ResultSuccess;
}
-static ResultCode CloseHandle32(Core::System& system, Handle handle) {
+static Result CloseHandle32(Core::System& system, Handle handle) {
return CloseHandle(system, handle);
}
/// Clears the signaled state of an event or process.
-static ResultCode ResetSignal(Core::System& system, Handle handle) {
+static Result ResetSignal(Core::System& system, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
// Get the current handle table.
@@ -2144,7 +2163,7 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
return ResultInvalidHandle;
}
-static ResultCode ResetSignal32(Core::System& system, Handle handle) {
+static Result ResetSignal32(Core::System& system, Handle handle) {
return ResetSignal(system, handle);
}
@@ -2164,8 +2183,8 @@ constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
} // Anonymous namespace
/// Creates a TransferMemory object
-static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
- MemoryPermission map_perm) {
+static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+ MemoryPermission map_perm) {
auto& kernel = system.Kernel();
// Validate the size.
@@ -2211,13 +2230,13 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr
return ResultSuccess;
}
-static ResultCode CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
- MemoryPermission map_perm) {
+static Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
+ MemoryPermission map_perm) {
return CreateTransferMemory(system, out, address, size, map_perm);
}
-static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
- u64* out_affinity_mask) {
+static Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
// Get the thread from its handle.
@@ -2231,8 +2250,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
- u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
+static Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
u64 out_affinity_mask{};
const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
*out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
@@ -2240,8 +2259,8 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle
return result;
}
-static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
- u64 affinity_mask) {
+static Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
+ u64 affinity_mask) {
// Determine the core id/affinity mask.
if (core_id == IdealCoreUseProcessValue) {
core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
@@ -2272,13 +2291,13 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
- u32 affinity_mask_low, u32 affinity_mask_high) {
+static Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
+ u32 affinity_mask_low, u32 affinity_mask_high) {
const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
}
-static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
+static Result SignalEvent(Core::System& system, Handle event_handle) {
LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
@@ -2291,11 +2310,11 @@ static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
return writable_event->Signal();
}
-static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
+static Result SignalEvent32(Core::System& system, Handle event_handle) {
return SignalEvent(system, event_handle);
}
-static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
+static Result ClearEvent(Core::System& system, Handle event_handle) {
LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
@@ -2322,11 +2341,11 @@ static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
return ResultInvalidHandle;
}
-static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
+static Result ClearEvent32(Core::System& system, Handle event_handle) {
return ClearEvent(system, event_handle);
}
-static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
LOG_DEBUG(Kernel_SVC, "called");
// Get the kernel reference and handle table.
@@ -2371,11 +2390,11 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
return ResultSuccess;
}
-static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
+static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
return CreateEvent(system, out_write, out_read);
}
-static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
+static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
// This function currently only allows retrieving a process' status.
@@ -2401,7 +2420,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
return ResultSuccess;
}
-static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
+static Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
LOG_DEBUG(Kernel_SVC, "called");
// Create a new resource limit.
@@ -2424,9 +2443,8 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
return ResultSuccess;
}
-static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
- Handle resource_limit_handle,
- LimitableResource which) {
+static Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
which);
@@ -2445,9 +2463,8 @@ static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limi
return ResultSuccess;
}
-static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
- Handle resource_limit_handle,
- LimitableResource which) {
+static Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
which);
@@ -2466,8 +2483,8 @@ static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_cu
return ResultSuccess;
}
-static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
- LimitableResource which, u64 limit_value) {
+static Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, u64 limit_value) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
resource_limit_handle, which, limit_value);
@@ -2486,8 +2503,8 @@ static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resour
return ResultSuccess;
}
-static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
- VAddr out_process_ids, u32 out_process_ids_size) {
+static Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size) {
LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
out_process_ids, out_process_ids_size);
@@ -2523,8 +2540,8 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
return ResultSuccess;
}
-static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
- u32 out_thread_ids_size, Handle debug_handle) {
+static Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
+ u32 out_thread_ids_size, Handle debug_handle) {
// TODO: Handle this case when debug events are supported.
UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
@@ -2563,9 +2580,9 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ResultSuccess;
}
-static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
- [[maybe_unused]] Handle handle,
- [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
+static Result FlushProcessDataCache32([[maybe_unused]] Core::System& system,
+ [[maybe_unused]] Handle handle, [[maybe_unused]] u32 address,
+ [[maybe_unused]] u32 size) {
// Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op,
// as all emulation is done in the same cache level in host architecture, thus data cache
// does not need flushing.
@@ -2993,7 +3010,7 @@ void Call(Core::System& system, u32 immediate) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
+ auto* thread = GetCurrentThreadPointer(kernel);
thread->SetIsCallingSvc();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
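The visible change in the Call hunk above is the thread lookup: the scheduler indirection is replaced by the free-function accessor. A self-contained sketch of the shape of that change follows; the types are stand-ins and the motivation given in the comments is an assumption, not something this diff spells out.

// Stand-in types purely for illustration.
struct FakeThread { bool is_calling_svc = false; };

struct FakeKernel {
    FakeThread* current_thread = nullptr; // maintained by the kernel at context switch
    struct FakeScheduler { FakeThread* current = nullptr; } scheduler;
};

// Old shape: go through the scheduler object owned by the kernel.
inline FakeThread* CurrentThreadViaScheduler(FakeKernel& kernel) {
    return kernel.scheduler.current;
}

// New shape: a single accessor on kernel state, mirroring GetCurrentThreadPointer.
inline FakeThread* CurrentThreadViaPointer(FakeKernel& kernel) {
    return kernel.current_thread;
}

In the dispatcher itself the pointer is then used exactly as before (SetIsCallingSvc, then the 64- or 32-bit SVC table lookup shown in the surrounding context), so callers of the SVC layer are unaffected.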
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index fab12d070..f27cade33 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -9,34 +9,34 @@ namespace Kernel {
// Confirmed Switch kernel error codes
-constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
-constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
-constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
-constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
-constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
-constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
-constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
-constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
-constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
-constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
-constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
-constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
-constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
-constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
-constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
-constexpr ResultCode ResultInvalidPointer{ErrorModule::Kernel, 115};
-constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
-constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
-constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
-constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
-constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
-constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
-constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
-constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
-constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
-constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
-constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
-constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
-constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
+constexpr Result ResultOutOfSessions{ErrorModule::Kernel, 7};
+constexpr Result ResultInvalidArgument{ErrorModule::Kernel, 14};
+constexpr Result ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
+constexpr Result ResultTerminationRequested{ErrorModule::Kernel, 59};
+constexpr Result ResultInvalidSize{ErrorModule::Kernel, 101};
+constexpr Result ResultInvalidAddress{ErrorModule::Kernel, 102};
+constexpr Result ResultOutOfResource{ErrorModule::Kernel, 103};
+constexpr Result ResultOutOfMemory{ErrorModule::Kernel, 104};
+constexpr Result ResultOutOfHandles{ErrorModule::Kernel, 105};
+constexpr Result ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
+constexpr Result ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
+constexpr Result ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
+constexpr Result ResultInvalidPriority{ErrorModule::Kernel, 112};
+constexpr Result ResultInvalidCoreId{ErrorModule::Kernel, 113};
+constexpr Result ResultInvalidHandle{ErrorModule::Kernel, 114};
+constexpr Result ResultInvalidPointer{ErrorModule::Kernel, 115};
+constexpr Result ResultInvalidCombination{ErrorModule::Kernel, 116};
+constexpr Result ResultTimedOut{ErrorModule::Kernel, 117};
+constexpr Result ResultCancelled{ErrorModule::Kernel, 118};
+constexpr Result ResultOutOfRange{ErrorModule::Kernel, 119};
+constexpr Result ResultInvalidEnumValue{ErrorModule::Kernel, 120};
+constexpr Result ResultNotFound{ErrorModule::Kernel, 121};
+constexpr Result ResultBusy{ErrorModule::Kernel, 122};
+constexpr Result ResultSessionClosed{ErrorModule::Kernel, 123};
+constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
+constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
+constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
+constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
+constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel
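For orientation, the {ErrorModule::Kernel, N} constructors above pack a module id and a description into a single 32-bit value. A minimal sketch follows, assuming the conventional Horizon layout of a 9-bit module field in the low bits and a 13-bit description above it; the field widths are an assumption, not something this header shows.

#include <cstdint>

// Result-like value type with assumed field widths (module: bits 0-8, description: bits 9-21).
struct ResultSketch {
    std::uint32_t raw{};
    constexpr ResultSketch() = default; // success
    constexpr ResultSketch(std::uint32_t module, std::uint32_t description)
        : raw{(module & 0x1FF) | ((description & 0x1FFF) << 9)} {}
    constexpr bool IsSuccess() const { return raw == 0; }
    constexpr bool IsError() const { return raw != 0; }
};

// e.g. a kernel-module code with description 117 ("timed out" above):
constexpr ResultSketch ExampleTimedOut{1, 117};
static_assert(ExampleTimedOut.IsError());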
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2271bb80c..4bc49087e 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -33,24 +33,24 @@ static inline void FuncReturn32(Core::System& system, u32 result) {
}
////////////////////////////////////////////////////////////////////////////////////////////////////
-// Function wrappers that return type ResultCode
+// Function wrappers that return type Result
-template <ResultCode func(Core::System&, u64)>
+template <Result func(Core::System&, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0)).raw);
}
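Every wrapper in this header follows the same pattern: the typed SVC handler is a non-type template parameter, the wrapper pulls raw guest registers, casts them to the handler's parameter types, and writes Result::raw back as the return value; out-parameters are passed as locals and copied into registers afterwards. A self-contained sketch of both shapes, with Param/FuncReturn and the system type replaced by stand-ins:

#include <array>
#include <cstddef>
#include <cstdint>

struct FakeResult { std::uint32_t raw; };
struct FakeSystem { std::array<std::uint64_t, 8> regs{}; }; // stand-in guest registers

inline std::uint64_t Param(const FakeSystem& s, std::size_t i) { return s.regs[i]; }
inline void FuncReturn(FakeSystem& s, std::uint32_t v) { s.regs[0] = v; }

// Input-only wrapper: cast registers to the handler's parameter types.
template <FakeResult func(FakeSystem&, std::uint32_t, std::uint64_t)>
void WrapSketch(FakeSystem& system) {
    FuncReturn(system,
               func(system, static_cast<std::uint32_t>(Param(system, 0)), Param(system, 1)).raw);
}

// Out-parameter wrapper: call through a local, then mirror it into a register
// (the real wrappers do this with CurrentArmInterface().SetReg).
template <FakeResult func(FakeSystem&, std::uint32_t*)>
void WrapSketchOut(FakeSystem& system) {
    std::uint32_t out = 0;
    const std::uint32_t retval = func(system, &out).raw;
    system.regs[1] = out;
    FuncReturn(system, retval);
}

// Example handler and table entry:
inline FakeResult DoThing(FakeSystem&, std::uint32_t, std::uint64_t) { return FakeResult{0}; }
using WrapFn = void (*)(FakeSystem&);
constexpr WrapFn example_entry = WrapSketch<DoThing>;

Since the wrapper specialization is selected purely by the handler's signature, the ResultCode to Result rename only requires the mechanical template-parameter edits repeated throughout the rest of this file.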
-template <ResultCode func(Core::System&, u64, u64)>
+template <Result func(Core::System&, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
}
-template <ResultCode func(Core::System&, u32)>
+template <Result func(Core::System&, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
-template <ResultCode func(Core::System&, u32, u32)>
+template <Result func(Core::System&, u32, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -58,14 +58,14 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetThreadActivity
-template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::ThreadActivity>(Param(system, 1)))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u64, u64)>
+template <Result func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
Param(system, 2), Param(system, 3))
@@ -73,7 +73,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by MapProcessMemory and UnmapProcessMemory
-template <ResultCode func(Core::System&, u64, u32, u64, u64)>
+template <Result func(Core::System&, u64, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
Param(system, 2), Param(system, 3))
@@ -81,7 +81,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by ControlCodeMemory
-template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
@@ -89,7 +89,7 @@ void SvcWrap64(Core::System& system) {
.raw);
}
-template <ResultCode func(Core::System&, u32*)>
+template <Result func(Core::System&, u32*)>
void SvcWrap64(Core::System& system) {
u32 param = 0;
const u32 retval = func(system, &param).raw;
@@ -97,7 +97,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u32)>
+template <Result func(Core::System&, u32*, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -105,7 +105,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u32*)>
+template <Result func(Core::System&, u32*, u32*)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -118,7 +118,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64)>
+template <Result func(Core::System&, u32*, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -126,7 +126,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64, u32)>
+template <Result func(Core::System&, u32*, u64, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval =
@@ -136,7 +136,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64*, u32)>
+template <Result func(Core::System&, u64*, u32)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -145,12 +145,12 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u32)>
+template <Result func(Core::System&, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);
}
-template <ResultCode func(Core::System&, u64*, u64)>
+template <Result func(Core::System&, u64*, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -159,7 +159,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64*, u32, u32)>
+template <Result func(Core::System&, u64*, u32, u32)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1)),
@@ -171,7 +171,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetResourceLimitLimitValue.
-template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
+template <Result func(Core::System&, u64*, Handle, LimitableResource)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
@@ -182,13 +182,13 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32, u64)>
+template <Result func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
}
// Used by SetResourceLimitLimitValue
-template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
+template <Result func(Core::System&, Handle, LimitableResource, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
@@ -196,7 +196,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetThreadCoreMask
-template <ResultCode func(Core::System&, Handle, s32, u64)>
+template <Result func(Core::System&, Handle, s32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<s32>(Param(system, 1)), Param(system, 2))
@@ -204,44 +204,44 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetThreadCoreMask
-template <ResultCode func(Core::System&, Handle, s32*, u64*)>
+template <Result func(Core::System&, Handle, s32*, u64*)>
void SvcWrap64(Core::System& system) {
s32 param_1 = 0;
u64 param_2 = 0;
- const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
+ const Result retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
system.CurrentArmInterface().SetReg(1, param_1);
system.CurrentArmInterface().SetReg(2, param_2);
FuncReturn(system, retval.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32, u32)>
+template <Result func(Core::System&, u64, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32, u64)>
+template <Result func(Core::System&, u64, u64, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), Param(system, 3))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u32)>
+template <Result func(Core::System&, u32, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
static_cast<u32>(Param(system, 2)))
.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u64)>
+template <Result func(Core::System&, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1), Param(system, 2)).raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32)>
+template <Result func(Core::System&, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -249,7 +249,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetMemoryPermission
-template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<Svc::MemoryPermission>(Param(system, 2)))
@@ -257,14 +257,14 @@ void SvcWrap64(Core::System& system) {
}
// Used by MapSharedMemory
-template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u64)>
+template <Result func(Core::System&, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -272,7 +272,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by WaitSynchronization
-template <ResultCode func(Core::System&, s32*, u64, s32, s64)>
+template <Result func(Core::System&, s32*, u64, s32, s64)>
void SvcWrap64(Core::System& system) {
s32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<s32>(Param(system, 2)),
@@ -283,7 +283,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u64, u32, s64)>
+template <Result func(Core::System&, u64, u64, u32, s64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
@@ -291,7 +291,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetInfo
-template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
+template <Result func(Core::System&, u64*, u64, Handle, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1),
@@ -302,7 +302,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64, u64, u64, u32, s32)>
+template <Result func(Core::System&, u32*, u64, u64, u64, u32, s32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3),
@@ -314,7 +314,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateTransferMemory
-template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
@@ -326,7 +326,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateCodeMemory
-template <ResultCode func(Core::System&, Handle*, u64, u64)>
+template <Result func(Core::System&, Handle*, u64, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
@@ -335,7 +335,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
+template <Result func(Core::System&, Handle*, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
@@ -347,7 +347,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by WaitForAddress
-template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
+template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system,
func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -356,7 +356,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SignalToAddress
-template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
+template <Result func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system,
func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
@@ -425,7 +425,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by QueryMemory32, ArbitrateLock32
-template <ResultCode func(Core::System&, u32, u32, u32)>
+template <Result func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn32(system,
func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
@@ -456,7 +456,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateThread32
-template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
+template <Result func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;
@@ -469,7 +469,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetInfo32
-template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
+template <Result func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -484,7 +484,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadPriority32, ConnectToNamedPort32
-template <ResultCode func(Core::System&, u32*, u32)>
+template <Result func(Core::System&, u32*, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param32(system, 1)).raw;
@@ -493,7 +493,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadId32
-template <ResultCode func(Core::System&, u32*, u32*, u32)>
+template <Result func(Core::System&, u32*, u32*, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -516,7 +516,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateEvent32
-template <ResultCode func(Core::System&, Handle*, Handle*)>
+template <Result func(Core::System&, Handle*, Handle*)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;
Handle param_2 = 0;
@@ -528,7 +528,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadId32
-template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
+template <Result func(Core::System&, Handle, u32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -542,7 +542,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadCoreMask32
-template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
+template <Result func(Core::System&, Handle, s32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
s32 param_1 = 0;
u32 param_2 = 0;
@@ -562,7 +562,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadActivity32
-template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<Svc::ThreadActivity>(Param(system, 1)))
@@ -571,7 +571,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadPriority32
-template <ResultCode func(Core::System&, Handle, u32)>
+template <Result func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
@@ -579,7 +579,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetMemoryAttribute32
-template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
+template <Result func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -589,7 +589,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by MapSharedMemory32
-template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
@@ -599,7 +599,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadCoreMask32
-template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
+template <Result func(Core::System&, Handle, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
@@ -609,7 +609,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitProcessWideKeyAtomic32
-template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
+template <Result func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -620,7 +620,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitForAddress32
-template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
+template <Result func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -631,7 +631,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SignalToAddress32
-template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
+template <Result func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::SignalType>(Param(system, 1)),
@@ -641,13 +641,13 @@ void SvcWrap32(Core::System& system) {
}
// Used by SendSyncRequest32, ArbitrateUnlock32
-template <ResultCode func(Core::System&, u32)>
+template <Result func(Core::System&, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
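One wrinkle specific to the SvcWrap32 family: 64-bit SVC arguments such as addresses and timeouts arrive split across two 32-bit guest registers, so either the wrapper or the handler has to stitch the halves back together; WaitForAddress32 above, for instance, takes its timeout as two u32 parameters. A small sketch of that recombination; the register order (low word first) is an assumption for illustration.

#include <cstdint>

// Rebuild a signed 64-bit value (e.g. a nanosecond timeout) from two 32-bit halves.
inline std::int64_t Combine64(std::uint32_t low, std::uint32_t high) {
    return static_cast<std::int64_t>((static_cast<std::uint64_t>(high) << 32) | low);
}

// e.g. Combine64(timeout_low, timeout_high) inside a *32 handler, or the equivalent
// expression directly in a wrapper that forwards a single s64 to the shared handler.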
// Used by CreateTransferMemory32
-template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
@@ -658,7 +658,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitSynchronization32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
+template <Result func(Core::System&, u32, u32, s32, u32, s32*)>
void SvcWrap32(Core::System& system) {
s32 param_1 = 0;
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
@@ -669,7 +669,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateCodeMemory32
-template <ResultCode func(Core::System&, Handle*, u32, u32)>
+template <Result func(Core::System&, Handle*, u32, u32)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
@@ -680,7 +680,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by ControlCodeMemory32
-template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),