Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp  68
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h  18
-rw-r--r--  src/core/hle/kernel/code_set.h  14
-rw-r--r--  src/core/hle/kernel/k_address_space_info.cpp  4
-rw-r--r--  src/core/hle/kernel/k_capabilities.cpp  39
-rw-r--r--  src/core/hle/kernel/k_capabilities.h  21
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp  22
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h  9
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp  4
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h  10
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp  2
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h  8
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp  121
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h  12
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp  3519
-rw-r--r--  src/core/hle/kernel/k_page_table.h  542
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp  5739
-rw-r--r--  src/core/hle/kernel/k_page_table_base.h  760
-rw-r--r--  src/core/hle/kernel/k_process.cpp  1453
-rw-r--r--  src/core/hle/kernel/k_process.h  734
-rw-r--r--  src/core/hle/kernel/k_process_page_table.h  481
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp  4
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp  2
-rw-r--r--  src/core/hle/kernel/k_system_resource.cpp  87
-rw-r--r--  src/core/hle/kernel/k_thread.cpp  28
-rw-r--r--  src/core/hle/kernel/k_thread.h  17
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp  4
-rw-r--r--  src/core/hle/kernel/kernel.cpp  53
-rw-r--r--  src/core/hle/kernel/kernel.h  3
-rw-r--r--  src/core/hle/kernel/physical_core.cpp  14
-rw-r--r--  src/core/hle/kernel/process_capability.cpp  389
-rw-r--r--  src/core/hle/kernel/process_capability.h  266
-rw-r--r--  src/core/hle/kernel/svc.cpp  2
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp  28
-rw-r--r--  src/core/hle/kernel/svc/svc_lock.cpp  4
-rw-r--r--  src/core/hle/kernel/svc/svc_memory.cpp  6
-rw-r--r--  src/core/hle/kernel/svc/svc_physical_memory.cpp  13
-rw-r--r--  src/core/hle/kernel/svc/svc_process_memory.cpp  3
-rw-r--r--  src/core/hle/kernel/svc/svc_query_memory.cpp  8
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp  2
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp  7
-rw-r--r--  src/core/hle/kernel/svc_generator.py  2
-rw-r--r--  src/core/hle/kernel/svc_types.h  46
43 files changed, 8871 insertions, 5697 deletions
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 4cfdf4558..37fa39a73 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -8,7 +8,11 @@
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_trace.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel::Board::Nintendo::Nx {
@@ -30,6 +34,8 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;
+constexpr const std::size_t SecureAlignment = 128_KiB;
+
namespace {
using namespace Common::Literals;
@@ -183,4 +189,66 @@ u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
return GenerateUniformRange(min, max, GenerateRandomU64);
}
+size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
+ if (pool == static_cast<u32>(KMemoryManager::Pool::Applet)) {
+ return 0;
+ } else {
+ // return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
+ return size;
+ }
+}
+
+Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
+ u32 pool) {
+ // Applet secure memory is handled separately.
+ UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
+
+ // Ensure the size is aligned.
+ const size_t alignment =
+ (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
+ R_UNLESS(Common::IsAligned(size, alignment), ResultInvalidSize);
+
+ // Allocate the memory.
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress paddr = kernel.MemoryManager().AllocateAndOpenContinuous(
+ num_pages, alignment / PageSize,
+ KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool),
+ KMemoryManager::Direction::FromFront));
+ R_UNLESS(paddr != 0, ResultOutOfMemory);
+
+ // Ensure we don't leak references to the memory on error.
+ ON_RESULT_FAILURE {
+ kernel.MemoryManager().Close(paddr, num_pages);
+ };
+
+ // We succeeded.
+ *out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
+ R_SUCCEED();
+}
+
+void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
+ u32 pool) {
+ // Applet secure memory is handled separately.
+ UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
+
+ // Ensure the size is aligned.
+ const size_t alignment =
+ (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
+ ASSERT(Common::IsAligned(GetInteger(address), alignment));
+ ASSERT(Common::IsAligned(size, alignment));
+
+ // Close the secure region's pages.
+ kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
+ size / PageSize);
+}
+
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+ return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+ return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
} // namespace Kernel::Board::Nintendo::Nx
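
Aside (not from the diff): the allocate/free pair above enforces a pool-dependent alignment rule: System-pool secure memory only needs page alignment, while every other pool needs the 128 KiB SecureAlignment. A minimal standalone sketch of that rule; the pool names and constants mirror the diff, everything else is illustrative.

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;             // 4 KiB, as in the kernel
constexpr std::size_t SecureAlignment = 128 * 1024;  // 128_KiB from the diff

enum class Pool : std::uint32_t { Application, Applet, System, SystemNonSecure };

constexpr bool IsAligned(std::size_t value, std::size_t alignment) {
    return (value & (alignment - 1)) == 0; // power-of-two alignments only
}

// System-pool secure memory only needs page alignment; other pools need 128 KiB.
constexpr bool IsValidSecureSize(std::size_t size, Pool pool) {
    const std::size_t alignment = (pool == Pool::System) ? PageSize : SecureAlignment;
    return IsAligned(size, alignment);
}

static_assert(IsValidSecureSize(0x1000, Pool::System));
static_assert(!IsValidSecureSize(0x1000, Pool::Application)); // needs 128 KiB
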
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index b477e8193..60c5e58b7 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -4,6 +4,12 @@
#pragma once
#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+class KernelCore;
+class KResourceLimit;
+} // namespace Kernel
namespace Kernel::Board::Nintendo::Nx {
@@ -25,8 +31,20 @@ public:
static std::size_t GetMinimumNonSecureSystemPoolSize();
};
+ // Randomness.
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
+
+ // Secure Memory.
+ static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
+ static Result AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
+ u32 pool);
+ static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
+ u32 pool);
+
+ // Insecure Memory.
+ static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+ static u32 GetInsecureMemoryPool();
};
} // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index af1af2b78..4d2d0098e 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -75,12 +75,26 @@ struct CodeSet final {
return segments[2];
}
+#ifdef HAS_NCE
+ Segment& PatchSegment() {
+ return patch_segment;
+ }
+
+ const Segment& PatchSegment() const {
+ return patch_segment;
+ }
+#endif
+
/// The overall data that backs this code set.
Kernel::PhysicalMemory memory;
/// The segments that comprise this code set.
std::array<Segment, 3> segments;
+#ifdef HAS_NCE
+ Segment patch_segment;
+#endif
+
/// The entry point address for this code set.
KProcessAddress entrypoint = 0;
};
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index 32173e52b..23258071e 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -25,8 +25,8 @@ constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 36, .address = 2_GiB , .size = 64_GiB - 2_GiB , .type = KAddressSpaceInfo::Type::MapLarge, },
{ .bit_width = 36, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Alias, },
-#ifdef ANDROID
- // With Android, we use a 38-bit address space due to memory limitations. This should (safely) truncate ASLR region.
+#ifdef HAS_NCE
+ // With NCE, we use a 38-bit address space due to memory limitations. This should (safely) truncate ASLR region.
{ .bit_width = 39, .address = 128_MiB , .size = 256_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
#else
{ .bit_width = 39, .address = 128_MiB , .size = 512_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index e7da7a21d..274fee493 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -4,14 +4,16 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
+#include "core/hle/kernel/k_trace.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_version.h"
namespace Kernel {
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+ KProcessPageTable* page_table) {
// We're initializing an initial process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
@@ -41,7 +43,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
R_RETURN(this->SetCapabilities(kern_caps, page_table));
}
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+ KProcessPageTable* page_table) {
// We're initializing a user process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
@@ -121,7 +124,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
R_SUCCEED();
}
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
const auto range_pack = MapRange{cap};
const auto size_pack = MapRangeSize{size_cap};
@@ -142,16 +145,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p
? KMemoryPermission::UserRead
: KMemoryPermission::UserReadWrite;
if (MapRangeSize{size_cap}.normal) {
- // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+ R_RETURN(page_table->MapStatic(phys_addr, size, perm));
} else {
- // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+ R_RETURN(page_table->MapIo(phys_addr, size, perm));
}
-
- UNIMPLEMENTED();
- R_SUCCEED();
}
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
// Get/validate address/size
const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
const size_t num_pages = 1;
@@ -160,10 +160,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
// Do the mapping.
- // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
- UNIMPLEMENTED();
- R_SUCCEED();
+ R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
}
template <typename F>
@@ -200,13 +197,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
R_SUCCEED();
}
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
// Map each region into the process's page table.
return ProcessMapRegionCapability(
- cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
- // R_RETURN(page_table->MapRegion(region_type, perm));
- UNIMPLEMENTED();
- R_SUCCEED();
+ cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ R_RETURN(page_table->MapRegion(region_type, perm));
});
}
@@ -280,7 +275,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
}
Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
- KPageTable* page_table) {
+ KProcessPageTable* page_table) {
// Validate this is a capability we can act on.
const auto type = GetCapabilityType(cap);
R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +313,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
}
}
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
u32 set_flags = 0, set_svc = 0;
for (size_t i = 0; i < caps.size(); i++) {
@@ -335,6 +330,8 @@ Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* pag
// Map the range.
R_TRY(this->MapRange_(cap, size_cap, page_table));
+ } else if (GetCapabilityType(cap) == CapabilityType::MapRegion && !IsKTraceEnabled) {
+ continue;
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
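
Aside (not from the diff): SetCapabilities walks the descriptor array one word at a time, except that a MapRange capability consumes the following word as its size descriptor, and MapRegion words are now skipped outright when KTrace is disabled. A hedged sketch of that loop shape; the decoder below is a stand-in, since the real type comes from the low-bit pattern of the word.

#include <cstddef>
#include <cstdint>
#include <span>

enum class CapabilityType { Invalid, MapRange, MapRegion, Other };

// Stand-in decoder for illustration only; the real kernel derives the type
// from the pattern of low bits in the capability word (see k_capabilities.h).
static CapabilityType DecodeType(std::uint32_t cap) {
    switch (cap & 0x3) {
    case 1:  return CapabilityType::MapRange;
    case 2:  return CapabilityType::MapRegion;
    default: return CapabilityType::Other;
    }
}

// Mirrors the loop in SetCapabilities above. Returns false on a truncated
// two-word capability.
bool WalkCapabilities(std::span<const std::uint32_t> caps, bool ktrace_enabled) {
    for (std::size_t i = 0; i < caps.size(); i++) {
        const std::uint32_t cap = caps[i];
        const CapabilityType type = DecodeType(cap);
        if (type == CapabilityType::MapRange) {
            if (++i >= caps.size()) {
                return false; // the size word must follow the range word
            }
            // ... map using cap and caps[i] ...
        } else if (type == CapabilityType::MapRegion && !ktrace_enabled) {
            continue; // region mappings only matter when tracing is enabled
        } else {
            // ... handle a single-word capability ...
        }
    }
    return true;
}
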
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index de766c811..013d952ad 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -15,15 +15,15 @@
namespace Kernel {
-class KPageTable;
+class KProcessPageTable;
class KernelCore;
class KCapabilities {
public:
constexpr explicit KCapabilities() = default;
- Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
- Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+ Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+ Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
@@ -200,8 +200,8 @@ private:
RawCapabilityValue raw;
BitField<0, 15, CapabilityType> id;
- BitField<15, 4, u32> major_version;
- BitField<19, 13, u32> minor_version;
+ BitField<15, 4, u32> minor_version;
+ BitField<19, 13, u32> major_version;
};
union HandleTable {
@@ -264,9 +264,9 @@ private:
Result SetCorePriorityCapability(const u32 cap);
Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
- Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
- Result MapIoPage_(const u32 cap, KPageTable* page_table);
- Result MapRegion_(const u32 cap, KPageTable* page_table);
+ Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+ Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+ Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
Result SetInterruptPairCapability(const u32 cap);
Result SetProgramTypeCapability(const u32 cap);
Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
static Result ProcessMapRegionCapability(const u32 cap, F f);
static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
- Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
- Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+ Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+ KProcessPageTable* page_table);
+ Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
private:
Svc::SvcAccessFlagSet m_svc_access_flags{};
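
Aside (not from the diff): the KernelVersion change above swaps two BitField declarations so that the 4-bit minor version sits at bit 15 and the 13-bit major version at bit 19. A small sketch of the corrected layout, using plain shifts and masks in place of Common::BitField:

#include <cstdint>

constexpr std::uint32_t EncodeKernelVersion(std::uint32_t major, std::uint32_t minor) {
    return ((major & 0x1FFF) << 19) | ((minor & 0xF) << 15);
}

constexpr std::uint32_t MajorVersion(std::uint32_t cap) { return (cap >> 19) & 0x1FFF; }
constexpr std::uint32_t MinorVersion(std::uint32_t cap) { return (cap >> 15) & 0xF; }

static_assert(MajorVersion(EncodeKernelVersion(8, 3)) == 8);
static_assert(MinorVersion(EncodeKernelVersion(8, 3)) == 3);
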
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index efbac0e6a..7633a51fb 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -107,12 +107,12 @@ KConditionVariable::KConditionVariable(Core::System& system)
KConditionVariable::~KConditionVariable() = default;
-Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
- KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
+Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress addr) {
+ KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
- KScopedSchedulerLock sl(m_kernel);
+ KScopedSchedulerLock sl(kernel);
// Remove waiter thread.
bool has_waiters{};
@@ -133,7 +133,7 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
// Write the value to userspace.
Result result{ResultSuccess};
- if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
+ if (WriteToUser(kernel, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -148,28 +148,28 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
}
}
-Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
- KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
- ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
+Result KConditionVariable::WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
+ u32 value) {
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
+ ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
KThread* owner_thread{};
{
- KScopedSchedulerLock sl(m_kernel);
+ KScopedSchedulerLock sl(kernel);
// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
// Read the tag from userspace.
u32 test_tag{};
- R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
- ResultInvalidCurrentMemory);
+ R_UNLESS(ReadFromUser(kernel, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
// Get the lock owner thread.
- owner_thread = GetCurrentProcess(m_kernel)
+ owner_thread = GetCurrentProcess(kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();
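
Aside (not from the diff): WaitForAddress only puts the caller to sleep if the lock word in guest memory still holds the owner's handle with the wait-mask bit set; otherwise the lock was already released and the wait returns immediately. A minimal sketch of that tag test, assuming Horizon's wait-mask bit (bit 30):

#include <cstdint>

constexpr std::uint32_t HandleWaitMask = 1u << 30; // Svc::HandleWaitMask

// Returns true if the caller should go to sleep on the address.
constexpr bool ShouldWait(std::uint32_t tag_in_memory, std::uint32_t owner_handle) {
    return tag_in_memory == (owner_handle | HandleWaitMask);
}

static_assert(ShouldWait(0x4000ABCD, 0xABCD));
static_assert(!ShouldWait(0xABCD, 0xABCD)); // mask clear: lock already released
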
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 8c2f3ae51..2620c8e39 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -24,11 +24,12 @@ public:
explicit KConditionVariable(Core::System& system);
~KConditionVariable();
- // Arbitration
- Result SignalToAddress(KProcessAddress addr);
- Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
+ // Arbitration.
+ static Result SignalToAddress(KernelCore& kernel, KProcessAddress addr);
+ static Result WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
+ u32 value);
- // Condition variable
+ // Condition variable.
void Signal(u64 cv_key, s32 count);
Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index f48896715..f0703f795 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
R_SUCCEED();
}
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
R_SUCCEED();
}
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 18556e3cc..ff0ec8152 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,7 +5,7 @@
#include <string>
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);
- Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}
- Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}
- Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address);
static void Initialize();
private:
- Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option, bool is_aligned);
private:
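
Aside (not from the diff): Map and Unmap both begin with the containment check visible in the hunks above, comparing last addresses (start + size - 1) rather than one-past-the-end so that a range touching the top of the device address space cannot overflow. Standalone sketch:

#include <cstdint>

constexpr bool Contains(std::uint64_t space_start, std::uint64_t space_size,
                        std::uint64_t addr, std::uint64_t size) {
    // Comparing last addresses avoids wraparound at the top of the space.
    return space_start <= addr &&
           addr + size - 1 <= space_start + space_size - 1;
}

static_assert(Contains(0x1000, 0x1000, 0x1800, 0x800));  // fits exactly
static_assert(!Contains(0x1000, 0x1000, 0x1800, 0x801)); // one byte past end
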
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index fe6a20168..22d79569a 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -22,7 +22,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
KScopedSchedulerLock sl{kernel};
// Pin the current thread.
- process->PinCurrentThread(core_id);
+ process->PinCurrentThread();
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index c8122644f..d7adb3169 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -394,6 +394,14 @@ private:
return region.GetEndAddress();
}
+public:
+ static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+ return Find(address, layout.GetVirtualMemoryRegionTree());
+ }
+ static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+ return Find(address, layout.GetPhysicalMemoryRegionTree());
+ }
+
private:
u64 m_linear_phys_to_virt_diff{};
u64 m_linear_virt_to_phys_diff{};
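
Aside (not from the diff): the new Find overloads dispatch on the strongly typed address, so a KVirtualAddress is looked up in the virtual region tree and a KPhysicalAddress in the physical one, with no way to mix them up at a call site. Illustrative sketch with stand-in types:

#include <cstdint>

// Stand-ins for the kernel's strongly typed addresses and region trees.
struct VirtAddr { std::uint64_t raw; };
struct PhysAddr { std::uint64_t raw; };

struct Layout {
    std::uint64_t virt_base;
    std::uint64_t phys_base;
};

// Same name, different tree, selected purely by the address type.
inline bool Contains(const Layout& l, VirtAddr a) { return a.raw >= l.virt_base; }
inline bool Contains(const Layout& l, PhysAddr a) { return a.raw >= l.phys_base; }
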
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 637558e10..0a973ec8c 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -11,6 +11,7 @@
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -168,11 +169,37 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
- UNREACHABLE();
+ const u32 pool_index = static_cast<u32>(pool);
+
+ // Lock the pool.
+ KScopedLightLock lk(m_pool_locks[pool_index]);
+
+ // Check that we don't already have an optimized process.
+ R_UNLESS(!m_has_optimized_process[pool_index], ResultBusy);
+
+ // Set the optimized process id.
+ m_optimized_process_ids[pool_index] = process_id;
+ m_has_optimized_process[pool_index] = true;
+
+ // Clear the management area for the optimized process.
+ for (auto* manager = this->GetFirstManager(pool, Direction::FromFront); manager != nullptr;
+ manager = this->GetNextManager(manager, Direction::FromFront)) {
+ manager->InitializeOptimizedMemory(m_system.Kernel());
+ }
+
+ R_SUCCEED();
}
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
- UNREACHABLE();
+ const u32 pool_index = static_cast<u32>(pool);
+
+ // Lock the pool.
+ KScopedLightLock lk(m_pool_locks[pool_index]);
+
+ // If the process was optimized, clear it.
+ if (m_has_optimized_process[pool_index] && m_optimized_process_ids[pool_index] == process_id) {
+ m_has_optimized_process[pool_index] = false;
+ }
}
KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
@@ -207,7 +234,7 @@ KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, siz
// Maintain the optimized memory bitmap, if we should.
if (m_has_optimized_process[static_cast<size_t>(pool)]) {
- UNIMPLEMENTED();
+ chosen_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block, num_pages);
}
// Open the first reference to the pages.
@@ -255,7 +282,8 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
// Maintain the optimized memory bitmap, if we should.
if (unoptimized) {
- UNIMPLEMENTED();
+ cur_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block,
+ pages_per_alloc);
}
num_pages -= pages_per_alloc;
@@ -358,8 +386,8 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
- any_new =
- manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+ any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
+ cur_pages, fill_pattern);
// Advance.
cur_address += cur_pages * PageSize;
@@ -382,7 +410,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Track some or all of the current pages.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
- manager.TrackOptimizedAllocation(cur_address, cur_pages);
+ manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);
// Advance.
cur_address += cur_pages * PageSize;
@@ -427,17 +455,82 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
return total_management_size;
}
-void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
- UNREACHABLE();
+void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+ std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
}
-void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
- UNREACHABLE();
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages) {
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+ // Get the range we're tracking.
+ size_t offset = this->GetPageOffset(block);
+ const size_t last = offset + num_pages - 1;
+
+ // Track.
+ while (offset <= last) {
+ // Mark the page as not being optimized-allocated.
+ optimize_map[offset / Common::BitSize<u64>()] &=
+ ~(u64(1) << (offset % Common::BitSize<u64>()));
+
+ offset++;
+ }
+}
+
+void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages) {
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+ // Get the range we're tracking.
+ size_t offset = this->GetPageOffset(block);
+ const size_t last = offset + num_pages - 1;
+
+ // Track.
+ while (offset <= last) {
+ // Mark the page as being optimized-allocated.
+ optimize_map[offset / Common::BitSize<u64>()] |=
+ (u64(1) << (offset % Common::BitSize<u64>()));
+
+ offset++;
+ }
}
-bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
- u8 fill_pattern) {
- UNREACHABLE();
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages, u8 fill_pattern) {
+ auto& device_memory = kernel.System().DeviceMemory();
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
+
+ // We want to return whether any pages were newly allocated.
+ bool any_new = false;
+
+ // Get the range we're processing.
+ size_t offset = this->GetPageOffset(block);
+ const size_t last = offset + num_pages - 1;
+
+ // Process.
+ while (offset <= last) {
+ // Check if the page has been optimized-allocated before.
+ if ((optimize_map[offset / Common::BitSize<u64>()] &
+ (u64(1) << (offset % Common::BitSize<u64>()))) == 0) {
+ // If not, it's new.
+ any_new = true;
+
+ // Fill the page.
+ auto* ptr = device_memory.GetPointer<u8>(m_heap.GetAddress());
+ std::memset(ptr + offset * PageSize, fill_pattern, PageSize);
+ }
+
+ offset++;
+ }
+
+ // Return whether any pages were newly allocated.
+ return any_new;
}
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
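
Aside (not from the diff): the three routines above all operate on the same per-pool bitmap, one bit per page. TrackOptimizedAllocation sets bits, TrackUnoptimizedAllocation clears them, and ProcessOptimizedAllocation tests each bit, filling pages whose bit is clear with fill_pattern. A minimal sketch of the word/bit arithmetic, assuming 64-bit map words:

#include <cstddef>
#include <cstdint>

constexpr std::size_t BitsPerWord = 64;

inline void SetPage(std::uint64_t* map, std::size_t page) {
    map[page / BitsPerWord] |= (std::uint64_t{1} << (page % BitsPerWord));
}
inline void ClearPage(std::uint64_t* map, std::size_t page) {
    map[page / BitsPerWord] &= ~(std::uint64_t{1} << (page % BitsPerWord));
}
inline bool TestPage(const std::uint64_t* map, std::size_t page) {
    return (map[page / BitsPerWord] >> (page % BitsPerWord)) & 1;
}

// Track* walks [first, first + count), setting or clearing each page's bit.
inline void MarkRange(std::uint64_t* map, std::size_t first, std::size_t count, bool set) {
    for (std::size_t i = 0; i < count; ++i) {
        set ? SetPage(map, first + i) : ClearPage(map, first + i);
    }
}
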
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 7e4b41319..c5a487af9 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -216,14 +216,14 @@ private:
m_heap.SetInitialUsedSize(reserved_size);
}
- void InitializeOptimizedMemory() {
- UNIMPLEMENTED();
- }
+ void InitializeOptimizedMemory(KernelCore& kernel);
- void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
- void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
+ void TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages);
+ void TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages);
- bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
+ bool ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return m_pool;
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
deleted file mode 100644
index 217ccbae3..000000000
--- a/src/core/hle/kernel/k_page_table.cpp
+++ /dev/null
@@ -1,3519 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/literals.h"
-#include "common/scope_exit.h"
-#include "common/settings.h"
-#include "core/core.h"
-#include "core/hle/kernel/k_address_space_info.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/k_system_resource.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-namespace {
-
-class KScopedLightLockPair {
- YUZU_NON_COPYABLE(KScopedLightLockPair);
- YUZU_NON_MOVEABLE(KScopedLightLockPair);
-
-private:
- KLightLock* m_lower;
- KLightLock* m_upper;
-
-public:
- KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
- // Ensure our locks are in a consistent order.
- if (std::addressof(lhs) <= std::addressof(rhs)) {
- m_lower = std::addressof(lhs);
- m_upper = std::addressof(rhs);
- } else {
- m_lower = std::addressof(rhs);
- m_upper = std::addressof(lhs);
- }
-
- // Acquire both locks.
- m_lower->Lock();
- if (m_lower != m_upper) {
- m_upper->Lock();
- }
- }
-
- ~KScopedLightLockPair() {
- // Unlock the upper lock.
- if (m_upper != nullptr && m_upper != m_lower) {
- m_upper->Unlock();
- }
-
- // Unlock the lower lock.
- if (m_lower != nullptr) {
- m_lower->Unlock();
- }
- }
-
-public:
- // Utility.
- void TryUnlockHalf(KLightLock& lock) {
- // Only allow unlocking if the lock is half the pair.
- if (m_lower != m_upper) {
- // We want to be sure the lock is one we own.
- if (m_lower == std::addressof(lock)) {
- lock.Unlock();
- m_lower = nullptr;
- } else if (m_upper == std::addressof(lock)) {
- lock.Unlock();
- m_upper = nullptr;
- }
- }
- }
-};
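
Aside (not from the diff): the KScopedLightLockPair being deleted here encodes a classic deadlock-avoidance rule: when two locks must be held together, acquire them in a globally consistent order (here, by address), and skip the second acquisition when both references name the same lock. A hedged standalone sketch using std::mutex in place of KLightLock:

#include <memory>
#include <mutex>

class ScopedLockPair {
public:
    ScopedLockPair(std::mutex& a, std::mutex& b) {
        // Order by address so any two threads locking the same pair agree
        // on acquisition order, which rules out lock-order deadlock.
        if (std::addressof(a) <= std::addressof(b)) {
            m_lower = std::addressof(a);
            m_upper = std::addressof(b);
        } else {
            m_lower = std::addressof(b);
            m_upper = std::addressof(a);
        }
        m_lower->lock();
        if (m_lower != m_upper) { // passing the same lock twice is allowed
            m_upper->lock();
        }
    }
    ~ScopedLockPair() {
        if (m_upper != m_lower) {
            m_upper->unlock();
        }
        m_lower->unlock();
    }

private:
    std::mutex* m_lower;
    std::mutex* m_upper;
};
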
-
-using namespace Common::Literals;
-
-constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
- switch (as_type) {
- case FileSys::ProgramAddressSpaceType::Is32Bit:
- case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
- return 32;
- case FileSys::ProgramAddressSpaceType::Is36Bit:
- return 36;
- case FileSys::ProgramAddressSpaceType::Is39Bit:
- return 39;
- default:
- ASSERT(false);
- return {};
- }
-}
-
-} // namespace
-
-KPageTable::KPageTable(Core::System& system_)
- : m_general_lock{system_.Kernel()},
- m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
-
-KPageTable::~KPageTable() = default;
-
-Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- bool enable_das_merge, bool from_back,
- KMemoryManager::Pool pool, KProcessAddress code_addr,
- size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit,
- Core::Memory::Memory& memory) {
-
- const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
- };
- const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
- };
-
- // Set the tracking memory
- m_memory = std::addressof(memory);
-
- // Set our width and heap/alias sizes
- m_address_space_width = GetAddressSpaceWidthFromType(as_type);
- const KProcessAddress start = 0;
- const KProcessAddress end{1ULL << m_address_space_width};
- size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
- size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
-
- ASSERT(code_addr < code_addr + code_size);
- ASSERT(code_addr + code_size - 1 <= end - 1);
-
- // Adjust heap/alias size if we don't have an alias region
- if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
- heap_region_size += alias_region_size;
- alias_region_size = 0;
- }
-
- // Set code regions and determine remaining
- constexpr size_t RegionAlignment{2_MiB};
- KProcessAddress process_code_start{};
- KProcessAddress process_code_end{};
- size_t stack_region_size{};
- size_t kernel_map_region_size{};
-
- if (m_address_space_width == 39) {
- alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
- heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
- stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
- kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
- m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
- m_alias_code_region_start = m_code_region_start;
- m_alias_code_region_end = m_code_region_end;
- process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
- process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
- } else {
- stack_region_size = 0;
- kernel_map_region_size = 0;
- m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
- m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- m_stack_region_start = m_code_region_start;
- m_alias_code_region_start = m_code_region_start;
- m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
- GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
- m_stack_region_end = m_code_region_end;
- m_kernel_map_region_start = m_code_region_start;
- m_kernel_map_region_end = m_code_region_end;
- process_code_start = m_code_region_start;
- process_code_end = m_code_region_end;
- }
-
- // Set other basic fields
- m_enable_aslr = enable_aslr;
- m_enable_device_address_space_merge = enable_das_merge;
- m_address_space_start = start;
- m_address_space_end = end;
- m_is_kernel = false;
- m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
- m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
- m_resource_limit = resource_limit;
-
- // Determine the region we can place our undetermineds in
- KProcessAddress alloc_start{};
- size_t alloc_size{};
- if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
- alloc_start = m_code_region_start;
- alloc_size = process_code_start - m_code_region_start;
- } else {
- alloc_start = process_code_end;
- alloc_size = end - process_code_end;
- }
- const size_t needed_size =
- (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
- R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
-
- const size_t remaining_size{alloc_size - needed_size};
-
- // Determine random placements for each region
- size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
- if (enable_aslr) {
- alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- }
-
- // Setup heap and alias regions
- m_alias_region_start = alloc_start + alias_rnd;
- m_alias_region_end = m_alias_region_start + alias_region_size;
- m_heap_region_start = alloc_start + heap_rnd;
- m_heap_region_end = m_heap_region_start + heap_region_size;
-
- if (alias_rnd <= heap_rnd) {
- m_heap_region_start += alias_region_size;
- m_heap_region_end += alias_region_size;
- } else {
- m_alias_region_start += heap_region_size;
- m_alias_region_end += heap_region_size;
- }
-
- // Setup stack region
- if (stack_region_size) {
- m_stack_region_start = alloc_start + stack_rnd;
- m_stack_region_end = m_stack_region_start + stack_region_size;
-
- if (alias_rnd < stack_rnd) {
- m_stack_region_start += alias_region_size;
- m_stack_region_end += alias_region_size;
- } else {
- m_alias_region_start += stack_region_size;
- m_alias_region_end += stack_region_size;
- }
-
- if (heap_rnd < stack_rnd) {
- m_stack_region_start += heap_region_size;
- m_stack_region_end += heap_region_size;
- } else {
- m_heap_region_start += stack_region_size;
- m_heap_region_end += stack_region_size;
- }
- }
-
- // Setup kernel map region
- if (kernel_map_region_size) {
- m_kernel_map_region_start = alloc_start + kmap_rnd;
- m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
-
- if (alias_rnd < kmap_rnd) {
- m_kernel_map_region_start += alias_region_size;
- m_kernel_map_region_end += alias_region_size;
- } else {
- m_alias_region_start += kernel_map_region_size;
- m_alias_region_end += kernel_map_region_size;
- }
-
- if (heap_rnd < kmap_rnd) {
- m_kernel_map_region_start += heap_region_size;
- m_kernel_map_region_end += heap_region_size;
- } else {
- m_heap_region_start += kernel_map_region_size;
- m_heap_region_end += kernel_map_region_size;
- }
-
- if (stack_region_size) {
- if (stack_rnd < kmap_rnd) {
- m_kernel_map_region_start += stack_region_size;
- m_kernel_map_region_end += stack_region_size;
- } else {
- m_stack_region_start += kernel_map_region_size;
- m_stack_region_end += kernel_map_region_size;
- }
- }
- }
-
- // Set heap and fill members.
- m_current_heap_end = m_heap_region_start;
- m_max_heap_size = 0;
- m_mapped_physical_memory_size = 0;
- m_mapped_unsafe_physical_memory = 0;
- m_mapped_insecure_memory = 0;
- m_mapped_ipc_server_memory = 0;
-
- m_heap_fill_value = 0;
- m_ipc_fill_value = 0;
- m_stack_fill_value = 0;
-
- // Set allocation option.
- m_allocate_option =
- KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
- : KMemoryManager::Direction::FromFront);
-
- // Ensure that our regions are inside our address space
- auto IsInAddressSpace = [&](KProcessAddress addr) {
- return m_address_space_start <= addr && addr <= m_address_space_end;
- };
- ASSERT(IsInAddressSpace(m_alias_region_start));
- ASSERT(IsInAddressSpace(m_alias_region_end));
- ASSERT(IsInAddressSpace(m_heap_region_start));
- ASSERT(IsInAddressSpace(m_heap_region_end));
- ASSERT(IsInAddressSpace(m_stack_region_start));
- ASSERT(IsInAddressSpace(m_stack_region_end));
- ASSERT(IsInAddressSpace(m_kernel_map_region_start));
- ASSERT(IsInAddressSpace(m_kernel_map_region_end));
-
- // Ensure that we selected regions that don't overlap
- const KProcessAddress alias_start{m_alias_region_start};
- const KProcessAddress alias_last{m_alias_region_end - 1};
- const KProcessAddress heap_start{m_heap_region_start};
- const KProcessAddress heap_last{m_heap_region_end - 1};
- const KProcessAddress stack_start{m_stack_region_start};
- const KProcessAddress stack_last{m_stack_region_end - 1};
- const KProcessAddress kmap_start{m_kernel_map_region_start};
- const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
- ASSERT(alias_last < heap_start || heap_last < alias_start);
- ASSERT(alias_last < stack_start || stack_last < alias_start);
- ASSERT(alias_last < kmap_start || kmap_last < alias_start);
- ASSERT(heap_last < stack_start || stack_last < heap_start);
- ASSERT(heap_last < kmap_start || kmap_last < heap_start);
-
- m_current_heap_end = m_heap_region_start;
- m_max_heap_size = 0;
- m_mapped_physical_memory_size = 0;
- m_memory_pool = pool;
-
- m_page_table_impl = std::make_unique<Common::PageTable>();
- m_page_table_impl->Resize(m_address_space_width, PageBits);
-
- // Initialize our memory block manager.
- R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
- m_memory_block_slab_manager));
-}
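
Aside (not from the diff): the region-placement logic being removed draws a random 2 MiB-aligned offset for each of the alias/heap/stack/kernel-map regions, then shifts whichever region drew the higher offset past the other so randomized regions never overlap. Simplified two-region sketch, assuming the random offsets are already multiples of RegionAlignment:

#include <cstddef>
#include <cstdint>

constexpr std::size_t RegionAlignment = 2 * 1024 * 1024; // 2_MiB

struct Region {
    std::uint64_t start;
    std::uint64_t end;
};

// rnd_a and rnd_b are random multiples of RegionAlignment within the free span.
inline void PlaceTwoRegions(std::uint64_t alloc_start, std::size_t size_a,
                            std::size_t size_b, std::uint64_t rnd_a,
                            std::uint64_t rnd_b, Region& a, Region& b) {
    a = {alloc_start + rnd_a, alloc_start + rnd_a + size_a};
    b = {alloc_start + rnd_b, alloc_start + rnd_b + size_b};
    // Whichever region drew the lower offset keeps it; the other is shifted
    // up by the first region's size so the two never overlap.
    if (rnd_a <= rnd_b) {
        b.start += size_a;
        b.end += size_a;
    } else {
        a.start += size_b;
        a.end += size_b;
    }
}
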
-
-void KPageTable::Finalize() {
- auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
- if (Settings::IsFastmemEnabled()) {
- m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
- }
- };
-
- // Finalize memory blocks.
- m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
-
- // Release any insecure mapped memory.
- if (m_mapped_insecure_memory) {
- UNIMPLEMENTED();
- }
-
- // Release any ipc server memory.
- if (m_mapped_ipc_server_memory) {
- UNIMPLEMENTED();
- }
-
- // Close the backing page table, as the destructor is not called for guest objects.
- m_page_table_impl.reset();
-}
-
-Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- const u64 size{num_pages * PageSize};
-
- // Validate the mapping request.
- R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the destination memory is unmapped.
- R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
-
- // Allocate and open.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, num_pages,
- KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
-
- R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Validate the mapping request.
- R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
- ResultInvalidMemoryRegion);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the source memory is normal heap.
- KMemoryState src_state{};
- KMemoryPermission src_perm{};
- size_t num_src_allocator_blocks{};
- R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
- src_address, size, KMemoryState::All, KMemoryState::Normal,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Verify that the destination memory is unmapped.
- size_t num_dst_allocator_blocks{};
- R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Map the code memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create page groups for the memory being mapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
- AddRegionToPages(src_address, num_pages, pg);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reprotect the source as kernel-read/not mapped.
- const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
- KMemoryPermission::NotMapped);
- R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
-
- // Ensure that we unprotect the source pages on failure.
- auto unprot_guard = SCOPE_GUARD({
- ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
- .IsSuccess());
- });
-
- // Map the alias pages.
- const KPageProperties dst_properties = {new_perm, false, false,
- DisableMergeAttribute::DisableHead};
- R_TRY(
- this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
-
- // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
- // failure.
- unprot_guard.Cancel();
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
- src_state, new_perm, KMemoryAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy) {
- // Validate the mapping request.
- R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
- ResultInvalidMemoryRegion);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the source memory is locked normal heap.
- size_t num_src_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
- KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked));
-
- // Verify that the destination memory is aliasable code.
- size_t num_dst_allocator_blocks{};
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
- KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
-
- // Determine whether any pages being unmapped are code.
- bool any_code_pages = false;
- {
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
- while (true) {
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Check if the memory has code flag.
- if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
- any_code_pages = true;
- break;
- }
-
- // Check if we're done.
- if (dst_address + size - 1 <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
- }
-
- // Ensure that we maintain the instruction cache.
- bool reprotected_pages = false;
- SCOPE_EXIT({
- if (reprotected_pages && any_code_pages) {
- if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
- m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
- } else {
- m_system.InvalidateCpuInstructionCaches();
- }
- }
- });
-
- // Unmap.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Unmap the aliased copy of the pages.
- R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Try to set the permissions for the source pages back to what they should be.
- R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
- m_memory_block_manager.Update(
- std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
-
- // Note that we reprotected pages.
- reprotected_pages = true;
- }
-
- R_SUCCEED();
-}
-
-KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages) {
- KProcessAddress address = 0;
-
- if (num_pages <= region_num_pages) {
- if (this->IsAslrEnabled()) {
- UNIMPLEMENTED();
- }
- // Find the first free area.
- if (address == 0) {
- address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
- alignment, offset, guard_pages);
- }
- }
-
- return address;
-}
-
-Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
- ASSERT(this->IsLockedByCurrentThread());
-
- const size_t size = num_pages * PageSize;
-
- // We're making a new group, not adding to an existing one.
- R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
- ResultInvalidCurrentMemory);
-
- // Prepare tracking variables.
- KPhysicalAddress cur_addr = next_entry.phys_addr;
- size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
- size_t tot_size = cur_size;
-
- // Iterate, adding to group as we go.
- const auto& memory_layout = m_system.Kernel().MemoryLayout();
- while (tot_size < size) {
- R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
- ResultInvalidCurrentMemory);
-
- if (next_entry.phys_addr != (cur_addr + cur_size)) {
- const size_t cur_pages = cur_size / PageSize;
-
- R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
- R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
- cur_addr = next_entry.phys_addr;
- cur_size = next_entry.block_size;
- } else {
- cur_size += next_entry.block_size;
- }
-
- tot_size += next_entry.block_size;
- }
-
- // Ensure we add the right amount for the last block.
- if (tot_size > size) {
- cur_size -= (tot_size - size);
- }
-
- // Add the last block.
- const size_t cur_pages = cur_size / PageSize;
- R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
- R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
- R_SUCCEED();
-}
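
Aside (not from the diff): MakePageGroup's traversal loop merges physically contiguous page-table entries into single blocks before adding them to the group, which keeps the group compact. A standalone sketch of that run-coalescing, with plain vectors standing in for KPageGroup:

#include <cstdint>
#include <vector>

struct Run {
    std::uint64_t phys;
    std::uint64_t size;
};

// 'entries' are (phys_addr, block_size) pairs in traversal order.
inline std::vector<Run> Coalesce(const std::vector<Run>& entries) {
    std::vector<Run> out;
    for (const Run& e : entries) {
        if (!out.empty() && out.back().phys + out.back().size == e.phys) {
            out.back().size += e.size; // extends the current contiguous run
        } else {
            out.push_back(e); // starts a new run
        }
    }
    return out;
}
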
-
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
- ASSERT(this->IsLockedByCurrentThread());
-
- const size_t size = num_pages * PageSize;
- const auto& memory_layout = m_system.Kernel().MemoryLayout();
-
- // Empty groups are necessarily invalid.
- if (pg.empty()) {
- return false;
- }
-
- // We're going to validate that the group we'd expect is the group we see.
- auto cur_it = pg.begin();
- KPhysicalAddress cur_block_address = cur_it->GetAddress();
- size_t cur_block_pages = cur_it->GetNumPages();
-
- auto UpdateCurrentIterator = [&]() {
- if (cur_block_pages == 0) {
- if ((++cur_it) == pg.end()) {
- return false;
- }
-
- cur_block_address = cur_it->GetAddress();
- cur_block_pages = cur_it->GetNumPages();
- }
- return true;
- };
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
- return false;
- }
-
- // Prepare tracking variables.
- KPhysicalAddress cur_addr = next_entry.phys_addr;
- size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
- size_t tot_size = cur_size;
-
- // Iterate, comparing expected to actual.
- while (tot_size < size) {
- if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
- return false;
- }
-
- if (next_entry.phys_addr != (cur_addr + cur_size)) {
- const size_t cur_pages = cur_size / PageSize;
-
- if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
- return false;
- }
-
- if (!UpdateCurrentIterator()) {
- return false;
- }
-
- if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
- return false;
- }
-
- cur_block_address += cur_size;
- cur_block_pages -= cur_pages;
- cur_addr = next_entry.phys_addr;
- cur_size = next_entry.block_size;
- } else {
- cur_size += next_entry.block_size;
- }
-
- tot_size += next_entry.block_size;
- }
-
- // Ensure we compare the right amount for the last block.
- if (tot_size > size) {
- cur_size -= (tot_size - size);
- }
-
- if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
- return false;
- }
-
- if (!UpdateCurrentIterator()) {
- return false;
- }
-
- return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
-}
-
-Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
- KPageTable& src_page_table, KProcessAddress src_addr) {
- // Acquire the table locks.
- KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
-
- const size_t num_pages{size / PageSize};
-
- // Check that the memory is mapped in the destination process.
- size_t num_allocator_blocks;
- R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
- KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Check that the memory is mapped in the source process.
- R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
- KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Apply the memory block update.
- m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- m_system.InvalidateCpuInstructionCaches();
-
- R_SUCCEED();
-}
-
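-// Prepares the client's pages for an IPC transfer: validates that every block in the range may
-// be used for the requested IPC state, reprotects blocks whose permissions must change, and
-// reports how many memory blocks the caller's block-manager update will need.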
-Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- KProcessAddress address, size_t size,
- KMemoryPermission test_perm, KMemoryState dst_state) {
- // Validate pre-conditions.
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
- test_perm == KMemoryPermission::UserRead);
-
- // Check that the address is in range.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Get the source permission.
- const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
- ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
- : KMemoryPermission::UserRead;
-
- // Get aligned extents.
- const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
- const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
- const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
-
- const auto aligned_src_last = aligned_src_end - 1;
- const auto mapping_src_last = mapping_src_end - 1;
-
- // Get the test state and attribute mask.
- KMemoryState test_state;
- KMemoryAttribute test_attr_mask;
- switch (dst_state) {
- case KMemoryState::Ipc:
- test_state = KMemoryState::FlagCanUseIpc;
- test_attr_mask =
- KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonSecureIpc:
- test_state = KMemoryState::FlagCanUseNonSecureIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonDeviceIpc:
- test_state = KMemoryState::FlagCanUseNonDeviceIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- default:
- R_THROW(ResultInvalidCombination);
- }
-
- // Ensure that on failure, we roll back appropriately.
- size_t mapped_size = 0;
- ON_RESULT_FAILURE {
- if (mapped_size > 0) {
- this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
- src_perm);
- }
- };
-
- size_t blocks_needed = 0;
-
- // Iterate, mapping as needed.
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
- while (true) {
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Validate the current block.
- R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
- test_attr_mask, KMemoryAttribute::None));
-
- if (mapping_src_start < mapping_src_end && mapping_src_start < info.GetEndAddress() &&
- info.GetAddress() < GetInteger(mapping_src_end)) {
- const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
- ? info.GetAddress()
- : mapping_src_start;
- const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
- : mapping_src_end;
- const size_t cur_size = cur_end - cur_start;
-
- if (info.GetAddress() < GetInteger(mapping_src_start)) {
- ++blocks_needed;
- }
- if (mapping_src_last < info.GetLastAddress()) {
- ++blocks_needed;
- }
-
- // Set the permissions on the block, if we need to.
- if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
- R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
- OperationType::ChangePermissions));
- }
-
- // Note that we mapped this part.
- mapped_size += cur_size;
- }
-
- // If the block is at the end, we're done.
- if (aligned_src_last <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- ASSERT(it != m_memory_block_manager.end());
- }
-
- if (out_blocks_needed != nullptr) {
- ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- *out_blocks_needed = blocks_needed;
- }
-
- R_SUCCEED();
-}
-
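-// Maps the client range into this (server) page table's alias region. Unaligned head/tail bytes
-// are staged through freshly allocated partial pages (copied for sends, cleared to the fill
-// value otherwise), while fully aligned interior pages are mapped directly from the client.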
-Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
- KProcessAddress src_addr, KMemoryPermission test_perm,
- KMemoryState dst_state, KPageTable& src_page_table,
- bool send) {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(src_page_table.IsLockedByCurrentThread());
-
- // Check that we can theoretically map.
- const KProcessAddress region_start = m_alias_region_start;
- const size_t region_size = m_alias_region_end - m_alias_region_start;
- R_UNLESS(size < region_size, ResultOutOfAddressSpace);
-
- // Get aligned source extents.
- const KProcessAddress src_start = src_addr;
- const KProcessAddress src_end = src_addr + size;
- const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
- const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
- const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
- const KProcessAddress mapping_src_end =
- Common::AlignDown(GetInteger(src_start) + size, PageSize);
- const size_t aligned_src_size = aligned_src_end - aligned_src_start;
- const size_t mapping_src_size =
- (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
-
- // Select a random address to map at.
- KProcessAddress dst_addr =
- this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
- PageSize, 0, this->GetNumGuardPages());
-
- R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
-
- // Check that we can perform the operation we're about to perform.
- ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reserve space for any partial pages we allocate.
- const size_t unmapped_size = aligned_src_size - mapping_src_size;
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Ensure that we manage page references correctly.
- KPhysicalAddress start_partial_page = 0;
- KPhysicalAddress end_partial_page = 0;
- KProcessAddress cur_mapped_addr = dst_addr;
-
- // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
- // be freed on scope exit.
- SCOPE_EXIT({
- if (start_partial_page != 0) {
- m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
- }
- if (end_partial_page != 0) {
- m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
- }
- });
-
- ON_RESULT_FAILURE {
- if (cur_mapped_addr != dst_addr) {
- ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
- KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
- }
- };
-
- // Allocate the start page as needed.
- if (aligned_src_start < mapping_src_start) {
- start_partial_page =
- m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
- R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
- }
-
- // Allocate the end page as needed.
- if (mapping_src_end < aligned_src_end &&
- (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
- end_partial_page =
- m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
- R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
- }
-
- // Get the implementation.
- auto& src_impl = src_page_table.PageTableImpl();
-
- // Get the fill value for partial pages.
- const auto fill_val = m_ipc_fill_value;
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- bool traverse_valid =
- src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
- ASSERT(traverse_valid);
-
- // Prepare tracking variables.
- KPhysicalAddress cur_block_addr = next_entry.phys_addr;
- size_t cur_block_size =
- next_entry.block_size - (cur_block_addr & (next_entry.block_size - 1));
- size_t tot_block_size = cur_block_size;
-
- // Map the start page, if we have one.
- if (start_partial_page != 0) {
- // Ensure the page holds correct data.
- const KVirtualAddress start_partial_virt =
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
- if (send) {
- const size_t partial_offset = src_start - aligned_src_start;
- size_t copy_size, clear_size;
- if (src_end < mapping_src_start) {
- copy_size = size;
- clear_size = mapping_src_start - src_end;
- } else {
- copy_size = mapping_src_start - src_start;
- clear_size = 0;
- }
-
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
- partial_offset);
- std::memcpy(
- m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
- m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr)) +
- partial_offset),
- copy_size);
- if (clear_size > 0) {
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
- partial_offset + copy_size),
- fill_val, clear_size);
- }
- } else {
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
- PageSize);
- }
-
- // Map the page.
- R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
-
- // Update tracking extents.
- cur_mapped_addr += PageSize;
- cur_block_addr += PageSize;
- cur_block_size -= PageSize;
-
- // If the block's size was one page, we may need to continue traversal.
- if (cur_block_size == 0 && aligned_src_size > PageSize) {
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- cur_block_addr = next_entry.phys_addr;
- cur_block_size = next_entry.block_size;
- tot_block_size += next_entry.block_size;
- }
- }
-
- // Map the remaining pages.
- while (aligned_src_start + tot_block_size < mapping_src_end) {
- // Continue the traversal.
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- // Process the block.
- if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
- // Map the block we've been processing so far.
- R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
- cur_block_addr));
-
- // Update tracking extents.
- cur_mapped_addr += cur_block_size;
- cur_block_addr = next_entry.phys_addr;
- cur_block_size = next_entry.block_size;
- } else {
- cur_block_size += next_entry.block_size;
- }
- tot_block_size += next_entry.block_size;
- }
-
- // Handle the last direct-mapped page.
- if (const KProcessAddress mapped_block_end =
- aligned_src_start + tot_block_size - cur_block_size;
- mapped_block_end < mapping_src_end) {
- const size_t last_block_size = mapping_src_end - mapped_block_end;
-
- // Map the last block.
- R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
- cur_block_addr));
-
- // Update tracking extents.
- cur_mapped_addr += last_block_size;
- cur_block_addr += last_block_size;
- if (mapped_block_end + cur_block_size < aligned_src_end &&
- cur_block_size == last_block_size) {
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- cur_block_addr = next_entry.phys_addr;
- }
- }
-
- // Map the end page, if we have one.
- if (end_partial_page != 0) {
- // Ensure the page holds correct data.
- const KVirtualAddress end_partial_virt =
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
- if (send) {
- const size_t copy_size = src_end - mapping_src_end;
- std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
- m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr))),
- copy_size);
- std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
- fill_val, PageSize - copy_size);
- } else {
- std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
- PageSize);
- }
-
- // Map the page.
- R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
- }
-
- // Update memory blocks to reflect our changes.
- m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
- dst_state, test_perm, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Set the output address.
- *out_addr = dst_addr + (src_start - aligned_src_start);
-
- // We succeeded.
- memory_reservation.Commit();
- R_SUCCEED();
-}
-
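-// Top-level IPC mapping routine: performs client setup on the source table, server setup on
-// this table, and then ipc-locks the mapped client pages so they cannot change underneath the
-// transfer.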
-Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
- KPageTable& src_page_table, KMemoryPermission test_perm,
- KMemoryState dst_state, bool send) {
- // For convenience, alias this.
- KPageTable& dst_page_table = *this;
-
- // Acquire the table locks.
- KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(std::addressof(src_page_table));
-
- // Perform client setup.
- size_t num_allocator_blocks;
- R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
- std::addressof(num_allocator_blocks), src_addr, size,
- test_perm, dst_state));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- src_page_table.m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Get the mapped extents.
- const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
- const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
- const size_t src_map_size = src_map_end - src_map_start;
-
- // Ensure that we clean up appropriately if we fail after this.
- const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
- ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
- : KMemoryPermission::UserRead;
- ON_RESULT_FAILURE {
- if (src_map_end > src_map_start) {
- src_page_table.CleanupForIpcClientOnServerSetupFailure(
- updater.GetPageList(), src_map_start, src_map_size, src_perm);
- }
- };
-
- // Perform server setup.
- R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
- src_page_table, send));
-
- // If anything was mapped, ipc-lock the pages.
- if (src_map_start < src_map_end) {
- // Get the source permission.
- src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
- (src_map_end - src_map_start) / PageSize,
- &KMemoryBlock::LockForIpc, src_perm);
- }
-
- R_SUCCEED();
-}
-
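-// Tears down a server-side IPC mapping: unmaps the aligned range, resets the memory blocks, and
-// releases the partial-page overhead back to the process resource limit.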
-Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
- KMemoryState dst_state) {
- // Validate the address.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, dst_state, KMemoryPermission::UserRead,
- KMemoryPermission::UserRead, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Get aligned extents.
- const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
- const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
- const size_t aligned_size = aligned_end - aligned_start;
- const size_t aligned_num_pages = aligned_size / PageSize;
-
- // Unmap the pages.
- R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Update memory blocks.
- m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
- KMemoryState::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- // Release from the resource limit as relevant.
- const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
- const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
-
- R_SUCCEED();
-}
-
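-// Undoes the client-side IPC lock: walks the mapped range, consolidating adjacent blocks into
-// single reprotection calls, restores original permissions where the last IPC reference is
-// dropped, and finally unlocks the blocks.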
-Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
- KMemoryState dst_state) {
- // Validate the address.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Get aligned source extents.
- const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
- const KProcessAddress mapping_last = mapping_end - 1;
- const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
-
- // If nothing was mapped, we're actually done immediately.
- R_SUCCEED_IF(mapping_size == 0);
-
- // Get the test state and attribute mask.
- KMemoryState test_state;
- KMemoryAttribute test_attr_mask;
- switch (dst_state) {
- case KMemoryState::Ipc:
- test_state = KMemoryState::FlagCanUseIpc;
- test_attr_mask =
- KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonSecureIpc:
- test_state = KMemoryState::FlagCanUseNonSecureIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonDeviceIpc:
- test_state = KMemoryState::FlagCanUseNonDeviceIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- default:
- R_THROW(ResultInvalidCombination);
- }
-
- // Lock the table.
- // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
- // convention elsewhere in KPageTable.
- KScopedLightLock lk(m_general_lock);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Ensure that on failure, we roll back appropriately.
- size_t mapped_size = 0;
- ON_RESULT_FAILURE {
- if (mapped_size > 0) {
- // Determine where the mapping ends.
- const auto mapped_end = mapping_start + mapped_size;
- const auto mapped_last = mapped_end - 1;
-
- // Get current and next iterators.
- KMemoryBlockManager::const_iterator start_it =
- m_memory_block_manager.FindIterator(mapping_start);
- KMemoryBlockManager::const_iterator next_it = start_it;
- ++next_it;
-
- // Get the current block info.
- KMemoryInfo cur_info = start_it->GetMemoryInfo();
-
- // Create tracking variables.
- KProcessAddress cur_address = cur_info.GetAddress();
- size_t cur_size = cur_info.GetSize();
- bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
- bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
- bool first =
- cur_info.GetIpcDisableMergeCount() == 1 &&
- (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
- KMemoryBlockDisableMergeAttribute::None;
-
- while ((cur_address + cur_size - 1) < mapped_last) {
- // Check that we have a next block.
- ASSERT(next_it != m_memory_block_manager.end());
-
- // Get the next info.
- const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
- // Check if we can consolidate the next block's permission set with the current one.
- const bool next_perm_eq =
- next_info.GetPermission() == next_info.GetOriginalPermission();
- const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
- if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
- cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
- // We can consolidate the reprotection for the current and next block into a
- // single call.
- cur_size += next_info.GetSize();
- } else {
- // We have to operate on the current block.
- if ((cur_needs_set_perm || first) && !cur_perm_eq) {
- ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
-
- // Advance.
- cur_address = next_info.GetAddress();
- cur_size = next_info.GetSize();
- first = false;
- }
-
- // Advance.
- cur_info = next_info;
- cur_perm_eq = next_perm_eq;
- cur_needs_set_perm = next_needs_set_perm;
- ++next_it;
- }
-
- // Process the last block.
- if ((first || cur_needs_set_perm) && !cur_perm_eq) {
- ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
- }
- };
-
- // Iterate, reprotecting as needed.
- {
- // Get current and next iterators.
- KMemoryBlockManager::const_iterator start_it =
- m_memory_block_manager.FindIterator(mapping_start);
- KMemoryBlockManager::const_iterator next_it = start_it;
- ++next_it;
-
- // Validate the current block.
- KMemoryInfo cur_info = start_it->GetMemoryInfo();
- ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
- KMemoryPermission::None,
- test_attr_mask | KMemoryAttribute::IpcLocked,
- KMemoryAttribute::IpcLocked)
- .IsSuccess());
-
- // Create tracking variables.
- KProcessAddress cur_address = cur_info.GetAddress();
- size_t cur_size = cur_info.GetSize();
- bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
- bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
- bool first =
- cur_info.GetIpcDisableMergeCount() == 1 &&
- (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
- KMemoryBlockDisableMergeAttribute::None;
-
- while ((cur_address + cur_size - 1) < mapping_last) {
- // Check that we have a next block.
- ASSERT(next_it != m_memory_block_manager.end());
-
- // Get the next info.
- const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
- // Validate the next block.
- ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
- KMemoryPermission::None, KMemoryPermission::None,
- test_attr_mask | KMemoryAttribute::IpcLocked,
- KMemoryAttribute::IpcLocked)
- .IsSuccess());
-
- // Check if we can consolidate the next block's permission set with the current one.
- const bool next_perm_eq =
- next_info.GetPermission() == next_info.GetOriginalPermission();
- const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
- if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
- cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
- // We can consolidate the reprotection for the current and next block into a single
- // call.
- cur_size += next_info.GetSize();
- } else {
- // We have to operate on the current block.
- if ((cur_needs_set_perm || first) && !cur_perm_eq) {
- R_TRY(Operate(cur_address, cur_size / PageSize,
- cur_needs_set_perm ? cur_info.GetOriginalPermission()
- : cur_info.GetPermission(),
- OperationType::ChangePermissions));
- }
-
- // Mark that we mapped the block.
- mapped_size += cur_size;
-
- // Advance.
- cur_address = next_info.GetAddress();
- cur_size = next_info.GetSize();
- first = false;
- }
-
- // Advance.
- cur_info = next_info;
- cur_perm_eq = next_perm_eq;
- cur_needs_set_perm = next_needs_set_perm;
- ++next_it;
- }
-
- // Process the last block.
- const auto lock_count =
- cur_info.GetIpcLockCount() +
- (next_it != m_memory_block_manager.end()
- ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
- : 0);
- if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
- R_TRY(Operate(cur_address, cur_size / PageSize,
- cur_needs_set_perm ? cur_info.GetOriginalPermission()
- : cur_info.GetPermission(),
- OperationType::ChangePermissions));
- }
- }
-
- // Create an update allocator.
- // NOTE: Guaranteed zero blocks needed here.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, 0);
- R_TRY(allocator_result);
-
- // Unlock the pages.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
- mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
- KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
- KProcessAddress address, size_t size,
- KMemoryPermission prot_perm) {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(GetInteger(address), PageSize));
- ASSERT(Common::IsAligned(size, PageSize));
-
- // Get the mapped extents.
- const KProcessAddress src_map_start = address;
- const KProcessAddress src_map_end = address + size;
- const KProcessAddress src_map_last = src_map_end - 1;
-
- // This function is only invoked when there's something to do.
- ASSERT(src_map_end > src_map_start);
-
- // Iterate over blocks, fixing permissions.
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
- while (true) {
- const KMemoryInfo info = it->GetMemoryInfo();
-
- const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
- ? info.GetAddress()
- : GetInteger(src_map_start);
- const auto cur_end =
- src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
-
- // If we can, fix the protections on the block.
- if ((info.GetIpcLockCount() == 0 &&
- (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
- (info.GetIpcLockCount() != 0 &&
- (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
- // Check if we actually need to fix the protections on the block.
- if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
- (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
- ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
- }
-
- // If we're past the end of the region, we're done.
- if (src_map_last <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- ASSERT(it != m_memory_block_manager.end());
- }
-}
-
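-// Allocates and maps backing pages for a physical-memory mapping request. Because the table
-// lock is dropped between the initial scan and the allocation, the mapped size is re-checked
-// under the lock and the whole operation retries if the range changed concurrently.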
-Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
- // Calculate the last address for convenience.
- const KProcessAddress last_address = address + size - 1;
-
- // Define iteration variables.
- KProcessAddress cur_address;
- size_t mapped_size;
-
- // The entire mapping process can be retried.
- while (true) {
- // Check if the memory is already mapped.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Iterate over the memory.
- cur_address = address;
- mapped_size = 0;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- if (info.GetState() != KMemoryState::Free) {
- mapped_size += (last_address + 1 - cur_address);
- }
- break;
- }
-
- // Track the memory if it's mapped.
- if (info.GetState() != KMemoryState::Free) {
- mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If the size mapped is the size requested, we've nothing to do.
- R_SUCCEED_IF(size == mapped_size);
- }
-
- // Allocate and map the memory.
- {
- // Reserve the memory from the process resource limit.
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Allocate pages for the new memory.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
- &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
-
- // If we fail in the next bit (or retry), we need to clean up the pages.
- // auto pg_guard = SCOPE_GUARD {
- // pg.OpenFirst();
- // pg.Close();
- //};
-
- // Map the memory.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- size_t num_allocator_blocks = 0;
-
- // Verify that nobody has mapped memory since we first checked.
- {
- // Iterate over the memory.
- size_t checked_mapped_size = 0;
- cur_address = address;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- const bool is_free = info.GetState() == KMemoryState::Free;
- if (is_free) {
- if (info.GetAddress() < GetInteger(address)) {
- ++num_allocator_blocks;
- }
- if (last_address < info.GetLastAddress()) {
- ++num_allocator_blocks;
- }
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- if (!is_free) {
- checked_mapped_size += (last_address + 1 - cur_address);
- }
- break;
- }
-
- // Track the memory if it's mapped.
- if (!is_free) {
- checked_mapped_size +=
- KProcessAddress(info.GetEndAddress()) - cur_address;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If the size now isn't what it was before, somebody mapped or unmapped
- // concurrently. If this happened, retry.
- if (mapped_size != checked_mapped_size) {
- continue;
- }
- }
-
- // Create an update allocator.
- ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Prepare to iterate over the memory.
- auto pg_it = pg.begin();
- KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- // Reset the current tracking address, and make sure we clean up on failure.
- // pg_guard.Cancel();
- cur_address = address;
- ON_RESULT_FAILURE {
- if (cur_address > address) {
- const KProcessAddress last_unmap_address = cur_address - 1;
-
- // Iterate, unmapping the pages.
- cur_address = address;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory state is free, we mapped it and need to unmap it.
- if (info.GetState() == KMemoryState::Free) {
- // Determine the range to unmap.
- const size_t cur_pages =
- std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_unmap_address + 1 - cur_address) /
- PageSize;
-
- // Unmap.
- ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
- OperationType::Unmap)
- .IsSuccess());
- }
-
- // Check if we're done.
- if (last_unmap_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
- }
-
- // Release any remaining unmapped memory.
- m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
- m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
- for (++pg_it; pg_it != pg.end(); ++pg_it) {
- m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
- pg_it->GetNumPages());
- m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
- pg_it->GetNumPages());
- }
- };
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If it's unmapped, we need to map it.
- if (info.GetState() == KMemoryState::Free) {
- // Determine the range to map.
- size_t map_pages =
- std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
-
- // While we have pages to map, map them.
- {
- // Create a page group for the current mapping range.
- KPageGroup cur_pg(m_kernel, m_block_info_manager);
- {
- ON_RESULT_FAILURE_2 {
- cur_pg.OpenFirst();
- cur_pg.Close();
- };
-
- size_t remain_pages = map_pages;
- while (remain_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Add whatever we can to the current block.
- const size_t cur_pages = std::min(pg_pages, remain_pages);
- R_TRY(cur_pg.AddBlock(pg_phys_addr +
- ((pg_pages - cur_pages) * PageSize),
- cur_pages));
-
- // Advance.
- remain_pages -= cur_pages;
- pg_pages -= cur_pages;
- }
- }
-
- // Map the pages.
- R_TRY(this->Operate(cur_address, map_pages, cur_pg,
- OperationType::MapFirstGroup));
- }
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // We succeeded, so commit the memory reservation.
- memory_reservation.Commit();
-
- // Increase our tracked mapped size.
- m_mapped_physical_memory_size += (size - mapped_size);
-
- // Update the relevant memory blocks.
- m_memory_block_manager.UpdateIfMatch(
- std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- address == this->GetAliasRegionStart()
- ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
- }
- }
- }
-}
-
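-// Unmaps a physical-memory mapping and releases the unmapped size back to the process resource
-// limit. Only Normal blocks without attributes (and Free blocks) may appear in the range.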
-Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Calculate the last address for convenience.
- const KProcessAddress last_address = address + size - 1;
-
- // Define iteration variables.
- KProcessAddress map_start_address = 0;
- KProcessAddress map_last_address = 0;
-
- KProcessAddress cur_address;
- size_t mapped_size;
- size_t num_allocator_blocks = 0;
-
- // Check if the memory is mapped.
- {
- // Iterate over the memory.
- cur_address = address;
- mapped_size = 0;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Verify the memory's state.
- const bool is_normal = info.GetState() == KMemoryState::Normal &&
- info.GetAttribute() == KMemoryAttribute::None;
- const bool is_free = info.GetState() == KMemoryState::Free;
- R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
-
- if (is_normal) {
- R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
-
- if (map_start_address == 0) {
- map_start_address = cur_address;
- }
- map_last_address =
- (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
-
- if (info.GetAddress() < GetInteger(address)) {
- ++num_allocator_blocks;
- }
- if (last_address < info.GetLastAddress()) {
- ++num_allocator_blocks;
- }
-
- mapped_size += (map_last_address + 1 - cur_address);
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If there's nothing mapped, we've nothing to do.
- R_SUCCEED_IF(mapped_size == 0);
- }
-
- // Create an update allocator.
- ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Separate the mapping.
- R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
- KMemoryPermission::None, OperationType::Separate));
-
- // Reset the current tracking address, and make sure we clean up on failure.
- cur_address = address;
-
- // Iterate over the memory, unmapping as we go.
- auto it = m_memory_block_manager.FindIterator(cur_address);
-
- const auto clear_merge_attr =
- (it->GetState() == KMemoryState::Normal &&
- it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
- ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None;
-
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory state is normal, we need to unmap it.
- if (info.GetState() == KMemoryState::Normal) {
- // Determine the range to unmap.
- const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
-
- // Unmap.
- ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // Release the memory resource.
- m_mapped_physical_memory_size -= mapped_size;
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size);
-
- // Update memory blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- clear_merge_attr);
-
- // We succeeded.
- R_SUCCEED();
-}
-
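-// Aliases the source pages at the destination as Stack memory. The source is reprotected to
-// kernel-read/not-mapped and marked Locked for the lifetime of the alias.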
-Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that the source address's state is valid.
- KMemoryState src_state;
- size_t num_src_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
- std::addressof(num_src_allocator_blocks), src_address, size,
- KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Validate that the dst address's state is valid.
- size_t num_dst_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result;
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result;
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Map the memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create page groups for the memory being unmapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Create the page group representing the source.
- R_TRY(this->MakePageGroup(pg, src_address, num_pages));
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reprotect the source as kernel-read/not mapped.
- const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
- KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
- const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
- const KPageProperties src_properties = {new_src_perm, false, false,
- DisableMergeAttribute::DisableHeadBodyTail};
- R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
- OperationType::ChangePermissions));
-
- // Ensure that we unprotect the source pages on failure.
- ON_RESULT_FAILURE {
- const KPageProperties unprotect_properties = {
- KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::EnableHeadBodyTail};
- ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
- OperationType::ChangePermissions) == ResultSuccess);
- };
-
- // Map the alias pages.
- const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
- false));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
- src_state, new_src_perm, new_src_attr,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
- }
-
- R_SUCCEED();
-}
-
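-// Reverses MapMemory: verifies that the destination is still an exact alias of the source,
-// unmaps the alias, and restores user read/write permissions on the source pages.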
-Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that the source address's state is valid.
- KMemoryState src_state;
- size_t num_src_allocator_blocks;
- R_TRY(this->CheckMemoryState(
- std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
- src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
- KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
- KMemoryAttribute::All, KMemoryAttribute::Locked));
-
- // Validate that the dst address's state is valid.
- KMemoryPermission dst_perm;
- size_t num_dst_allocator_blocks;
- R_TRY(this->CheckMemoryState(
- nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
- dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result;
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result;
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Unmap the memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create page groups for the memory being unmapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Create the page group representing the destination.
- R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
-
- // Ensure the page group is valid for the source.
- R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Unmap the aliased copy of the pages.
- const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(
- this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
-
- // Ensure that we re-map the aliased pages on failure.
- ON_RESULT_FAILURE {
- this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
- };
-
- // Try to set the permissions for the source pages back to what they should be.
- const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::EnableAndMergeHeadBodyTail};
- R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
- OperationType::ChangePermissions));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(
- std::addressof(src_allocator), src_address, num_pages, src_state,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
- }
-
- R_SUCCEED();
-}
-
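-// Allocates fresh pages from the memory manager, clears them to the heap fill value, and maps
-// them at the given address with the requested permissions.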
-Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
- size_t num_pages, KMemoryPermission perm) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Create a page group to hold the pages we allocate.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Allocate the pages.
- R_TRY(
- m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
-
- // Ensure that the page group is closed when we're done working with it.
- SCOPE_EXIT({ pg.Close(); });
-
- // Clear all pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
- it.GetSize());
- }
-
- // Map the pages.
- R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
-}
-
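-// Maps each block of the page group contiguously at the given address, unmapping whatever was
-// already mapped if a later block fails. Only the first mapped page keeps the caller's
-// disable-merge attributes; the remainder are mapped with none.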
-Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
- const KPageGroup& pg, const KPageProperties properties,
- bool reuse_ll) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Note the current address, so that we can iterate.
- const KProcessAddress start_address = address;
- KProcessAddress cur_address = address;
-
- // Ensure that we clean up on failure.
- ON_RESULT_FAILURE {
- ASSERT(!reuse_ll);
- if (cur_address != start_address) {
- const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
- unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
- }
- };
-
- // Iterate, mapping all pages in the group.
- for (const auto& block : pg) {
- // Map and advance.
- const KPageProperties cur_properties =
- (cur_address == start_address)
- ? properties
- : KPageProperties{properties.perm, properties.io, properties.uncached,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
- block.GetAddress()));
- cur_address += block.GetSize();
- }
-
- // We succeeded!
- R_SUCCEED();
-}
-
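-// Re-establishes the mappings for a page group after a failed unmap, replaying each memory
-// block's permissions over the group's physical runs.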
-void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
- const KPageGroup& pg) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Note the current address, so that we can iterate.
- const KProcessAddress start_address = address;
- const KProcessAddress last_address = start_address + size - 1;
- const KProcessAddress end_address = last_address + 1;
-
- // Iterate over the memory.
- auto pg_it = pg.begin();
- ASSERT(pg_it != pg.end());
-
- KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- auto it = m_memory_block_manager.FindIterator(start_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Determine the range to map.
- KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
- const KProcessAddress map_end_address =
- std::min<KProcessAddress>(info.GetEndAddress(), end_address);
- ASSERT(map_end_address != map_address);
-
- // Determine if we should disable head merge.
- const bool disable_head_merge =
- info.GetAddress() >= GetInteger(start_address) &&
- True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
- const KPageProperties map_properties = {
- info.GetPermission(), false, false,
- disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
-
- // While we have pages to map, map them.
- size_t map_pages = (map_end_address - map_address) / PageSize;
- while (map_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Map whatever we can.
- const size_t cur_pages = std::min(pg_pages, map_pages);
- ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map,
- pg_phys_addr) == ResultSuccess);
-
- // Advance.
- map_address += cur_pages * PageSize;
- map_pages -= cur_pages;
-
- pg_phys_addr += cur_pages * PageSize;
- pg_pages -= cur_pages;
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
-
- // Check that we re-mapped precisely the page group.
- ASSERT((++pg_it) == pg.end());
-}
-
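-// Maps num_pages at a randomly selected free area within the given region, either at a
-// caller-provided physical address or from freshly allocated pages.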
-Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, bool is_pa_valid,
- KProcessAddress region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm) {
- ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
-
- // Ensure this is a valid map request.
- R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Find a random address to map at.
- KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
- 0, this->GetNumGuardPages());
- R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(Common::IsAligned(GetInteger(addr), alignment));
- ASSERT(this->CanContain(addr, num_pages * PageSize, state));
- ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- if (is_pa_valid) {
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
- } else {
- R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
- }
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- *out_addr = addr;
- R_SUCCEED();
-}
-
-Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- // Check that the map is in range.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Map the pages.
- R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
- // Check that the unmap is in range.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform the unmap.
- const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- R_SUCCEED();
-}
-
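-// Maps an existing page group at a randomly selected free area within the given region, then
-// records the new state and permissions with the memory block manager.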
-Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
- KProcessAddress region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid map request.
- const size_t num_pages = pg.GetNumPages();
- R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Find a random address to map at.
- KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
- 0, this->GetNumGuardPages());
- R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(this->CanContain(addr, num_pages * PageSize, state));
- ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- *out_addr = addr;
- R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
- KMemoryPermission perm) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid map request.
- const size_t num_pages = pg.GetNumPages();
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to map.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
- KMemoryState state) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid unmap request.
- const size_t num_pages = pg.GetNumPages();
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to unmap.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Check that the page group is valid.
- R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform unmapping operation.
- const KPageProperties properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) {
- // Ensure that the page group isn't null.
- ASSERT(out != nullptr);
-
- // Make sure that the region we're mapping is valid for the table.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to create the group.
- R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Create a new page group for the region.
- R_TRY(this->MakePageGroup(*out, address, num_pages));
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm) {
- const size_t num_pages = size / PageSize;
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory permission.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
- std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::FlagCode, KMemoryState::FlagCode,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Determine new perm/state.
- const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
- KMemoryState new_state = old_state;
- const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
- const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
- const bool was_x =
- (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
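- // Writable and executable permissions are mutually exclusive here (W^X).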
- ASSERT(!(is_w && is_x));
-
- if (is_w) {
- switch (old_state) {
- case KMemoryState::Code:
- new_state = KMemoryState::CodeData;
- break;
- case KMemoryState::AliasCode:
- new_state = KMemoryState::AliasCodeData;
- break;
- default:
- ASSERT(false);
- break;
- }
- }
-
- // Succeed if there's nothing to do.
- R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Perform mapping operation.
- const auto operation =
- was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
- R_TRY(Operate(addr, num_pages, new_perm, operation));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Ensure cache coherency, if we're setting pages as executable.
- if (is_x) {
- m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
- }
-
- R_SUCCEED();
-}
-
-KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
- KScopedLightLock lk(m_general_lock);
-
- return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
-}
-
-KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
- if (!Contains(addr, 1)) {
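- // Report everything above the address space as a single inaccessible block.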
- return {
- .m_address = GetInteger(m_address_space_end),
- .m_size = 0 - GetInteger(m_address_space_end),
- .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
- .m_device_disable_merge_left_count = 0,
- .m_device_disable_merge_right_count = 0,
- .m_ipc_lock_count = 0,
- .m_device_use_count = 0,
- .m_ipc_disable_merge_count = 0,
- .m_permission = KMemoryPermission::None,
- .m_attribute = KMemoryAttribute::None,
- .m_original_permission = KMemoryPermission::None,
- .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
- };
- }
-
- return QueryInfoImpl(addr);
-}
-
-Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm) {
- const size_t num_pages = size / PageSize;
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory permission.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
- std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Determine new perm.
- const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
- R_SUCCEED_IF(old_perm == new_perm);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Perform mapping operation.
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
- const size_t num_pages = size / PageSize;
- ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
- KMemoryAttribute::SetMask);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory attribute.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- KMemoryAttribute old_attr;
- size_t num_allocator_blocks;
- constexpr auto AttributeTestMask =
- ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
- const KMemoryState state_test_mask =
- static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
- ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
- : 0) |
- ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
- ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
- : 0));
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_test_mask, state_test_mask,
- KMemoryPermission::None, KMemoryPermission::None,
- AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // If we need to, perform a change attribute operation.
- if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
- // Perform operation.
- R_TRY(this->Operate(addr, num_pages, old_perm,
- OperationType::ChangePermissionsAndRefreshAndFlush, 0));
- }
-
- // Update the blocks.
- m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
- static_cast<KMemoryAttribute>(mask),
- static_cast<KMemoryAttribute>(attr));
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetMaxHeapSize(size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Only process page tables are allowed to set heap size.
- ASSERT(!this->IsKernel());
-
- m_max_heap_size = size;
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetHeapSize(u64* out, size_t size) {
- // Lock the physical memory mutex.
- KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
-
- // Try to perform a reduction in heap size, instead of an extension.
- KProcessAddress cur_address{};
- size_t allocation_size{};
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that setting heap size is possible at all.
- R_UNLESS(!m_is_kernel, ResultOutOfMemory);
- R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
- ResultOutOfMemory);
- R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
-
- if (size < GetHeapSize()) {
- // The size being requested is less than the current size, so we need to free the end of
- // the heap.
-
- // Validate memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
- m_heap_region_start + size, GetHeapSize() - size,
- KMemoryState::All, KMemoryState::Normal,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Unmap the end of the heap.
- const auto num_pages = (GetHeapSize() - size) / PageSize;
- R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
- OperationType::Unmap));
-
- // Release the memory from the resource limit.
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize);
-
- // Apply the memory block update.
- m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
- num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None);
-
- // Update the current heap end.
- m_current_heap_end = m_heap_region_start + size;
-
- // Set the output.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- } else if (size == GetHeapSize()) {
- // The size requested is exactly the current size.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- } else {
- // We have to allocate memory. Determine how much to allocate and where while the table
- // is locked.
- cur_address = m_current_heap_end;
- allocation_size = size - GetHeapSize();
- }
- }
-
- // Reserve memory for the heap extension.
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Allocate pages for the heap extension.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, allocation_size / PageSize,
- KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
- // Clear all the newly allocated pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
- it.GetSize());
- }
-
- // Map the pages.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Ensure that the heap hasn't changed since we began executing.
- ASSERT(cur_address == m_current_heap_end);
-
- // Check the memory state.
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
- allocation_size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(
- std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Map the pages.
- const auto num_pages = allocation_size / PageSize;
- R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
-
- // Clear all the newly allocated pages.
- for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
- std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
- PageSize);
- }
-
- // We succeeded, so commit our memory reservation.
- memory_reservation.Commit();
-
- // Apply the memory block update.
- m_memory_block_manager.Update(
- std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Update the current heap end.
- m_current_heap_end = m_heap_region_start + size;
-
- // Set the output.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- }
-}
-
-Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
- size_t size, KMemoryPermission perm,
- bool is_aligned, bool check_heap) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- const auto test_state =
- (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
- (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
- size_t num_allocator_blocks;
- KMemoryState old_state;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
- std::addressof(num_allocator_blocks), address, size, test_state,
- test_state, perm, perm,
- KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
- KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
- &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
-
- // Set whether the locked memory was I/O.
- *out_is_io =
- static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
- bool check_heap) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- const auto test_state = KMemoryState::FlagCanDeviceMap |
- (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size, test_state, test_state,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
- m_enable_device_address_space_merge
- ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
- : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
- KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
- KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
- &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
- size_t size) {
- R_RETURN(this->LockMemoryAndOpen(
- nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
- KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
- KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
- KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
- R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
- KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Locked, nullptr));
-}
-
-Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
- KMemoryPermission perm) {
- R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
- KMemoryState::FlagCanTransfer, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size,
- const KPageGroup& pg) {
- R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
- KMemoryState::FlagCanTransfer, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Locked, std::addressof(pg)));
-}
-
-Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
- R_RETURN(this->LockMemoryAndOpen(
- out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
- KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
- R_RETURN(this->UnlockMemory(
- addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
- KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
-}
-
-bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
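- // The region is contiguous iff the host backing pointer advances by exactly PageSize per page.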
- auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
- for (u64 offset{}; offset < size; offset += PageSize) {
- if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
- return false;
- }
- start_ptr += PageSize;
- }
- return true;
-}
-
-void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
- KPageGroup& page_linked_list) {
- KProcessAddress addr{start};
- while (addr < start + (num_pages * PageSize)) {
- const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
- ASSERT(paddr != 0);
- page_linked_list.AddBlock(paddr, 1);
- addr += PageSize;
- }
-}
-
-KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
- u64 needed_num_pages, size_t align) {
- if (m_enable_aslr) {
- UNIMPLEMENTED();
- }
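- // Search for a free area, reserving guard pages around it (1 for kernel tables, 4 for user).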
- return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
- IsKernel() ? 1 : 4);
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
- OperationType operation) {
- ASSERT(this->IsLockedByCurrentThread());
-
- ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
- ASSERT(num_pages > 0);
- ASSERT(num_pages == page_group.GetNumPages());
-
- switch (operation) {
- case OperationType::MapGroup:
- case OperationType::MapFirstGroup: {
- // We want to maintain a new reference to every page in the group.
- KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
-
- for (const auto& node : page_group) {
- const size_t size{node.GetNumPages() * PageSize};
-
- // Map the pages.
- m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
- addr += size;
- }
-
- // We succeeded! We want to persist the reference to the pages.
- spg.CancelClose();
-
- break;
- }
- default:
- ASSERT(false);
- break;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, KPhysicalAddress map_addr) {
- ASSERT(this->IsLockedByCurrentThread());
-
- ASSERT(num_pages > 0);
- ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
- ASSERT(ContainsPages(addr, num_pages));
-
- switch (operation) {
- case OperationType::Unmap: {
- // Ensure that any pages we track close on exit.
- KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
- SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
- this->AddRegionToPages(addr, num_pages, pages_to_close);
- m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
- break;
- }
- case OperationType::Map: {
- ASSERT(map_addr);
- ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
- m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
- // Open references to pages, if we should.
- if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
- m_kernel.MemoryManager().Open(map_addr, num_pages);
- }
- break;
- }
- case OperationType::Separate: {
- // HACK: Unimplemented; treated as a no-op.
- break;
- }
- case OperationType::ChangePermissions:
- case OperationType::ChangePermissionsAndRefresh:
- case OperationType::ChangePermissionsAndRefreshAndFlush:
- break;
- default:
- ASSERT(false);
- break;
- }
- R_SUCCEED();
-}
-
-void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
- while (page_list->Peek()) {
- [[maybe_unused]] auto page = page_list->Pop();
-
- // TODO(bunnei): Free pages once they are allocated in guest memory
- // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
- // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
- // this->GetPageTableManager().Free(page);
- }
-}
-
-KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return m_address_space_start;
- case Svc::MemoryState::Normal:
- return m_heap_region_start;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- return m_alias_region_start;
- case Svc::MemoryState::Stack:
- return m_stack_region_start;
- case Svc::MemoryState::Static:
- case Svc::MemoryState::ThreadLocal:
- return m_kernel_map_region_start;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return m_alias_code_region_start;
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- return m_code_region_start;
- default:
- UNREACHABLE();
- }
-}
-
-size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return m_address_space_end - m_address_space_start;
- case Svc::MemoryState::Normal:
- return m_heap_region_end - m_heap_region_start;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- return m_alias_region_end - m_alias_region_start;
- case Svc::MemoryState::Stack:
- return m_stack_region_end - m_stack_region_start;
- case Svc::MemoryState::Static:
- case Svc::MemoryState::ThreadLocal:
- return m_kernel_map_region_end - m_kernel_map_region_start;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return m_alias_code_region_end - m_alias_code_region_start;
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- return m_code_region_end - m_code_region_start;
- default:
- UNREACHABLE();
- }
-}
-
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
- const KProcessAddress end = addr + size;
- const KProcessAddress last = end - 1;
-
- const KProcessAddress region_start = this->GetRegionAddress(state);
- const size_t region_size = this->GetRegionSize(state);
-
- const bool is_in_region =
- region_start <= addr && addr < end && last <= region_start + region_size - 1;
- const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
- m_heap_region_start == m_heap_region_end);
- const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
- m_alias_region_start == m_alias_region_end);
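- // Note: an empty heap or alias region (start == end) can never overlap.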
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return is_in_region;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Static:
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Stack:
- case Svc::MemoryState::ThreadLocal:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return is_in_region && !is_in_heap && !is_in_alias;
- case Svc::MemoryState::Normal:
- ASSERT(is_in_heap);
- return is_in_region && !is_in_alias;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- ASSERT(is_in_alias);
- return is_in_region && !is_in_heap;
- default:
- return false;
- }
-}
-
-Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- // Validate that the state, permission, and attributes match expectations.
- R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
- size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Get information about the first block.
- const KProcessAddress last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
- KMemoryInfo info = it->GetMemoryInfo();
-
- // If the start address isn't aligned, we need a block.
- const size_t blocks_for_start_align =
- (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
- while (true) {
- // Validate against the provided masks.
- R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
- // Break once we're done.
- if (last_addr <= info.GetLastAddress()) {
- break;
- }
-
- // Advance our iterator.
- it++;
- ASSERT(it != m_memory_block_manager.cend());
- info = it->GetMemoryInfo();
- }
-
- // If the end address isn't aligned, we need a block.
- const size_t blocks_for_end_align =
- (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
-
- if (out_blocks_needed != nullptr) {
- *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KMemoryBlockManager::const_iterator it,
- KProcessAddress last_addr, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Get information about the first block.
- KMemoryInfo info = it->GetMemoryInfo();
-
- // Validate that all blocks in the range have the correct state.
- const KMemoryState first_state = info.m_state;
- const KMemoryPermission first_perm = info.m_permission;
- const KMemoryAttribute first_attr = info.m_attribute;
- while (true) {
- // Validate the current block.
- R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
- R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
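- // OR-ing ignore_attr into both sides removes the ignored attributes from the comparison.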
- R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
- ResultInvalidCurrentMemory);
-
- // Validate against the provided masks.
- R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
- // Break once we're done.
- if (last_addr <= info.GetLastAddress()) {
- break;
- }
-
- // Advance our iterator.
- it++;
- ASSERT(it != m_memory_block_manager.cend());
- info = it->GetMemoryInfo();
- }
-
- // Write output state.
- if (out_state != nullptr) {
- *out_state = first_state;
- }
- if (out_perm != nullptr) {
- *out_perm = first_perm;
- }
- if (out_attr != nullptr) {
- *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
- }
-
- // If the end address isn't aligned, we need a block.
- if (out_blocks_needed != nullptr) {
- const size_t blocks_for_end_align =
- (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
- ? 1
- : 0;
- *out_blocks_needed = blocks_for_end_align;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Check memory state.
- const KProcessAddress last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
- R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
- state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
-
- // If the start address isn't aligned, we need a block.
- if (out_blocks_needed != nullptr &&
- Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
- ++(*out_blocks_needed);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr) {
- // Validate basic preconditions.
- ASSERT((lock_attr & attr) == KMemoryAttribute::None);
- ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
- KMemoryAttribute::None);
-
- // Validate the lock request.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check that the output page group is empty, if it exists.
- if (out_pg) {
- ASSERT(out_pg->GetNumPages() == 0);
- }
-
- // Check the state.
- KMemoryState old_state{};
- KMemoryPermission old_perm{};
- KMemoryAttribute old_attr{};
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Get the physical address, if we're supposed to.
- if (out_KPhysicalAddress != nullptr) {
- ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr));
- }
-
- // Make the page group, if we're supposed to.
- if (out_pg != nullptr) {
- R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
- }
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Decide on new perm and attr.
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
- KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
-
- // Update permission, if we need to.
- if (new_perm != old_perm) {
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
- }
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- new_attr, KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
-
- // If we have an output page group, open a reference to it.
- if (out_pg) {
- out_pg->Open();
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr, const KPageGroup* pg) {
- // Validate basic preconditions.
- ASSERT((attr_mask & lock_attr) == lock_attr);
- ASSERT((attr & lock_attr) == lock_attr);
-
- // Validate the unlock request.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the state.
- KMemoryState old_state{};
- KMemoryPermission old_perm{};
- KMemoryAttribute old_attr{};
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Check the page group.
- if (pg != nullptr) {
- R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
- }
-
- // Decide on new perm and attr.
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
- KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update permission, if we need to.
- if (new_perm != old_perm) {
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
- }
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- new_attr, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Locked);
-
- R_SUCCEED();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 3d64b6fb0..5541bc13f 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -3,548 +3,14 @@
#pragma once
-#include <memory>
-
-#include "common/common_funcs.h"
-#include "common/page_table.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Core {
-class System;
-}
+#include "core/hle/kernel/k_page_table_base.h"
namespace Kernel {
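-// Flags describing how memory-block merging is disabled or re-enabled for the head, body, and tail of a mapping.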
-enum class DisableMergeAttribute : u8 {
- None = (0U << 0),
- DisableHead = (1U << 0),
- DisableHeadAndBody = (1U << 1),
- EnableHeadAndBody = (1U << 2),
- DisableTail = (1U << 3),
- EnableTail = (1U << 4),
- EnableAndMergeHeadBodyTail = (1U << 5),
- EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
- DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
-};
-
-struct KPageProperties {
- KMemoryPermission perm;
- bool io;
- bool uncached;
- DisableMergeAttribute disable_merge_attributes;
-};
-static_assert(std::is_trivial_v<KPageProperties>);
-static_assert(sizeof(KPageProperties) == sizeof(u32));
-
-class KBlockInfoManager;
-class KMemoryBlockManager;
-class KResourceLimit;
-class KSystemResource;
-
-class KPageTable final {
-protected:
- struct PageLinkedList;
-
-public:
- enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
-
- YUZU_NON_COPYABLE(KPageTable);
- YUZU_NON_MOVEABLE(KPageTable);
-
- explicit KPageTable(Core::System& system_);
- ~KPageTable();
-
- Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
- KProcessAddress code_addr, size_t code_size,
- KSystemResource* system_resource, KResourceLimit* resource_limit,
- Core::Memory::Memory& memory);
-
- void Finalize();
-
- Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
- KMemoryPermission perm);
- Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
- Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy);
- Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
- KProcessAddress src_addr);
- Result MapPhysicalMemory(KProcessAddress addr, size_t size);
- Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
- Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
- Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
- Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm);
- KMemoryInfo QueryInfo(KProcessAddress addr);
- Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
- Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
- Result SetMaxHeapSize(size_t size);
- Result SetHeapSize(u64* out, size_t size);
- Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
- KMemoryPermission perm, bool is_aligned, bool check_heap);
- Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
-
- Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
-
- Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
- Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
-
- Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
- KPageTable& src_page_table, KMemoryPermission test_perm,
- KMemoryState dst_state, bool send);
- Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
- Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
-
- Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
- KMemoryPermission perm);
- Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
- Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
- Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
- Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr);
-
- Common::PageTable& PageTableImpl() {
- return *m_page_table_impl;
- }
-
- const Common::PageTable& PageTableImpl() const {
- return *m_page_table_impl;
- }
-
- KBlockInfoManager* GetBlockInfoManager() {
- return m_block_info_manager;
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, KProcessAddress region_start,
- size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
- region_num_pages, state, perm));
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
-
- Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
- KMemoryPermission perm);
- Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
-
- Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
- KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm);
- Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
- KMemoryPermission perm);
- Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
- void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
- const KPageGroup& pg);
-
- KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
- size_t GetRegionSize(Svc::MemoryState state) const;
- bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
-
- KProcessAddress GetRegionAddress(KMemoryState state) const {
- return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
- size_t GetRegionSize(KMemoryState state) const {
- return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
- bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
- return this->CanContain(addr, size,
- static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
-
-protected:
- struct PageLinkedList {
- private:
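- // Nodes are page-sized and page-aligned; the tracked pages themselves serve as the list's storage.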
- struct Node {
- Node* m_next;
- std::array<u8, PageSize - sizeof(Node*)> m_buffer;
- };
-
- public:
- constexpr PageLinkedList() = default;
-
- void Push(Node* n) {
- ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
- n->m_next = m_root;
- m_root = n;
- }
-
- void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
- this->Push(memory.GetPointer<Node>(GetInteger(addr)));
- }
-
- Node* Peek() const {
- return m_root;
- }
-
- Node* Pop() {
- Node* const r = m_root;
-
- m_root = r->m_next;
- r->m_next = nullptr;
-
- return r;
- }
-
- private:
- Node* m_root{};
- };
- static_assert(std::is_trivially_destructible<PageLinkedList>::value);
-
-private:
- enum class OperationType : u32 {
- Map = 0,
- MapGroup = 1,
- MapFirstGroup = 2,
- Unmap = 3,
- ChangePermissions = 4,
- ChangePermissionsAndRefresh = 5,
- ChangePermissionsAndRefreshAndFlush = 6,
- Separate = 7,
- };
-
- static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
- KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
- size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
- bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
- void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
- KMemoryInfo QueryInfoImpl(KProcessAddress addr);
- KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
- u64 needed_num_pages, size_t align);
- Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
- OperationType operation);
- Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, KPhysicalAddress map_addr = 0);
- void FinalizeUpdate(PageLinkedList* page_list);
-
- KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages);
-
- Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
- perm, attr_mask, attr));
- }
-
- Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
- state_mask, state, perm_mask, perm, attr_mask, attr,
- ignore_attr));
- }
- Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
- attr_mask, attr, ignore_attr));
- }
-
- Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr);
- Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr,
- const KPageGroup* pg);
-
- Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
- bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-
- bool IsLockedByCurrentThread() const {
- return m_general_lock.IsLockedByCurrentThread();
- }
-
- bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
- ASSERT(this->IsLockedByCurrentThread());
-
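- // The cached region pointer speeds up repeated lookups within the same physical heap region.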
- return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
- }
-
- bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- *out = GetPhysicalAddr(virt_addr);
-
- return *out != 0;
- }
-
- Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- KProcessAddress address, size_t size, KMemoryPermission test_perm,
- KMemoryState dst_state);
- Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
- KMemoryPermission test_perm, KMemoryState dst_state,
- KPageTable& src_page_table, bool send);
- void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
- size_t size, KMemoryPermission prot_perm);
-
- Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
- size_t num_pages, KMemoryPermission perm);
- Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
- const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
-
- mutable KLightLock m_general_lock;
- mutable KLightLock m_map_physical_memory_lock;
-
-public:
- constexpr KProcessAddress GetAddressSpaceStart() const {
- return m_address_space_start;
- }
- constexpr KProcessAddress GetAddressSpaceEnd() const {
- return m_address_space_end;
- }
- constexpr size_t GetAddressSpaceSize() const {
- return m_address_space_end - m_address_space_start;
- }
- constexpr KProcessAddress GetHeapRegionStart() const {
- return m_heap_region_start;
- }
- constexpr KProcessAddress GetHeapRegionEnd() const {
- return m_heap_region_end;
- }
- constexpr size_t GetHeapRegionSize() const {
- return m_heap_region_end - m_heap_region_start;
- }
- constexpr KProcessAddress GetAliasRegionStart() const {
- return m_alias_region_start;
- }
- constexpr KProcessAddress GetAliasRegionEnd() const {
- return m_alias_region_end;
- }
- constexpr size_t GetAliasRegionSize() const {
- return m_alias_region_end - m_alias_region_start;
- }
- constexpr KProcessAddress GetStackRegionStart() const {
- return m_stack_region_start;
- }
- constexpr KProcessAddress GetStackRegionEnd() const {
- return m_stack_region_end;
- }
- constexpr size_t GetStackRegionSize() const {
- return m_stack_region_end - m_stack_region_start;
- }
- constexpr KProcessAddress GetKernelMapRegionStart() const {
- return m_kernel_map_region_start;
- }
- constexpr KProcessAddress GetKernelMapRegionEnd() const {
- return m_kernel_map_region_end;
- }
- constexpr KProcessAddress GetCodeRegionStart() const {
- return m_code_region_start;
- }
- constexpr KProcessAddress GetCodeRegionEnd() const {
- return m_code_region_end;
- }
- constexpr KProcessAddress GetAliasCodeRegionStart() const {
- return m_alias_code_region_start;
- }
- constexpr KProcessAddress GetAliasCodeRegionEnd() const {
- return m_alias_code_region_end;
- }
- constexpr size_t GetAliasCodeRegionSize() const {
- return m_alias_code_region_end - m_alias_code_region_start;
- }
- size_t GetNormalMemorySize() {
- KScopedLightLock lk(m_general_lock);
- return GetHeapSize() + m_mapped_physical_memory_size;
- }
- constexpr size_t GetAddressSpaceWidth() const {
- return m_address_space_width;
- }
- constexpr size_t GetHeapSize() const {
- return m_current_heap_end - m_heap_region_start;
- }
- constexpr size_t GetNumGuardPages() const {
- return IsKernel() ? 1 : 4;
- }
- KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
- const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
- ASSERT(backing_addr);
- return backing_addr + GetInteger(addr);
- }
- constexpr bool Contains(KProcessAddress addr) const {
- return m_address_space_start <= addr && addr <= m_address_space_end - 1;
- }
- constexpr bool Contains(KProcessAddress addr, size_t size) const {
- return m_address_space_start <= addr && addr < addr + size &&
- addr + size - 1 <= m_address_space_end - 1;
- }
- constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
- return this->Contains(addr, size) && m_alias_region_start <= addr &&
- addr + size - 1 <= m_alias_region_end - 1;
- }
- constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
- return this->Contains(addr, size) && m_heap_region_start <= addr &&
- addr + size - 1 <= m_heap_region_end - 1;
- }
-
+class KPageTable final : public KPageTableBase {
public:
- static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return layout.GetLinearVirtualAddress(addr);
- }
-
- static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return layout.GetLinearPhysicalAddress(addr);
- }
-
- static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return GetLinearMappedVirtualAddress(layout, addr);
- }
-
- static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return GetLinearMappedPhysicalAddress(layout, addr);
- }
-
- static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return GetLinearMappedVirtualAddress(layout, addr);
- }
-
- static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return GetLinearMappedPhysicalAddress(layout, addr);
- }
-
-private:
- constexpr bool IsKernel() const {
- return m_is_kernel;
- }
- constexpr bool IsAslrEnabled() const {
- return m_enable_aslr;
- }
-
- constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
- return (m_address_space_start <= addr) &&
- (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
- (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
- }
-
-private:
- class KScopedPageTableUpdater {
- private:
- KPageTable* m_pt{};
- PageLinkedList m_ll;
-
- public:
- explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
- explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
- ~KScopedPageTableUpdater() {
- m_pt->FinalizeUpdate(this->GetPageList());
- }
-
- PageLinkedList* GetPageList() {
- return std::addressof(m_ll);
- }
- };
-
-private:
- KProcessAddress m_address_space_start{};
- KProcessAddress m_address_space_end{};
- KProcessAddress m_heap_region_start{};
- KProcessAddress m_heap_region_end{};
- KProcessAddress m_current_heap_end{};
- KProcessAddress m_alias_region_start{};
- KProcessAddress m_alias_region_end{};
- KProcessAddress m_stack_region_start{};
- KProcessAddress m_stack_region_end{};
- KProcessAddress m_kernel_map_region_start{};
- KProcessAddress m_kernel_map_region_end{};
- KProcessAddress m_code_region_start{};
- KProcessAddress m_code_region_end{};
- KProcessAddress m_alias_code_region_start{};
- KProcessAddress m_alias_code_region_end{};
-
- size_t m_max_heap_size{};
- size_t m_mapped_physical_memory_size{};
- size_t m_mapped_unsafe_physical_memory{};
- size_t m_mapped_insecure_memory{};
- size_t m_mapped_ipc_server_memory{};
- size_t m_address_space_width{};
-
- KMemoryBlockManager m_memory_block_manager;
- u32 m_allocate_option{};
-
- bool m_is_kernel{};
- bool m_enable_aslr{};
- bool m_enable_device_address_space_merge{};
-
- KMemoryBlockSlabManager* m_memory_block_slab_manager{};
- KBlockInfoManager* m_block_info_manager{};
- KResourceLimit* m_resource_limit{};
-
- u32 m_heap_fill_value{};
- u32 m_ipc_fill_value{};
- u32 m_stack_fill_value{};
- const KMemoryRegion* m_cached_physical_heap_region{};
-
- KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
- KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
-
- std::unique_ptr<Common::PageTable> m_page_table_impl;
-
- Core::System& m_system;
- KernelCore& m_kernel;
- Core::Memory::Memory* m_memory{};
+ explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {}
+ ~KPageTable() = default;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
new file mode 100644
index 000000000..6691586ed
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -0,0 +1,5739 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_address_space_info.h"
+#include "core/hle/kernel/k_page_table_base.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_system_resource.h"
+
+namespace Kernel {
+
+namespace {
+
+class KScopedLightLockPair {
+ YUZU_NON_COPYABLE(KScopedLightLockPair);
+ YUZU_NON_MOVEABLE(KScopedLightLockPair);
+
+private:
+ KLightLock* m_lower;
+ KLightLock* m_upper;
+
+public:
+ KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
+ // Ensure our locks are in a consistent order.
+ if (std::addressof(lhs) <= std::addressof(rhs)) {
+ m_lower = std::addressof(lhs);
+ m_upper = std::addressof(rhs);
+ } else {
+ m_lower = std::addressof(rhs);
+ m_upper = std::addressof(lhs);
+ }
+
+ // Acquire both locks.
+ m_lower->Lock();
+ if (m_lower != m_upper) {
+ m_upper->Lock();
+ }
+ }
+
+ ~KScopedLightLockPair() {
+ // Unlock the upper lock.
+ if (m_upper != nullptr && m_upper != m_lower) {
+ m_upper->Unlock();
+ }
+
+ // Unlock the lower lock.
+ if (m_lower != nullptr) {
+ m_lower->Unlock();
+ }
+ }
+
+public:
+ // Utility.
+ void TryUnlockHalf(KLightLock& lock) {
+ // Only allow unlocking if the lock is half the pair.
+ if (m_lower != m_upper) {
+ // We want to be sure the lock is one we own.
+ if (m_lower == std::addressof(lock)) {
+ lock.Unlock();
+ m_lower = nullptr;
+ } else if (m_upper == std::addressof(lock)) {
+ lock.Unlock();
+ m_upper = nullptr;
+ }
+ }
+ }
+};
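+
+// A minimal usage sketch (hypothetical locks; illustrative only): because the
+// pair always acquires the lower-addressed lock first, two threads taking the
+// same two locks in opposite argument order cannot deadlock against each
+// other.
+//
+//   KLightLock lock_a{kernel}, lock_b{kernel};
+//   KScopedLightLockPair pair_one(lock_a, lock_b); // thread 1
+//   KScopedLightLockPair pair_two(lock_b, lock_a); // thread 2, same internal order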
+
+template <typename AddressType>
+void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) {
+ system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
+}
+
+template <typename AddressType>
+Result InvalidateDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+template <typename AddressType>
+Result StoreDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+template <typename AddressType>
+Result FlushDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
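+
+// Note: the data-cache helpers above succeed without doing any work. Guest
+// memory here is backed by host memory that the CPU backend reads directly,
+// so data caches stay coherent and only the instruction-cache invalidation
+// needs to be forwarded (a property of this emulated environment, not of the
+// hardware kernel).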
+
+constexpr Common::MemoryPermission ConvertToMemoryPermission(KMemoryPermission perm) {
+ Common::MemoryPermission perms{};
+ if (True(perm & KMemoryPermission::UserRead)) {
+ perms |= Common::MemoryPermission::Read;
+ }
+ if (True(perm & KMemoryPermission::UserWrite)) {
+ perms |= Common::MemoryPermission::Write;
+ }
+#ifdef HAS_NCE
+ if (True(perm & KMemoryPermission::UserExecute)) {
+ perms |= Common::MemoryPermission::Execute;
+ }
+#endif
+ return perms;
+}
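+
+// For example (a sketch, assuming KMemoryPermission::UserReadWrite is composed
+// as UserRead | UserWrite):
+//
+//   constexpr auto host_perm = ConvertToMemoryPermission(KMemoryPermission::UserReadWrite);
+//   static_assert(True(host_perm & Common::MemoryPermission::Read));
+//   static_assert(True(host_perm & Common::MemoryPermission::Write));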
+
+} // namespace
+
+void KPageTableBase::MemoryRange::Open() {
+ // If the range contains heap pages, open them.
+ if (this->IsHeap()) {
+ m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
+ }
+}
+
+void KPageTableBase::MemoryRange::Close() {
+ // If the range contains heap pages, close them.
+ if (this->IsHeap()) {
+ m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
+ }
+}
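+
+// Open/Close adjust the memory manager's per-page reference counts, keeping
+// the heap pages backing a MemoryRange allocated for as long as the range
+// holds them open.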
+
+KPageTableBase::KPageTableBase(KernelCore& kernel)
+ : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel),
+ m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {}
+KPageTableBase::~KPageTableBase() = default;
+
+Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start,
+ KVirtualAddress end, Core::Memory::Memory& memory) {
+ // Initialize our members.
+ m_address_space_width =
+ static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>());
+ m_address_space_start = KProcessAddress(GetInteger(start));
+ m_address_space_end = KProcessAddress(GetInteger(end));
+ m_is_kernel = true;
+ m_enable_aslr = true;
+ m_enable_device_address_space_merge = false;
+
+ m_heap_region_start = 0;
+ m_heap_region_end = 0;
+ m_current_heap_end = 0;
+ m_alias_region_start = 0;
+ m_alias_region_end = 0;
+ m_stack_region_start = 0;
+ m_stack_region_end = 0;
+ m_kernel_map_region_start = 0;
+ m_kernel_map_region_end = 0;
+ m_alias_code_region_start = 0;
+ m_alias_code_region_end = 0;
+ m_code_region_start = 0;
+ m_code_region_end = 0;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+ m_memory_block_slab_manager =
+ m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer();
+ m_resource_limit = m_kernel.GetSystemResourceLimit();
+
+ m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System,
+ KMemoryManager::Direction::FromFront);
+ m_heap_fill_value = MemoryFillValue_Zero;
+ m_ipc_fill_value = MemoryFillValue_Zero;
+ m_stack_fill_value = MemoryFillValue_Zero;
+
+ m_cached_physical_linear_region = nullptr;
+ m_cached_physical_heap_region = nullptr;
+
+ // Initialize our implementation.
+ m_impl = std::make_unique<Common::PageTable>();
+ m_impl->Resize(m_address_space_width, PageBits);
+
+ // Set the tracking memory.
+ m_memory = std::addressof(memory);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
+
+Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+ bool enable_das_merge, bool from_back,
+ KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory,
+ KProcessAddress aslr_space_start) {
+ // Calculate region extents.
+ const size_t as_width = GetAddressSpaceWidth(as_type);
+ const KProcessAddress start = 0;
+ const KProcessAddress end = (1ULL << as_width);
+
+ // Validate the region.
+ ASSERT(start <= code_address);
+ ASSERT(code_address < code_address + code_size);
+ ASSERT(code_address + code_size - 1 <= end - 1);
+
+ // Define helpers.
+ auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) {
+ return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
+ };
+ auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) {
+ return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
+ };
+
+ // Set our bit width and heap/alias sizes.
+ m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type));
+ size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+ size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+
+ // Adjust heap/alias size if we don't have an alias region.
+ if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) ==
+ Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
+ heap_region_size += alias_region_size;
+ alias_region_size = 0;
+ }
+
+ // Set code regions and determine remaining sizes.
+ KProcessAddress process_code_start;
+ KProcessAddress process_code_end;
+ size_t stack_region_size;
+ size_t kernel_map_region_size;
+ if (m_address_space_width == 39) {
+ alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+ heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+ stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
+ kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_start = m_address_space_start + aslr_space_start +
+ GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = m_code_region_end;
+ process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment);
+ process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+ } else {
+ stack_region_size = 0;
+ kernel_map_region_size = 0;
+ m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_stack_region_start = m_code_region_start;
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+ GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+ m_stack_region_end = m_code_region_end;
+ m_kernel_map_region_start = m_code_region_start;
+ m_kernel_map_region_end = m_code_region_end;
+ process_code_start = m_code_region_start;
+ process_code_end = m_code_region_end;
+ }
+
+ // Set other basic fields.
+ m_enable_aslr = enable_aslr;
+ m_enable_device_address_space_merge = enable_das_merge;
+ m_address_space_start = start;
+ m_address_space_end = end;
+ m_is_kernel = false;
+ m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
+ m_resource_limit = resource_limit;
+
+ // Determine the region we can place our undetermined regions in.
+ KProcessAddress alloc_start;
+ size_t alloc_size;
+ if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >=
+ (GetInteger(end) - GetInteger(process_code_end))) {
+ alloc_start = m_code_region_start;
+ alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
+ } else {
+ alloc_start = process_code_end;
+ alloc_size = GetInteger(end) - GetInteger(process_code_end);
+ }
+ const size_t needed_size =
+ (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+ R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
+
+ const size_t remaining_size = alloc_size - needed_size;
+
+ // Determine random placements for each region.
+ size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
+ if (enable_aslr) {
+ alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ }
+
+ // Setup heap and alias regions.
+ m_alias_region_start = alloc_start + alias_rnd;
+ m_alias_region_end = m_alias_region_start + alias_region_size;
+ m_heap_region_start = alloc_start + heap_rnd;
+ m_heap_region_end = m_heap_region_start + heap_region_size;
+
+ if (alias_rnd <= heap_rnd) {
+ m_heap_region_start += alias_region_size;
+ m_heap_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += heap_region_size;
+ m_alias_region_end += heap_region_size;
+ }
+
+ // Setup stack region.
+ if (stack_region_size) {
+ m_stack_region_start = alloc_start + stack_rnd;
+ m_stack_region_end = m_stack_region_start + stack_region_size;
+
+ if (alias_rnd < stack_rnd) {
+ m_stack_region_start += alias_region_size;
+ m_stack_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += stack_region_size;
+ m_alias_region_end += stack_region_size;
+ }
+
+ if (heap_rnd < stack_rnd) {
+ m_stack_region_start += heap_region_size;
+ m_stack_region_end += heap_region_size;
+ } else {
+ m_heap_region_start += stack_region_size;
+ m_heap_region_end += stack_region_size;
+ }
+ }
+
+ // Setup kernel map region.
+ if (kernel_map_region_size) {
+ m_kernel_map_region_start = alloc_start + kmap_rnd;
+ m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
+
+ if (alias_rnd < kmap_rnd) {
+ m_kernel_map_region_start += alias_region_size;
+ m_kernel_map_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += kernel_map_region_size;
+ m_alias_region_end += kernel_map_region_size;
+ }
+
+ if (heap_rnd < kmap_rnd) {
+ m_kernel_map_region_start += heap_region_size;
+ m_kernel_map_region_end += heap_region_size;
+ } else {
+ m_heap_region_start += kernel_map_region_size;
+ m_heap_region_end += kernel_map_region_size;
+ }
+
+ if (stack_region_size) {
+ if (stack_rnd < kmap_rnd) {
+ m_kernel_map_region_start += stack_region_size;
+ m_kernel_map_region_end += stack_region_size;
+ } else {
+ m_stack_region_start += kernel_map_region_size;
+ m_stack_region_end += kernel_map_region_size;
+ }
+ }
+ }
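+
+ // The shifts above order the regions by their random offsets: each region is
+ // displaced upward by the size of every region that drew a smaller offset,
+ // so the resulting regions are pairwise disjoint (asserted below). For
+ // example, if alias_rnd < heap_rnd < stack_rnd with no kernel map region,
+ // the layout from alloc_start upward is alias, then heap, then stack.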
+
+ // Set heap and fill members.
+ m_current_heap_end = m_heap_region_start;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+ // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
+ const bool fill_memory = false;
+ m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
+ m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
+ m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
+
+ // Set allocation option.
+ m_allocate_option =
+ KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
+ : KMemoryManager::Direction::FromFront);
+
+ // Ensure that our regions are inside our address space.
+ auto IsInAddressSpace = [&](KProcessAddress addr) {
+ return m_address_space_start <= addr && addr <= m_address_space_end;
+ };
+ ASSERT(IsInAddressSpace(m_alias_region_start));
+ ASSERT(IsInAddressSpace(m_alias_region_end));
+ ASSERT(IsInAddressSpace(m_heap_region_start));
+ ASSERT(IsInAddressSpace(m_heap_region_end));
+ ASSERT(IsInAddressSpace(m_stack_region_start));
+ ASSERT(IsInAddressSpace(m_stack_region_end));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_end));
+
+ // Ensure that we selected regions that don't overlap.
+ const KProcessAddress alias_start = m_alias_region_start;
+ const KProcessAddress alias_last = m_alias_region_end - 1;
+ const KProcessAddress heap_start = m_heap_region_start;
+ const KProcessAddress heap_last = m_heap_region_end - 1;
+ const KProcessAddress stack_start = m_stack_region_start;
+ const KProcessAddress stack_last = m_stack_region_end - 1;
+ const KProcessAddress kmap_start = m_kernel_map_region_start;
+ const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
+ ASSERT(alias_last < heap_start || heap_last < alias_start);
+ ASSERT(alias_last < stack_start || stack_last < alias_start);
+ ASSERT(alias_last < kmap_start || kmap_last < alias_start);
+ ASSERT(heap_last < stack_start || stack_last < heap_start);
+ ASSERT(heap_last < kmap_start || kmap_last < heap_start);
+
+ // Initialize our implementation.
+ m_impl = std::make_unique<Common::PageTable>();
+ m_impl->Resize(m_address_space_width, PageBits);
+
+ // Set the tracking memory.
+ m_memory = std::addressof(memory);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
+
+void KPageTableBase::Finalize() {
+ auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
+ if (Settings::IsFastmemEnabled()) {
+ m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+ }
+ };
+
+ // Finalize memory blocks.
+ m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
+
+ // Free any unsafe mapped memory.
+ if (m_mapped_unsafe_physical_memory) {
+ UNIMPLEMENTED();
+ }
+
+ // Release any insecure mapped memory.
+ if (m_mapped_insecure_memory) {
+ if (auto* const insecure_resource_limit =
+ KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ insecure_resource_limit != nullptr) {
+ insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ m_mapped_insecure_memory);
+ }
+ }
+
+ // Release any ipc server memory.
+ if (m_mapped_ipc_server_memory) {
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ m_mapped_ipc_server_memory);
+ }
+
+ // Close the backing page table, as the destructor is not called for guest objects.
+ m_impl.reset();
+}
+
+KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const {
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return m_address_space_start;
+ case Svc::MemoryState::Normal:
+ return m_heap_region_start;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ return m_alias_region_start;
+ case Svc::MemoryState::Stack:
+ return m_stack_region_start;
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::ThreadLocal:
+ return m_kernel_map_region_start;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return m_alias_code_region_start;
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ return m_code_region_start;
+ default:
+ UNREACHABLE();
+ }
+}
+
+size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const {
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return m_address_space_end - m_address_space_start;
+ case Svc::MemoryState::Normal:
+ return m_heap_region_end - m_heap_region_start;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ return m_alias_region_end - m_alias_region_start;
+ case Svc::MemoryState::Stack:
+ return m_stack_region_end - m_stack_region_start;
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::ThreadLocal:
+ return m_kernel_map_region_end - m_kernel_map_region_start;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return m_alias_code_region_end - m_alias_code_region_start;
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ return m_code_region_end - m_code_region_start;
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
+ const KProcessAddress end = addr + size;
+ const KProcessAddress last = end - 1;
+
+ const KProcessAddress region_start = this->GetRegionAddress(state);
+ const size_t region_size = this->GetRegionSize(state);
+
+ const bool is_in_region =
+ region_start <= addr && addr < end && last <= region_start + region_size - 1;
+ const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+ m_heap_region_start == m_heap_region_end);
+ const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+ m_alias_region_start == m_alias_region_end);
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return is_in_region;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Stack:
+ case Svc::MemoryState::ThreadLocal:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return is_in_region && !is_in_heap && !is_in_alias;
+ case Svc::MemoryState::Normal:
+ ASSERT(is_in_heap);
+ return is_in_region && !is_in_alias;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ ASSERT(is_in_alias);
+ return is_in_region && !is_in_heap;
+ default:
+ return false;
+ }
+}
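+
+// The explicit heap/alias exclusions matter for the 32-bit layouts set up in
+// InitializeForProcess, where the code, stack, and alias-code windows can
+// overlap the heap and alias regions: an address that is "in region" for such
+// a state must still avoid those carve-outs.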
+
+Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ // Validate the states match expectation.
+ R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
+ size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm,
+ KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Get information about the first block.
+ const KProcessAddress last_addr = addr + size - 1;
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+ KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the start address isn't aligned, we need a block.
+ const size_t blocks_for_start_align =
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
+
+ while (true) {
+ // Validate against the provided masks.
+ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+ // Break once we're done.
+ if (last_addr <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance our iterator.
+ it++;
+ ASSERT(it != m_memory_block_manager.cend());
+ info = it->GetMemoryInfo();
+ }
+
+ // If the end address isn't aligned, we need a block.
+ const size_t blocks_for_end_align =
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+
+ if (out_blocks_needed != nullptr) {
+ *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KMemoryBlockManager::const_iterator it,
+ KProcessAddress last_addr, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Get information about the first block.
+ KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate all blocks in the range have correct state.
+ const KMemoryState first_state = info.m_state;
+ const KMemoryPermission first_perm = info.m_permission;
+ const KMemoryAttribute first_attr = info.m_attribute;
+ while (true) {
+ // Validate the current block.
+ R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
+ R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
+ ResultInvalidCurrentMemory);
+
+ // Validate against the provided masks.
+ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+ // Break once we're done.
+ if (last_addr <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance our iterator.
+ it++;
+ ASSERT(it != m_memory_block_manager.cend());
+ info = it->GetMemoryInfo();
+ }
+
+ // Write output state.
+ if (out_state != nullptr) {
+ *out_state = first_state;
+ }
+ if (out_perm != nullptr) {
+ *out_perm = first_perm;
+ }
+ if (out_attr != nullptr) {
+ *out_attr = first_attr & ~ignore_attr;
+ }
+
+ // If the end address isn't aligned, we need a block.
+ if (out_blocks_needed != nullptr) {
+ const size_t blocks_for_end_align =
+ (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+ ? 1
+ : 0;
+ *out_blocks_needed = blocks_for_end_align;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Check memory state.
+ const KProcessAddress last_addr = addr + size - 1;
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+ R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+ state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
+
+ // If the start address isn't aligned, we need a block.
+ if (out_blocks_needed != nullptr &&
+ Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+ ++(*out_blocks_needed);
+ }
+
+ R_SUCCEED();
+}
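+
+// A worked example of the blocks-needed accounting above: a range that starts
+// and ends strictly inside two different existing blocks splits each of them,
+// so at most two additional KMemoryBlocks are required. Callers feed this
+// count to a KMemoryBlockManagerUpdateAllocator, as the call sites below do.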
+
+Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr) {
+ // Validate basic preconditions.
+ ASSERT(False(lock_attr & attr));
+ ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+
+ // Validate the lock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check that the output page group is empty, if it exists.
+ if (out_pg) {
+ ASSERT(out_pg->GetNumPages() == 0);
+ }
+
+ // Check the state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Get the physical address, if we're supposed to.
+ if (out_paddr != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ }
+
+ // Make the page group, if we're supposed to.
+ if (out_pg != nullptr) {
+ R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+ }
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ const KPageProperties properties = {new_perm, false,
+ True(old_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // If we have an output group, open.
+ if (out_pg) {
+ out_pg->Open();
+ }
+
+ R_SUCCEED();
+}
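+
+// A sketch of the intended flow (hypothetical caller): a wrapper locking a
+// user buffer would pass a lock attribute such as KMemoryAttribute::Locked
+// plus a restricted new_perm here, keep the returned KPageGroup alive while
+// the buffer is in use, and later hand the same group to UnlockMemory below
+// so the unlock can verify it still covers the same physical pages.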
+
+Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageGroup* pg) {
+ // Validate basic preconditions.
+ ASSERT((attr_mask & lock_attr) == lock_attr);
+ ASSERT((attr & lock_attr) == lock_attr);
+
+ // Validate the unlock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Check the page group.
+ if (pg != nullptr) {
+ R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ const KPageProperties properties = {new_perm, false,
+ True(old_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Locked);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+ KProcessAddress address) const {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(out_info != nullptr);
+ ASSERT(out_page != nullptr);
+
+ const KMemoryBlock* block = m_memory_block_manager.FindBlock(address);
+ R_UNLESS(block != nullptr, ResultInvalidCurrentMemory);
+
+ *out_info = block->GetMemoryInfo();
+ out_page->flags = 0;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+ Svc::MemoryState state) const {
+ ASSERT(!this->IsLockedByCurrentThread());
+ ASSERT(out != nullptr);
+
+ const KProcessAddress region_start = this->GetRegionAddress(state);
+ const size_t region_size = this->GetRegionSize(state);
+
+ // Check that the address/size are potentially valid.
+ R_UNLESS((address < address + size), ResultNotFound);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ bool cur_valid = false;
+ TraversalEntry next_entry;
+ bool next_valid;
+ size_t tot_size = 0;
+
+ next_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
+ next_entry.block_size =
+ (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));
+
+ // Iterate, looking for entry.
+ while (true) {
+ if ((!next_valid && !cur_valid) ||
+ (next_valid && cur_valid &&
+ next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+ cur_entry.block_size += next_entry.block_size;
+ } else {
+ if (cur_valid && cur_entry.phys_addr <= address &&
+ address + size <= cur_entry.phys_addr + cur_entry.block_size) {
+ // Check if this region is valid.
+ const KProcessAddress mapped_address =
+ (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+ if (R_SUCCEEDED(this->CheckMemoryState(
+ mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state),
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None))) {
+ // It is!
+ *out = mapped_address;
+ R_SUCCEED();
+ }
+ }
+
+ // Update tracking variables.
+ tot_size += cur_entry.block_size;
+ cur_entry = next_entry;
+ cur_valid = next_valid;
+ }
+
+ if (cur_entry.block_size + tot_size >= region_size) {
+ break;
+ }
+
+ next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ }
+
+ // Check the last entry.
+ R_UNLESS(cur_valid, ResultNotFound);
+ R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound);
+ R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound);
+
+ // Check if the last region is valid.
+ const KProcessAddress mapped_address =
+ (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+ R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All,
+ static_cast<KMemoryState>(state),
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None)) {
+ R_CONVERT_ALL(ResultNotFound);
+ }
+ R_END_TRY_CATCH;
+
+ // We found the region.
+ *out = mapped_address;
+ R_SUCCEED();
+}
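+
+// The traversal above coalesces physically contiguous entries before testing
+// them: for instance (illustrative numbers), two 0x1000-byte entries at
+// physical 0x10000 and 0x11000 merge into a single 0x2000-byte block, so a
+// query straddling their boundary can still succeed.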
+
+Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the source address's state is valid.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Validate that the dst address's state is valid.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+ const KPageProperties src_properties = {new_src_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableHeadBodyTail};
+ R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+ unprotect_properties, OperationType::ChangePermissions, true));
+ };
+
+ // Map the alias pages.
+ const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+ false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_src_perm, new_src_attr,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+ }
+
+ R_SUCCEED();
+}
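+
+// Net effect of MapMemory (the svcMapMemory path): the source pages remain
+// mapped but become kernel-read-only and Locked, while the destination gains
+// a UserReadWrite Stack mapping of the same physical pages. UnmapMemory below
+// reverses both transitions.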
+
+Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the source address's state is valid.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+ src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+ KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+ // Validate that the dst address's state is valid.
+ KMemoryPermission dst_perm;
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+ dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Unmap the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+ // Ensure the page group is valid for the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ dst_unmap_properties, OperationType::Unmap, false));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
+
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, src_state,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify that the source memory is normal heap.
+ KMemoryState src_state;
+ KMemoryPermission src_perm;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Verify that the destination memory is unmapped.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the code memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KPageProperties src_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail};
+ R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+ unprotect_properties, OperationType::ChangePermissions, true));
+ };
+
+ // Map the alias pages.
+ const KPageProperties dst_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(
+ this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+ KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify that the source memory is locked normal heap.
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Verify that the destination memory is aliasable code.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+ KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
+
+ // Determine whether any pages being unmapped are code.
+ bool any_code_pages = false;
+ {
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
+ while (true) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if the memory has code flag.
+ if (True(info.GetState() & KMemoryState::FlagCode)) {
+ any_code_pages = true;
+ break;
+ }
+
+ // Check if we're done.
+ if (dst_address + size - 1 <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+ }
+
+ // Ensure that we maintain the instruction cache.
+ bool reprotected_pages = false;
+ SCOPE_EXIT({
+ if (reprotected_pages && any_code_pages) {
+ InvalidateInstructionCache(m_system, dst_address, size);
+ }
+ });
+
+ // Unmap.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+ // Verify that the page group contains the same pages as the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ dst_unmap_properties, OperationType::Unmap, false));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
+
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+
+ // Note that we reprotected pages.
+ reprotected_pages = true;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
+ // Get the insecure memory resource limit and pool.
+ auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ const auto insecure_pool =
+ static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());
+
+ // Reserve the insecure memory.
+ // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached.
+ KScopedResourceReservation memory_reservation(insecure_resource_limit,
+ Svc::LimitableResource::PhysicalMemoryMax, size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory);
+
+ // Allocate pages for the insecure memory.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateAndOpen(
+ std::addressof(pg), size / PageSize,
+ KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront)));
+
+ // Close the opened pages when we're done with them.
+ // If the mapping succeeds, each page gains an extra reference; otherwise, the pages are
+ // freed automatically.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+ static_cast<u32>(m_heap_fill_value), it.GetSize());
+ }
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the address's state is valid.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties,
+ OperationType::MapGroup, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages,
+ KMemoryState::Insecure, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Update our mapped insecure size.
+ m_mapped_insecure_memory += size;
+
+ // Commit the memory reservation.
+ memory_reservation.Commit();
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the memory.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Update our mapped insecure size.
+ m_mapped_insecure_memory -= size;
+
+ // Release the insecure memory from the insecure limit.
+ if (auto* const insecure_resource_limit =
+ KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ insecure_resource_limit != nullptr) {
+ insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size);
+ }
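+    // (Note the release happens only after the unmap has fully succeeded, and the
+    // insecure limit may be null, hence the check.)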
+
+ R_SUCCEED();
+}
+
+KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const {
+ KProcessAddress address = 0;
+
+ if (num_pages <= region_num_pages) {
+ if (this->IsAslrEnabled()) {
+ // Try to directly find a free area up to 8 times.
+ for (size_t i = 0; i < 8; i++) {
+ const size_t random_offset =
+ KSystemControl::GenerateRandomRange(
+ 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
+ alignment;
+ const KProcessAddress candidate =
+ Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
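+                // (Illustration: with alignment = 0x10000 and offset = 0, random_offset
+                // is a multiple of 64_KiB within the usable range, and the candidate is
+                // aligned down to a 64_KiB boundary; the checks below then reject any
+                // candidate whose free block cannot also hold the guard pages.)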
+
+ KMemoryInfo info;
+ Svc::PageInfo page_info;
+ R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info),
+ candidate));
+
+ if (info.m_state != KMemoryState::Free) {
+ continue;
+ }
+ if (!(region_start <= candidate)) {
+ continue;
+ }
+ if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) {
+ continue;
+ }
+ if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+ info.GetLastAddress())) {
+ continue;
+ }
+ if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+ region_start + region_num_pages * PageSize - 1)) {
+ continue;
+ }
+
+ address = candidate;
+ break;
+ }
+ // Fall back to finding the first free area with a random offset.
+ if (address == 0) {
+ // NOTE: Nintendo does not account for guard pages here.
+ // This may theoretically cause an offset to be chosen that cannot be mapped.
+ // We will account for guard pages.
+ const size_t offset_pages = KSystemControl::GenerateRandomRange(
+ 0, region_num_pages - num_pages - guard_pages);
+ address = m_memory_block_manager.FindFreeArea(
+ region_start + offset_pages * PageSize, region_num_pages - offset_pages,
+ num_pages, alignment, offset, guard_pages);
+ }
+ }
+ // Find the first free area.
+ if (address == 0) {
+ address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+ alignment, offset, guard_pages);
+ }
+ }
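+    // (Search strategy recap: with ASLR, up to 8 random probes, then a linear scan
+    // starting from a random page offset, then a plain linear scan; without ASLR,
+    // only the plain scan runs.)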
+
+ return address;
+}
+
+size_t KPageTableBase::GetSize(KMemoryState state) const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Iterate, counting blocks with the desired state.
+ size_t total_size = 0;
+ for (KMemoryBlockManager::const_iterator it =
+ m_memory_block_manager.FindIterator(m_address_space_start);
+ it != m_memory_block_manager.end(); ++it) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+ if (info.GetState() == state) {
+ total_size += info.GetSize();
+ }
+ }
+
+ return total_size;
+}
+
+size_t KPageTableBase::GetCodeSize() const {
+ return this->GetSize(KMemoryState::Code);
+}
+
+size_t KPageTableBase::GetCodeDataSize() const {
+ return this->GetSize(KMemoryState::CodeData);
+}
+
+size_t KPageTableBase::GetAliasCodeSize() const {
+ return this->GetSize(KMemoryState::AliasCode);
+}
+
+size_t KPageTableBase::GetAliasCodeDataSize() const {
+ return this->GetSize(KMemoryState::AliasCodeData);
+}
+
+Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Create a page group to hold the pages we allocate.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Allocate the pages.
+ R_TRY(
+ m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+
+ // Ensure that the page group is closed when we're done working with it.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+ static_cast<u32>(m_heap_fill_value), it.GetSize());
+ }
+
+ // Map the pages.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None};
+ R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup,
+ false));
+}
+
+Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ KProcessAddress cur_address = address;
+
+ // Ensure that we clean up on failure.
+ ON_RESULT_FAILURE {
+ ASSERT(!reuse_ll);
+ if (cur_address != start_address) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(page_list, start_address,
+ (cur_address - start_address) / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ }
+ };
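+    // (This cleanup unmaps exactly [start_address, cur_address), i.e. only the blocks
+    // mapped so far, so a failure partway through the group leaves no stray mappings.)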
+
+ // Iterate, mapping all pages in the group.
+ for (const auto& block : pg) {
+ // Map and advance.
+ const KPageProperties cur_properties =
+ (cur_address == start_address)
+ ? properties
+ : KPageProperties{properties.perm, properties.io, properties.uncached,
+ DisableMergeAttribute::None};
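+        // (Only the first block receives the caller's DisableMergeAttribute; later
+        // blocks use None so they can merge with the mapping's head.)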
+ R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true,
+ cur_properties, OperationType::Map, reuse_ll));
+ cur_address += block.GetSize();
+ }
+
+ // We succeeded!
+ R_SUCCEED();
+}
+
+void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ const KProcessAddress last_address = start_address + size - 1;
+ const KProcessAddress end_address = last_address + 1;
+
+ // Iterate over the memory.
+ auto pg_it = pg.begin();
+ ASSERT(pg_it != pg.end());
+
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ auto it = m_memory_block_manager.FindIterator(start_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Determine the range to map.
+ KProcessAddress map_address = std::max<u64>(info.GetAddress(), GetInteger(start_address));
+ const KProcessAddress map_end_address =
+ std::min<u64>(info.GetEndAddress(), GetInteger(end_address));
+ ASSERT(map_end_address != map_address);
+
+ // Determine if we should disable head merge.
+ const bool disable_head_merge =
+ info.GetAddress() >= GetInteger(start_address) &&
+ True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+ const KPageProperties map_properties = {
+ info.GetPermission(), false, false,
+ disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+ // While we have pages to map, map them.
+ size_t map_pages = (map_end_address - map_address) / PageSize;
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+            R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true,
+                                   map_properties, OperationType::Map, true));
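+            // (Mapping at most cur_pages matters here: pg_phys_addr is only valid
+            // through the end of the current physical block.)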
+
+ // Advance.
+ map_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+
+ // Check that we re-mapped precisely the page group.
+ ASSERT((++pg_it) == pg.end());
+}
+
+Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // We're making a new group, not adding to an existing one.
+ R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr),
+ ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
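+    // (The first traversal entry may straddle addr: cur_size keeps only the bytes
+    // from cur_addr to the end of its naturally aligned block, e.g. the tail of a
+    // 2_MiB block when cur_addr sits partway through it.)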
+ size_t tot_size = cur_size;
+
+ // Iterate, adding to group as we go.
+ while (tot_size < size) {
+ R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)),
+ ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we add the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+    // Add the last block.
+ const size_t cur_pages = cur_size / PageSize;
+ R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ R_SUCCEED();
+}
+
+bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr,
+ size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // Empty groups are necessarily invalid.
+ if (pg.empty()) {
+ return false;
+ }
+
+ auto& impl = this->GetImpl();
+
+ // We're going to validate that the group we'd expect is the group we see.
+ auto cur_it = pg.begin();
+ KPhysicalAddress cur_block_address = cur_it->GetAddress();
+ size_t cur_block_pages = cur_it->GetNumPages();
+
+ auto UpdateCurrentIterator = [&]() {
+ if (cur_block_pages == 0) {
+ if ((++cur_it) == pg.end()) {
+ return false;
+ }
+
+ cur_block_address = cur_it->GetAddress();
+ cur_block_pages = cur_it->GetNumPages();
+ }
+ return true;
+ };
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
+ return false;
+ }
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, comparing expected to actual.
+ while (tot_size < size) {
+ if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
+ return false;
+ }
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ if (!IsHeapPhysicalAddress(cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+ return false;
+ }
+
+ cur_block_address += cur_size;
+ cur_block_pages -= cur_pages;
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we compare the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ if (!IsHeapPhysicalAddress(cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
+Result KPageTableBase::GetContiguousMemoryRangeWithState(
+ MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ auto& impl = this->GetImpl();
+
+ // Begin a traversal.
+ TraversalContext context;
+ TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address),
+ ResultInvalidCurrentMemory);
+
+    // Traverse until we have enough size or the range stops being physically contiguous.
+ const KPhysicalAddress phys_address = cur_entry.phys_addr;
+ size_t contig_size;
+ for (contig_size =
+ cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1));
+ contig_size < size; contig_size += cur_entry.block_size) {
+ if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
+ break;
+ }
+ if (cur_entry.phys_addr != phys_address + contig_size) {
+ break;
+ }
+ }
+
+ // Take the minimum size for our region.
+ size = std::min(size, contig_size);
+
+ // Check that the memory is contiguous (modulo the reference count bit).
+ const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted;
+ const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+ if (!is_heap) {
+ R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask,
+ perm, attr_mask, attr));
+ }
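+    // (Here "is_heap" effectively means reference counted: when the first check
+    // passes, the caller must open/close references on the range's pages.)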
+
+ // The memory is contiguous, so set the output range.
+ out->Set(phys_address, size, is_heap);
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Determine new perm.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ R_SUCCEED_IF(old_perm == new_perm);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCode, KMemoryState::FlagCode,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Make a new page group for the region.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Determine new perm/state.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ KMemoryState new_state = old_state;
+ const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
+ const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+ const bool was_x =
+ (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+ ASSERT(!(is_w && is_x));
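+    // (W^X: this path never produces a mapping that is simultaneously user-writable
+    // and user-executable.)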
+
+ if (is_w) {
+ switch (old_state) {
+ case KMemoryState::Code:
+ new_state = KMemoryState::CodeData;
+ break;
+ case KMemoryState::AliasCode:
+ new_state = KMemoryState::AliasCodeData;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // Create a page group, if we're setting execute permissions.
+ if (is_x) {
+ R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
+ }
+
+ // Succeed if there's nothing to do.
+ R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+ const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush
+ : OperationType::ChangePermissions;
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation,
+ false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Ensure cache coherency, if we're setting pages as executable.
+ if (is_x) {
+ for (const auto& block : pg) {
+ StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
+ }
+ InvalidateInstructionCache(m_system, addr, size);
+ }
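+    // (This is the usual ARM code-modification sequence: clean the data cache for the
+    // new code, then invalidate the instruction cache for the affected range.)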
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr) {
+ const size_t num_pages = size / PageSize;
+ ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory attribute.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ constexpr KMemoryAttribute AttributeTestMask =
+ ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
+ const KMemoryState state_test_mask =
+ (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute
+ : KMemoryState::None) |
+ (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock
+ : KMemoryState::None);
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_test_mask, state_test_mask,
+ KMemoryPermission::None, KMemoryPermission::None,
+ AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // If we need to, perform a change attribute operation.
+ if (True(mask & KMemoryAttribute::Uncached)) {
+ // Determine the new attribute.
+ const KMemoryAttribute new_attr =
+ static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
+
+ // Perform operation.
+ const KPageProperties properties = {old_perm, false,
+ True(new_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissionsAndRefreshAndFlush, false));
+ }
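+    // (If the mask does not include Uncached, the change is pure bookkeeping, e.g.
+    // PermissionLocked, and no page table operation is required.)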
+
+ // Update the blocks.
+ m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
+ // Lock the physical memory mutex.
+ KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+
+ // Try to perform a reduction in heap, instead of an extension.
+ KProcessAddress cur_address;
+ size_t allocation_size;
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that setting heap size is possible at all.
+ R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+ R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
+ ResultOutOfMemory);
+ R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
+
+ if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+ // The size being requested is less than the current size, so we need to free the end of
+ // the heap.
+
+ // Validate memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(num_allocator_blocks), m_heap_region_start + size,
+ (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All,
+ KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the end of the heap.
+ const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0,
+ false, unmap_properties, OperationType::Unmap, false));
+
+ // Release the memory from the resource limit.
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ num_pages * PageSize);
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+ num_pages, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None);
+
+ // Update the current heap end.
+ m_current_heap_end = m_heap_region_start + size;
+
+ // Set the output.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+ // The size requested is exactly the current size.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ } else {
+ // We have to allocate memory. Determine how much to allocate and where while the table
+ // is locked.
+ cur_address = m_current_heap_end;
+ allocation_size = size - (m_current_heap_end - m_heap_region_start);
+ }
+ }
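+    // (The table lock is dropped for the allocation itself; m_map_physical_memory_lock,
+    // held for the whole call, keeps m_current_heap_end stable in the meantime, which
+    // the ASSERT below re-checks once the table lock is re-acquired.)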
+
+ // Reserve memory for the heap extension.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the heap extension.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize,
+ m_allocate_option));
+
+ // Close the opened pages when we're done with them.
+    // If the mapping succeeds, each page will gain an extra reference; otherwise, they
+    // will be freed automatically.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value,
+ it.GetSize());
+ }
+
+ // Map the pages.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Ensure that the heap hasn't changed since we began executing.
+ ASSERT(cur_address == m_current_heap_end);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
+ allocation_size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(
+ std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ const size_t num_pages = allocation_size / PageSize;
+ const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ (m_current_heap_end == m_heap_region_start)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg,
+ map_properties, OperationType::MapGroup, false));
+
+ // We succeeded, so commit our memory reservation.
+ memory_reservation.Commit();
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(
+ std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Update the current heap end.
+ m_current_heap_end = m_heap_region_start + size;
+
+ // Set the output.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ }
+}
+
+Result KPageTableBase::SetMaxHeapSize(size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Only process page tables are allowed to set heap size.
+ ASSERT(!this->IsKernel());
+
+ m_max_heap_size = size;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const {
+ // If the address is invalid, create a fake block.
+ if (!this->Contains(addr, 1)) {
+ *out_info = {
+ .m_address = GetInteger(m_address_space_end),
+ .m_size = 0 - GetInteger(m_address_space_end),
+ .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+ .m_device_disable_merge_left_count = 0,
+ .m_device_disable_merge_right_count = 0,
+ .m_ipc_lock_count = 0,
+ .m_device_use_count = 0,
+ .m_ipc_disable_merge_count = 0,
+ .m_permission = KMemoryPermission::None,
+ .m_attribute = KMemoryAttribute::None,
+ .m_original_permission = KMemoryPermission::None,
+ .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+ };
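+        // (m_size is computed as 0 - end, wrapping so the fake block spans from the
+        // end of the address space to the top of the 64-bit range.)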
+ out_page_info->flags = 0;
+
+ R_SUCCEED();
+ }
+
+ // Otherwise, lock the table and query.
+ KScopedLightLock lk(m_general_lock);
+ R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
+}
+
+Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out,
+ KProcessAddress address) const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Align the address down to page size.
+ address = Common::AlignDown(GetInteger(address), PageSize);
+
+ // Verify that we can query the address.
+ KMemoryInfo info;
+ Svc::PageInfo page_info;
+ R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical,
+ KMemoryState::FlagCanQueryPhysical,
+ KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Prepare to traverse.
+ KPhysicalAddress phys_addr;
+ size_t phys_size;
+
+ KProcessAddress virt_addr = info.GetAddress();
+ KProcessAddress end_addr = info.GetEndAddress();
+
+ // Perform traversal.
+ {
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Set tracking variables.
+ phys_addr = next_entry.phys_addr;
+ phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+
+ // Iterate.
+ while (true) {
+ // Continue the traversal.
+ traverse_valid =
+ m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ if (!traverse_valid) {
+ break;
+ }
+
+ if (next_entry.phys_addr != (phys_addr + phys_size)) {
+ // Check if we're done.
+ if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
+ break;
+ }
+
+ // Advance.
+ phys_addr = next_entry.phys_addr;
+ virt_addr += next_entry.block_size;
+ phys_size =
+ next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+ } else {
+ phys_size += next_entry.block_size;
+ }
+
+ // Check if we're done.
+ if (end_addr < virt_addr + phys_size) {
+ break;
+ }
+ }
+ ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);
+
+ // Ensure we use the right size.
+ if (end_addr < virt_addr + phys_size) {
+ phys_size = end_addr - virt_addr;
+ }
+ }
+
+ // Set the output.
+ out->physical_address = GetInteger(phys_addr);
+ out->virtual_address = GetInteger(virt_addr);
+ out->size = phys_size;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list,
+ KPhysicalAddress phys_addr, size_t size, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(size > 0);
+
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress last = phys_addr + size - 1;
+
+ // Get region extents.
+ const KProcessAddress region_start = m_kernel_map_region_start;
+ const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
+ const size_t region_num_pages = region_size / PageSize;
+
+ ASSERT(this->CanContain(region_start, region_size, state));
+
+ // Locate the memory region.
+ const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ ASSERT(region->Contains(GetInteger(phys_addr)));
+
+ // Ensure that the region is mappable.
+ const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+ while (true) {
+ // Check that the region exists.
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ // Check the region attributes.
+ R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+ ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+
+ // Check if we're done.
+ if (GetInteger(last) <= region->GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ region = region->GetNext();
+    }
+
+ // Select an address to map at.
+ KProcessAddress addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const KPhysicalAddress aligned_phys =
+ Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+ R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+ const KPhysicalAddress last_aligned_paddr =
+ Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+ R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+ ResultInvalidAddress);
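+        // (With alignment fixed at 4_KiB and page-aligned inputs, these two checks
+        // reduce to overflow guards; they would do real work for larger alignments.)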
+
+ addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ }
+
+ // Check that we can map IO here.
+ ASSERT(this->CanContain(addr, size, state));
+ R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map,
+ false));
+
+ // Set the output address.
+ *out = addr;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the io memory.
+ KProcessAddress addr;
+ R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size,
+ KMemoryState::IoRegister, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize,
+ KMemoryState::IoRegister, perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
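+    // (The mapping is created with KMemoryAttribute::Locked, so later user-facing
+    // operations that require a clean attribute state will refuse to touch it.)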
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+ size_t size, Svc::MemoryMapping mapping,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::None, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
+ const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister,
+ mapping == Svc::MemoryMapping::Uncached,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+
+ // Update the blocks.
+ const auto state =
+ mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister;
+ m_memory_block_manager.Update(
+ std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+ size_t size, Svc::MemoryMapping mapping) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
+ std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+ mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Validate that the region being unmapped corresponds to the physical range described.
+ {
+ // Get the impl.
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ ASSERT(
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));
+
+ // Check that the physical region matches.
+ R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion);
+
+ // Iterate.
+ for (size_t checked_size =
+ next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+ checked_size < size; checked_size += next_entry.block_size) {
+ // Continue the traversal.
+ ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
+
+ // Check that the physical region matches.
+ R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion);
+ }
+ }
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // If the region being unmapped is Memory, synchronize.
+ if (mapping == Svc::MemoryMapping::Memory) {
+ // Change the region to be uncached.
+ const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties,
+ OperationType::ChangePermissionsAndRefresh, false));
+
+ // Temporarily unlock ourselves, so that other operations can occur while we flush the
+ // region.
+ m_general_lock.Unlock();
+ SCOPE_EXIT({ m_general_lock.Lock(); });
+
+ // Flush the region.
+ R_ASSERT(FlushDataCache(dst_address, size));
+ }
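+    // (The region is remapped uncached before the lock is dropped, so no new dirty
+    // cache lines can accumulate while the potentially slow flush runs unlocked.)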
+
+ // Perform the unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(size > 0);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress last = phys_addr + size - 1;
+
+ // Get region extents.
+ const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static);
+ const size_t region_size = this->GetRegionSize(KMemoryState::Static);
+ const size_t region_num_pages = region_size / PageSize;
+
+ // Locate the memory region.
+ const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ ASSERT(region->Contains(GetInteger(phys_addr)));
+ R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress);
+
+ // Check the region attributes.
+ const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+ R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+ ResultInvalidAddress);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Select an address to map at.
+ KProcessAddress addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const KPhysicalAddress aligned_phys =
+ Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+ R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+ const KPhysicalAddress last_aligned_paddr =
+ Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+ R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+ ResultInvalidAddress);
+
+ addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ }
+
+ // Check that we can map static here.
+ ASSERT(this->CanContain(addr, size, KMemoryState::Static));
+ R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static,
+ perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+ // Get the memory region.
+ const KMemoryRegion* region =
+ m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
+ R_UNLESS(region != nullptr, ResultOutOfRange);
+
+ // Check that the region is valid.
+ ASSERT(region->GetEndAddress() != 0);
+
+ // Map the region.
+    R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) {
+        R_CONVERT(ResultInvalidAddress, ResultOutOfRange)
+    } R_END_TRY_CATCH;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+ // Ensure this is a valid map request.
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(Common::IsAligned(GetInteger(addr), alignment));
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ R_ASSERT(this->CheckMemoryState(
+ addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ if (is_pa_valid) {
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+ } else {
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
+ }
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check that the map is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+ // Check that the unmap is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform the unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ R_ASSERT(this->CheckMemoryState(
+ addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to map.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+ KMemoryState state) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid unmap request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to unmap.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Check that the page group is valid.
+ R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform unmapping operation.
+ const KPageProperties properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties,
+ OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address,
+ size_t num_pages, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) {
+ // Ensure that the page group isn't null.
+ ASSERT(out != nullptr);
+
+ // Make sure that the region we're mapping is valid for the table.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to create the group.
+ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
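+    // (FlagReferenceCounted is forced into the required state: page groups may only
+    // be built over reference-counted memory, since the group Opens those pages.)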
+
+ // Create a new page group for the region.
+ R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+ // Open a new reference to the pages in the group.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+ // Check that the region is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Check that the pages are linearly mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Invalidate the block.
+ if (cur_size > 0) {
+ // NOTE: Nintendo does not check the result of invalidation.
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ // Advance.
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Check that the last block is linearly mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Invalidate the last block.
+ if (cur_size > 0) {
+ // NOTE: Nintendo does not check the result of invalidation.
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
+ // Check pre-condition: this is being called on the current process.
+ ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable()));
+
+ // Check that the region is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+    // Invalidate the data cache. Since this is the current process's table, we can
+    // operate on the guest virtual range directly instead of traversing physical blocks.
+ R_RETURN(InvalidateDataCache(address, size));
+}
+
+Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lightly validate the region is in range.
+ R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Require that the memory either be user readable or debuggable.
+ const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None));
+ if (!can_read) {
+ const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+ }
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+ auto& dst_memory = GetCurrentMemory(m_system.Kernel());
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
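+    // Helper to copy the current physically-contiguous run out through the kernel's
+    // linear mapping; the data cache is flushed first so the copy observes the
+    // guest's most recent stores.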
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ FlushDataCache(copy_src, copy_size);
+ R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer);
+
+ dst_address += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ FlushDataCache(copy_src, cur_size);
+ R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ dst_address += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lightly validate the region is in range.
+ R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Require that the memory either be user writable or debuggable.
+    const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+        dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite,
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+    if (!can_write) {
+ const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+ }
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+ auto& src_memory = GetCurrentMemory(m_system.Kernel());
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
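+    // Helper to copy the current physically-contiguous run in through the kernel's
+    // linear mapping, storing the data cache afterwards so the new contents are
+    // written back to memory.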
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size),
+ ResultInvalidCurrentMemory);
+
+ StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size);
+
+ src_address += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size),
+ ResultInvalidCurrentMemory);
+
+ StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ src_address += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+
+ // Invalidate the instruction cache, as this svc allows modifying executable pages.
+ InvalidateInstructionCache(m_system, dst_address, size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr,
+ size_t size, KMemoryState state) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Determine the mapping extents.
+ const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+ const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+ const size_t map_size = map_end - map_start;
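+    // NOTE: The extents are widened to page granularity because MapIoImpl maps
+    // whole pages; the sub-page offset is reapplied when computing read_addr below.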
+
+ // Get the memory reference to write into.
+ auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Temporarily map the io memory.
+ KProcessAddress io_addr;
+ R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+ state, KMemoryPermission::UserRead));
+
+ // Ensure we unmap the io memory when we're done with it.
+ const KPageProperties unmap_properties =
+ KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+ SCOPE_EXIT({
+ R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ });
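+    // NOTE: Unmapping a window we just mapped ourselves is not expected to fail,
+    // hence the R_ASSERT above.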
+
+ // Read the memory.
+ const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+ dst_memory.CopyBlock(dst_addr, read_addr, size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr,
+ size_t size, KMemoryState state) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Determine the mapping extents.
+ const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+ const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+ const size_t map_size = map_end - map_start;
+
+ // Get the memory reference to read from.
+ auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Temporarily map the io memory.
+ KProcessAddress io_addr;
+ R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+ state, KMemoryPermission::UserReadWrite));
+
+ // Ensure we unmap the io memory when we're done with it.
+ const KPageProperties unmap_properties =
+ KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+ SCOPE_EXIT({
+ R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ });
+
+ // Write the memory.
+ const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+ R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size, KMemoryState state) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+ // We need to lock both this table, and the current process's table, so set up some aliases.
+ KPageTableBase& src_page_table = *this;
+ KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the desired range is readable io memory.
+ R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state,
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Read the memory.
+ KProcessAddress dst = dst_address;
+ const KProcessAddress last_address = src_address + size - 1;
+ while (src_address <= last_address) {
+ // Get the current physical address.
+ KPhysicalAddress phys_addr;
+ ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address));
+
+ // Determine the current read size.
+ const size_t cur_size =
+ std::min<size_t>(last_address - src_address + 1,
+ Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) -
+ GetInteger(src_address));
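+        // NOTE: The io read is split page by page, since the physical address we
+        // looked up is only valid within the current page.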
+
+ // Read.
+ R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
+
+ // Advance.
+ src_address += cur_size;
+ dst += cur_size;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size, KMemoryState state) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+ // We need to lock both this table, and the current process's table, so set up some aliases.
+ KPageTableBase& src_page_table = *this;
+ KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the desired range is writable io memory.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+
+    // Write the memory.
+ KProcessAddress src = src_address;
+ const KProcessAddress last_address = dst_address + size - 1;
+ while (dst_address <= last_address) {
+ // Get the current physical address.
+ KPhysicalAddress phys_addr;
+ ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address));
+
+        // Determine the current write size.
+ const size_t cur_size =
+ std::min<size_t>(last_address - dst_address + 1,
+ Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) -
+ GetInteger(dst_address));
+
+        // Write.
+ R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
+
+ // Advance.
+ dst_address += cur_size;
+ src += cur_size;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+ size_t size, KMemoryPermission perm,
+ bool is_aligned, bool check_heap) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ const KMemoryState test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+ size_t num_allocator_blocks;
+ KMemoryState old_state;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
+ std::addressof(num_allocator_blocks), address, size, test_state,
+ test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+ KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
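+    // NOTE: ShareToDevice marks each block as DeviceShared and tracks a device-use
+    // count; UnshareToDevice in UnlockForDeviceAddressSpace reverses this.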
+
+ // Set whether the locked memory was io.
+ *out_is_io =
+ static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+ bool check_heap) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ const KMemoryState test_state =
+ KMemoryState::FlagCanDeviceMap |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, test_state, test_state,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ size_t allocator_num_blocks = 0;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator for the region.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, allocator_num_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(
+ std::addressof(allocator), address, num_pages,
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm,
+ bool is_aligned) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ const KMemoryState test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, test_state, test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out,
+ KProcessAddress address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+ size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, nullptr));
+}
+
+Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm) {
+ R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
+ KMemoryState::FlagCanTransfer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
+ KMemoryState::FlagCanTransfer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ out, nullptr, address, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out,
+ KProcessAddress address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached,
+ KMemoryAttribute::None));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToUser(
+ KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+ KMemoryAttribute src_attr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+ // Get the destination memory reference.
+ auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ R_UNLESS(dst_memory.WriteBlock(dst_addr,
+ GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+ copy_size),
+ ResultInvalidCurrentMemory);
+
+ dst_addr += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ R_UNLESS(dst_memory.WriteBlock(
+ dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+ ResultInvalidCurrentMemory);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ dst_addr += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToKernel(
+ void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+ KMemoryAttribute src_attr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromUserToLinear(
+ KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Get the source memory reference.
+ auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ R_UNLESS(src_memory.ReadBlock(src_addr,
+ GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+ copy_size),
+ ResultInvalidCurrentMemory);
+ src_addr += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ R_UNLESS(src_memory.ReadBlock(
+ src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+ ResultInvalidCurrentMemory);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ src_addr += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask,
+ KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask,
+ KMemoryAttribute dst_attr, void* buffer) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size);
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeap(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ // For convenience, alias this.
+ KPageTableBase& src_page_table = *this;
+
+ // Lightly validate the ranges before doing anything else.
+ R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check memory state.
+ R_TRY(src_page_table.CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+ R_TRY(dst_page_table.CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ // Get implementations.
+ auto& src_impl = src_page_table.GetImpl();
+ auto& dst_impl = dst_page_table.GetImpl();
+
+ // Prepare for traversal.
+ TraversalContext src_context;
+ TraversalContext dst_context;
+ TraversalEntry src_next_entry;
+ TraversalEntry dst_next_entry;
+ bool traverse_valid;
+
+ // Begin traversal.
+ traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context), src_addr);
+ ASSERT(traverse_valid);
+ traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+ KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+ size_t cur_src_size = src_next_entry.block_size -
+ (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+ size_t cur_dst_size = dst_next_entry.block_size -
+ (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+ // Adjust the initial block sizes.
+ src_next_entry.block_size = cur_src_size;
+ dst_next_entry.block_size = cur_dst_size;
+
+ // Before we get any crazier, succeed if there's nothing to do.
+ R_SUCCEED_IF(size == 0);
+
+ // We're going to manage dual traversal via an offset against the total size.
+ KPhysicalAddress cur_src_addr = cur_src_block_addr;
+ KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+ size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
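+        // NOTE: Each iteration's copy is bounded by whichever table's current
+        // contiguous run ends first.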
+
+ // Iterate.
+ size_t ofs = 0;
+ while (ofs < size) {
+ // Determine how much we can copy this iteration.
+ const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+ // If we need to advance the traversals, do so.
+ bool updated_src = false, updated_dst = false, skip_copy = false;
+ if (ofs + cur_copy_size != size) {
+ if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+ // Continue the src traversal.
+ traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context));
+ ASSERT(traverse_valid);
+
+ // Update source.
+ updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+ }
+
+ if (cur_dst_addr + cur_min_size ==
+ dst_next_entry.phys_addr + dst_next_entry.block_size) {
+ // Continue the dst traversal.
+ traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context));
+ ASSERT(traverse_valid);
+
+ // Update destination.
+ updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+ }
+
+ // If we didn't update either of source/destination, skip the copy this iteration.
+ if (!updated_src && !updated_dst) {
+ skip_copy = true;
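+                    // Neither side hit a physical discontinuity, so both runs
+                    // simply grew; defer the copy until the full extent is known.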
+
+ // Update the source block address.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ }
+ }
+
+ // Do the copy, unless we're skipping it.
+ if (!skip_copy) {
+ // We need both ends of the copy to be heap blocks.
+ R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+ R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+ GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+ // Update.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+ cur_dst_block_addr = dst_next_entry.phys_addr;
+ cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+ // Advance offset.
+ ofs += cur_copy_size;
+ }
+
+ // Update min size.
+ cur_src_size = src_next_entry.block_size;
+ cur_dst_size = dst_next_entry.block_size;
+ cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+ cur_dst_block_addr - cur_dst_addr + cur_dst_size);
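+            // The next copy bound is the distance from each side's current position
+            // to the end of its newest block.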
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ // For convenience, alias this.
+ KPageTableBase& src_page_table = *this;
+
+ // Lightly validate the ranges before doing anything else.
+ R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check memory state for source.
+ R_TRY(src_page_table.CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ // Destination state is intentionally unchecked.
+
+ // Get implementations.
+ auto& src_impl = src_page_table.GetImpl();
+ auto& dst_impl = dst_page_table.GetImpl();
+
+ // Prepare for traversal.
+ TraversalContext src_context;
+ TraversalContext dst_context;
+ TraversalEntry src_next_entry;
+ TraversalEntry dst_next_entry;
+ bool traverse_valid;
+
+ // Begin traversal.
+ traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context), src_addr);
+ ASSERT(traverse_valid);
+ traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+ KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+ size_t cur_src_size = src_next_entry.block_size -
+ (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+ size_t cur_dst_size = dst_next_entry.block_size -
+ (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+ // Adjust the initial block sizes.
+ src_next_entry.block_size = cur_src_size;
+ dst_next_entry.block_size = cur_dst_size;
+
+ // Before we get any crazier, succeed if there's nothing to do.
+ R_SUCCEED_IF(size == 0);
+
+ // We're going to manage dual traversal via an offset against the total size.
+ KPhysicalAddress cur_src_addr = cur_src_block_addr;
+ KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+ size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
+
+ // Iterate.
+ size_t ofs = 0;
+ while (ofs < size) {
+ // Determine how much we can copy this iteration.
+ const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+ // If we need to advance the traversals, do so.
+ bool updated_src = false, updated_dst = false, skip_copy = false;
+ if (ofs + cur_copy_size != size) {
+ if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+ // Continue the src traversal.
+ traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context));
+ ASSERT(traverse_valid);
+
+ // Update source.
+ updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+ }
+
+ if (cur_dst_addr + cur_min_size ==
+ dst_next_entry.phys_addr + dst_next_entry.block_size) {
+ // Continue the dst traversal.
+ traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context));
+ ASSERT(traverse_valid);
+
+ // Update destination.
+ updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+ }
+
+ // If we didn't update either of source/destination, skip the copy this iteration.
+ if (!updated_src && !updated_dst) {
+ skip_copy = true;
+
+ // Update the source block address.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ }
+ }
+
+ // Do the copy, unless we're skipping it.
+ if (!skip_copy) {
+ // We need both ends of the copy to be heap blocks.
+ R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+ R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+ GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+ // Update.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+ cur_dst_block_addr = dst_next_entry.phys_addr;
+ cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+ // Advance offset.
+ ofs += cur_copy_size;
+ }
+
+ // Update min size.
+ cur_src_size = src_next_entry.block_size;
+ cur_dst_size = dst_next_entry.block_size;
+ cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+ cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size,
+ KMemoryPermission test_perm, KMemoryState dst_state) {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
+ test_perm == KMemoryPermission::UserRead);
+
+ // Check that the address is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get the source permission.
+ const auto src_perm = static_cast<KMemoryPermission>(
+ (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead);
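+    // NOTE: For read-write transfers the client's pages become kernel-only
+    // (NotMapped) while ipc-locked, so the client can't touch the buffer
+    // mid-transfer; read-only transfers leave the client's read access intact.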
+
+ // Get aligned extents.
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+
+ const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
+ const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
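+    // NOTE: The aligned extents round outward to cover every page the buffer
+    // touches, while the mapping extents round inward to the pages wholly contained
+    // in the buffer. Only the latter are mapped directly into the server; partial
+    // head/tail pages are copied instead (see SetupForIpcServer).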
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
+ src_perm);
+ }
+ };
+
+ size_t blocks_needed = 0;
+
+ // Iterate, mapping as needed.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate the current block.
+ R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
+ test_attr_mask, KMemoryAttribute::None));
+
+ if (mapping_src_start < mapping_src_end &&
+ GetInteger(mapping_src_start) < info.GetEndAddress() &&
+ info.GetAddress() < GetInteger(mapping_src_end)) {
+ const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+ ? info.GetAddress()
+ : GetInteger(mapping_src_start);
+ const auto cur_end = mapping_src_last >= info.GetLastAddress()
+ ? info.GetEndAddress()
+ : GetInteger(mapping_src_end);
+ const size_t cur_size = cur_end - cur_start;
+
+ if (info.GetAddress() < GetInteger(mapping_src_start)) {
+ ++blocks_needed;
+ }
+ if (mapping_src_last < info.GetLastAddress()) {
+ ++blocks_needed;
+ }
+
+ // Set the permissions on the block, if we need to.
+ if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
+ const DisableMergeAttribute head_body_attr =
+ (GetInteger(mapping_src_start) >= info.GetAddress())
+ ? DisableMergeAttribute::DisableHeadAndBody
+ : DisableMergeAttribute::None;
+ const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end))
+ ? DisableMergeAttribute::DisableTail
+ : DisableMergeAttribute::None;
+ const KPageProperties properties = {
+ src_perm, false, false,
+ static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Note that we mapped this part.
+ mapped_size += cur_size;
+ }
+
+ // If the block is at the end, we're done.
+ if (aligned_src_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+
+ if (out_blocks_needed != nullptr) {
+ ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ *out_blocks_needed = blocks_needed;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+ KProcessAddress src_addr, KMemoryPermission test_perm,
+ KMemoryState dst_state, KPageTableBase& src_page_table,
+ bool send) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(src_page_table.IsLockedByCurrentThread());
+
+ // Check that we can theoretically map.
+ const KProcessAddress region_start = m_alias_region_start;
+ const size_t region_size = m_alias_region_end - m_alias_region_start;
+ R_UNLESS(size < region_size, ResultOutOfAddressSpace);
+
+ // Get aligned source extents.
+ const KProcessAddress src_start = src_addr;
+ const KProcessAddress src_end = src_addr + size;
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
+ const KProcessAddress mapping_src_end =
+ Common::AlignDown(GetInteger(src_start) + size, PageSize);
+ const size_t aligned_src_size = aligned_src_end - aligned_src_start;
+ const size_t mapping_src_size =
+ (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
+
+ // Select a random address to map at.
+ KProcessAddress dst_addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
+
+ dst_addr =
+ this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
+ alignment, offset, this->GetNumGuardPages());
+ R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
+ }
+
+ // Check that we can perform the operation we're about to perform.
+ ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reserve space for any partial pages we allocate.
+ const size_t unmapped_size = aligned_src_size - mapping_src_size;
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
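+    // NOTE: Only the partial head/tail pages consume newly allocated memory; the
+    // interior pages alias the client's existing pages, so only unmapped_size is
+    // charged against the resource limit.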
+
+ // Ensure that we manage page references correctly.
+ KPhysicalAddress start_partial_page = 0;
+ KPhysicalAddress end_partial_page = 0;
+ KProcessAddress cur_mapped_addr = dst_addr;
+
+    // If the partial pages are mapped, an extra reference will have been opened.
+    // Otherwise, they'll be freed on scope exit.
+ SCOPE_EXIT({
+ if (start_partial_page != 0) {
+ m_kernel.MemoryManager().Close(start_partial_page, 1);
+ }
+ if (end_partial_page != 0) {
+ m_kernel.MemoryManager().Close(end_partial_page, 1);
+ }
+ });
+
+ ON_RESULT_FAILURE {
+ if (cur_mapped_addr != dst_addr) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_addr,
+ (cur_mapped_addr - dst_addr) / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ }
+ };
+
+ // Allocate the start page as needed.
+ if (aligned_src_start < mapping_src_start) {
+ start_partial_page =
+ m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Allocate the end page as needed.
+ if (mapping_src_end < aligned_src_end &&
+ (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
+ end_partial_page =
+ m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Get the implementation.
+ auto& src_impl = src_page_table.GetImpl();
+
+ // Get the fill value for partial pages.
+ const auto fill_val = m_ipc_fill_value;
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry),
+ std::addressof(context), aligned_src_start);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_block_addr = next_entry.phys_addr;
+ size_t cur_block_size =
+ next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
+ size_t tot_block_size = cur_block_size;
+
+ // Map the start page, if we have one.
+ if (start_partial_page != 0) {
+ // Ensure the page holds correct data.
+ u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page);
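+        // For sends, only the bytes overlapping the buffer are copied from the
+        // client's page; everything else is padded with the ipc fill value so no
+        // unrelated client data leaks to the server. For receives, the whole page
+        // is filled.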
+ if (send) {
+ const size_t partial_offset = src_start - aligned_src_start;
+ size_t copy_size, clear_size;
+ if (src_end < mapping_src_start) {
+ copy_size = size;
+ clear_size = mapping_src_start - src_end;
+ } else {
+ copy_size = mapping_src_start - src_start;
+ clear_size = 0;
+ }
+
+ std::memset(start_partial_virt, fill_val, partial_offset);
+ std::memcpy(start_partial_virt + partial_offset,
+ GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset,
+ copy_size);
+ if (clear_size > 0) {
+ std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size);
+ }
+ } else {
+ std::memset(start_partial_virt, fill_val, PageSize);
+ }
+
+ // Map the page.
+ const KPageProperties start_map_properties = {test_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true,
+ start_map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += PageSize;
+ cur_block_addr += PageSize;
+ cur_block_size -= PageSize;
+
+ // If the block's size was one page, we may need to continue traversal.
+ if (cur_block_size == 0 && aligned_src_size > PageSize) {
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ tot_block_size += next_entry.block_size;
+ }
+ }
+
+ // Map the remaining pages.
+ while (aligned_src_start + tot_block_size < mapping_src_end) {
+ // Continue the traversal.
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ // Process the block.
+ if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
+ // Map the block we've been processing so far.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize,
+ cur_block_addr, true, map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += cur_block_size;
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ } else {
+ cur_block_size += next_entry.block_size;
+ }
+ tot_block_size += next_entry.block_size;
+ }
+
+ // Handle the last direct-mapped page.
+ if (const KProcessAddress mapped_block_end =
+ aligned_src_start + tot_block_size - cur_block_size;
+ mapped_block_end < mapping_src_end) {
+ const size_t last_block_size = mapping_src_end - mapped_block_end;
+
+ // Map the last block.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize,
+ cur_block_addr, true, map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += last_block_size;
+ cur_block_addr += last_block_size;
+ if (mapped_block_end + cur_block_size < aligned_src_end &&
+ cur_block_size == last_block_size) {
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ }
+ }
+
+ // Map the end page, if we have one.
+ if (end_partial_page != 0) {
+ // Ensure the page holds correct data.
+ u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page);
+ if (send) {
+ const size_t copy_size = src_end - mapping_src_end;
+ std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr),
+ copy_size);
+ std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size);
+ } else {
+ std::memset(end_partial_virt, fill_val, PageSize);
+ }
+
+ // Map the page.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true,
+ map_properties, OperationType::Map, false));
+ }
+
+    // Update memory blocks to reflect our changes.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
+ dst_state, test_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Set the output address.
+ *out_addr = dst_addr + (src_start - aligned_src_start);
+
+ // We succeeded.
+ memory_reservation.Commit();
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size,
+ KProcessAddress src_addr, KPageTableBase& src_page_table,
+ KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
+ // For convenience, alias this.
+ KPageTableBase& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(std::addressof(src_page_table));
+
+ // Perform client setup.
+ size_t num_allocator_blocks;
+ R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
+ std::addressof(num_allocator_blocks), src_addr, size,
+ test_perm, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ src_page_table.m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Get the mapped extents.
+ const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+ const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
+ const size_t src_map_size = src_map_end - src_map_start;
+
+ // Ensure that we clean up appropriately if we fail after this.
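+ // NOTE: This mirrors the permission SetupForIpcClient applied to the client's pages:
+ // kernel-only when sending read-write memory, user read-only otherwise.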
+ const auto src_perm = static_cast<KMemoryPermission>(
+ (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead);
+ ON_RESULT_FAILURE {
+ if (src_map_end > src_map_start) {
+ src_page_table.CleanupForIpcClientOnServerSetupFailure(
+ updater.GetPageList(), src_map_start, src_map_size, src_perm);
+ }
+ };
+
+ // Perform server setup.
+ R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
+ src_page_table, send));
+
+ // If anything was mapped, ipc-lock the pages.
+ if (src_map_start < src_map_end) {
+ // Lock the pages for ipc, recording the source permission.
+ src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
+ (src_map_end - src_map_start) / PageSize,
+ &KMemoryBlock::LockForIpc, src_perm);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, dst_state, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Get aligned extents.
+ const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const size_t aligned_size = aligned_end - aligned_start;
+ const size_t aligned_num_pages = aligned_size / PageSize;
+
+ // Unmap the pages.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
+ KMemoryState::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Release from the resource limit as relevant.
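+ // Only the partial head/tail pages were charged to this table when the server
+ // mapping was set up; the interior pages belong to the client.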
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ aligned_size - mapping_size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get aligned source extents.
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_last = mapping_end - 1;
+ const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
+
+ // If nothing was mapped, we're actually done immediately.
+ R_SUCCEED_IF(mapping_size == 0);
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Lock the table.
+ // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
+ // convention elsewhere in KPageTableBase.
+ KScopedLightLock lk(m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ // Determine where the mapping ends.
+ const auto mapped_end = GetInteger(mapping_start) + mapped_size;
+ const auto mapped_last = mapped_end - 1;
+
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Get the current block info.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+
+ // Create tracking variables.
+ KProcessAddress cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first = cur_info.GetIpcDisableMergeCount() == 1 &&
+ False(cur_info.GetDisableMergeAttribute() &
+ KMemoryBlockDisableMergeAttribute::Locked);
+
+ while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a
+ // single call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_info.GetPermission(), false, false,
+ first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+ : DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+ cur_size / PageSize, 0, false, properties,
+ OperationType::ChangePermissions, true));
+ }
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
+ if ((first || cur_needs_set_perm) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_info.GetPermission(), false, false,
+ first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+ : DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+ false, properties, OperationType::ChangePermissions, true));
+ }
+ }
+ };
+
+ // Iterate, reprotecting as needed.
+ {
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Validate the current block.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+ R_ASSERT(this->CheckMemoryState(
+ cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+ // Create tracking variables.
+ KProcessAddress cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first =
+ cur_info.GetIpcDisableMergeCount() == 1 &&
+ False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked);
+
+ while ((cur_address + cur_size - 1) < mapping_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Validate the next block.
+ R_ASSERT(this->CheckMemoryState(
+ next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a single
+ // call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_needs_set_perm ? cur_info.GetOriginalPermission()
+ : cur_info.GetPermission(),
+ false, false,
+ first ? DisableMergeAttribute::EnableHeadAndBody
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+ false, properties, OperationType::ChangePermissions,
+ false));
+ }
+
+ // Mark that we mapped the block.
+ mapped_size += cur_size;
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
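+ // NOTE: The tail merge attribute should only be re-enabled by the final outstanding
+ // ipc lock at the boundary, hence the lock count adjustment below.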
+ const auto lock_count =
+ cur_info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
+ const DisableMergeAttribute head_body_attr =
+ first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None;
+ const DisableMergeAttribute tail_attr =
+ lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None;
+ const KPageProperties properties = {
+ cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(),
+ false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false,
+ properties, OperationType::ChangePermissions, false));
+ }
+ }
+
+ // Create an update allocator.
+ // NOTE: Guaranteed zero blocks needed here.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, 0);
+ R_TRY(allocator_result);
+
+ // Unlock the pages.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
+ mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list,
+ KProcessAddress address, size_t size,
+ KMemoryPermission prot_perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+
+ // Get the mapped extents.
+ const KProcessAddress src_map_start = address;
+ const KProcessAddress src_map_end = address + size;
+ const KProcessAddress src_map_last = src_map_end - 1;
+
+ // This function is only invoked when there's something to do.
+ ASSERT(src_map_end > src_map_start);
+
+ // Iterate over blocks, fixing permissions.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
+ ? info.GetAddress()
+ : GetInteger(src_map_start);
+ const auto cur_end =
+ src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
+
+ // If we can, fix the protections on the block.
+ if ((info.GetIpcLockCount() == 0 &&
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
+ (info.GetIpcLockCount() != 0 &&
+ (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
+ // Check if we actually need to fix the protections on the block.
+ if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
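+ // Re-enable head/body merging only if the mapping's start block isn't still
+ // held disabled by a lock or by another ipc mapping to its left.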
+ const bool start_nc = (info.GetAddress() == GetInteger(src_map_start))
+ ? (False(info.GetDisableMergeAttribute() &
+ (KMemoryBlockDisableMergeAttribute::Locked |
+ KMemoryBlockDisableMergeAttribute::IpcLeft)))
+ : info.GetAddress() <= GetInteger(src_map_start);
+
+ const DisableMergeAttribute head_body_attr =
+ start_nc ? DisableMergeAttribute::EnableHeadAndBody
+ : DisableMergeAttribute::None;
+ DisableMergeAttribute tail_attr;
+ if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
+ auto next_it = it;
+ ++next_it;
+
+ const auto lock_count =
+ info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail
+ : DisableMergeAttribute::None;
+ } else {
+ tail_attr = DisableMergeAttribute::None;
+ }
+
+ const KPageProperties properties = {
+ info.GetPermission(), false, false,
+ static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0,
+ false, properties, OperationType::ChangePermissions, true));
+ }
+ }
+
+ // If we're past the end of the region, we're done.
+ if (src_map_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+}
+
+Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+ // Calculate the last address for convenience.
+ const KProcessAddress last_address = address + size - 1;
+
+ // Define iteration variables.
+ KProcessAddress cur_address;
+ size_t mapped_size;
+
+ // The entire mapping process can be retried.
+ while (true) {
+ // Check if the memory is already mapped.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size mapped is the size requested, we've nothing to do.
+ R_SUCCEED_IF(size == mapped_size);
+ }
+
+ // Allocate and map the memory.
+ {
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the new memory.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateForProcess(
+ std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option,
+ GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
+
+ // If we fail in the next bit (or retry), we need to clean up the pages.
+ auto pg_guard = SCOPE_GUARD({
+ pg.OpenFirst();
+ pg.Close();
+ });
+
+ // Map the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ size_t num_allocator_blocks = 0;
+
+ // Verify that nobody has mapped memory since we first checked.
+ {
+ // Iterate over the memory.
+ size_t checked_mapped_size = 0;
+ cur_address = address;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const bool is_free = info.GetState() == KMemoryState::Free;
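+ // A free block straddling either end of the range will be split when we map,
+ // so count an allocator block per straddled boundary.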
+ if (is_free) {
+ if (info.GetAddress() < GetInteger(address)) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (!is_free) {
+ checked_mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (!is_free) {
+ checked_mapped_size +=
+ KProcessAddress(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size now isn't what it was before, somebody mapped or unmapped
+ // concurrently. If this happened, retry.
+ if (mapped_size != checked_mapped_size) {
+ continue;
+ }
+ }
+
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Prepare to iterate over the memory.
+ auto pg_it = pg.begin();
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ pg_guard.Cancel();
+ cur_address = address;
+ ON_RESULT_FAILURE {
+ if (cur_address > address) {
+ const KProcessAddress last_unmap_address = cur_address - 1;
+
+ // Iterate, unmapping the pages.
+ cur_address = address;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is free, we mapped it and need to unmap it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to unmap.
+ const KPageProperties unmap_properties = {
+ KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ const size_t cur_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_unmap_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+ cur_pages, 0, false, unmap_properties,
+ OperationType::Unmap, true));
+ }
+
+ // Check if we're done.
+ if (last_unmap_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ }
+
+ // Release any remaining unmapped memory.
+ m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
+ m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages);
+ for (++pg_it; pg_it != pg.end(); ++pg_it) {
+ m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(),
+ pg_it->GetNumPages());
+ m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
+ }
+ };
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If it's unmapped, we need to map it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to map.
+ const KPageProperties map_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ cur_address == this->GetAliasRegionStart()
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ size_t map_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ {
+ // Create a page group for the current mapping range.
+ KPageGroup cur_pg(m_kernel, m_block_info_manager);
+ {
+ ON_RESULT_FAILURE_2 {
+ cur_pg.OpenFirst();
+ cur_pg.Close();
+ };
+
+ size_t remain_pages = map_pages;
+ while (remain_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Add whatever we can to the current block.
+ const size_t cur_pages = std::min(pg_pages, remain_pages);
+ R_TRY(cur_pg.AddBlock(pg_phys_addr +
+ ((pg_pages - cur_pages) * PageSize),
+ cur_pages));
+
+ // Advance.
+ remain_pages -= cur_pages;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Map the pages.
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
+ cur_pg, map_properties,
+ OperationType::MapFirstGroup, false));
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Increase our tracked mapped size.
+ m_mapped_physical_memory_size += (size - mapped_size);
+
+ // Update the relevant memory blocks.
+ m_memory_block_manager.UpdateIfMatch(
+ std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ address == this->GetAliasRegionStart()
+ ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+ }
+ }
+ }
+}
+
+Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Calculate the last address for convenience.
+ const KProcessAddress last_address = address + size - 1;
+
+ // Define iteration variables.
+ KProcessAddress map_start_address = 0;
+ KProcessAddress map_last_address = 0;
+
+ KProcessAddress cur_address;
+ size_t mapped_size;
+ size_t num_allocator_blocks = 0;
+
+ // Check if the memory is mapped.
+ {
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Verify the memory's state.
+ const bool is_normal = info.GetState() == KMemoryState::Normal &&
+ info.GetAttribute() == KMemoryAttribute::None;
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+ if (is_normal) {
+ R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+ if (map_start_address == 0) {
+ map_start_address = cur_address;
+ }
+ map_last_address =
+ (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
+
+ if (info.GetAddress() < GetInteger(address)) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+
+ mapped_size += (map_last_address + 1 - cur_address);
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If there's nothing mapped, we've nothing to do.
+ R_SUCCEED_IF(mapped_size == 0);
+ }
+
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Separate the mapping.
+ const KPageProperties sep_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), map_start_address,
+ (map_last_address + 1 - map_start_address) / PageSize, 0, false,
+ sep_properties, OperationType::Separate, false));
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+
+ // Iterate over the memory, unmapping as we go.
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+
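+ // If the unmap begins exactly at the alias region start, clear the Normal
+ // disable-merge attribute that MapPhysicalMemory set on the region's first block.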
+ const auto clear_merge_attr =
+ (it->GetState() == KMemoryState::Normal &&
+ it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+ ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None;
+
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is normal, we need to unmap it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // Release the memory resource.
+ m_mapped_physical_memory_size -= mapped_size;
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size);
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ clear_merge_attr);
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+ KPageTableBase& src_page_table,
+ KProcessAddress src_address) {
+ // We need to lock both this table and the current process's table, so set up an alias.
+ KPageTableBase& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the memory is mapped in the destination process.
+ size_t num_allocator_blocks;
+ R_TRY(dst_page_table.CheckMemoryState(
+ std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+ KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Check that the memory is mapped in the source process.
+ R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Validate that the memory ranges are compatible.
+ {
+ // Define a helper type.
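+ // NOTE: This walks a traversal, coalescing physically contiguous entries so the
+ // source and destination mappings can be compared extent by extent.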
+ struct ContiguousRangeInfo {
+ public:
+ KPageTableBase& m_pt;
+ TraversalContext m_context;
+ TraversalEntry m_entry;
+ KPhysicalAddress m_phys_addr;
+ size_t m_cur_size;
+ size_t m_remaining_size;
+
+ public:
+ ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size)
+ : m_pt(pt), m_remaining_size(size) {
+ // Begin a traversal.
+ ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry),
+ std::addressof(m_context), address));
+
+ // Set up tracking fields.
+ m_phys_addr = m_entry.phys_addr;
+ m_cur_size = std::min<size_t>(
+ m_remaining_size,
+ m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
+
+ // Consume the whole contiguous block.
+ this->DetermineContiguousBlockExtents();
+ }
+
+ void ContinueTraversal() {
+ // Update our remaining size.
+ m_remaining_size = m_remaining_size - m_cur_size;
+
+ // Update our tracking fields.
+ if (m_remaining_size > 0) {
+ m_phys_addr = m_entry.phys_addr;
+ m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);
+
+ // Consume the whole contiguous block.
+ this->DetermineContiguousBlockExtents();
+ }
+ }
+
+ private:
+ void DetermineContiguousBlockExtents() {
+ // Continue traversing until we're not contiguous, or we have enough.
+ while (m_cur_size < m_remaining_size) {
+ ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry),
+ std::addressof(m_context)));
+
+ // If we're not contiguous, we're done.
+ if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
+ break;
+ }
+
+ // Update our current size.
+ m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
+ }
+ }
+ };
+
+ // Create ranges for both tables.
+ ContiguousRangeInfo src_range(src_page_table, src_address, size);
+ ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
+
+ // Validate the ranges.
+ while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
+ R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion);
+ R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion);
+
+ src_range.ContinueTraversal();
+ dst_range.ContinueTraversal();
+ }
+ }
+
+ // We no longer need to hold our lock on the source page table.
+ lk.TryUnlockHalf(src_page_table.m_general_lock);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the memory.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+ size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid,
+ const KPageProperties properties, OperationType operation,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(num_pages > 0);
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ ASSERT(this->ContainsPages(virt_addr, num_pages));
+
+ // As we don't allocate page entries in guest memory, we don't need to allocate them from
+ // or free them to the page list, and so it goes unused (along with page properties).
+
+ switch (operation) {
+ case OperationType::Unmap: {
+ // Ensure that any pages we track are closed on exit.
+ KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
+ SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+ // Make a page group representing the region to unmap.
+ this->MakePageGroup(pages_to_close, virt_addr, num_pages);
+
+ // Unmap.
+ m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+
+ R_SUCCEED();
+ }
+ case OperationType::Map: {
+ ASSERT(virt_addr != 0);
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
+ ConvertToMemoryPermission(properties.perm));
+
+ // Open references to pages, if we should.
+ if (this->IsHeapPhysicalAddress(phys_addr)) {
+ m_kernel.MemoryManager().Open(phys_addr, num_pages);
+ }
+
+ R_SUCCEED();
+ }
+ case OperationType::Separate: {
+ // TODO: Unimplemented.
+ R_SUCCEED();
+ }
+ case OperationType::ChangePermissions:
+ case OperationType::ChangePermissionsAndRefresh:
+ case OperationType::ChangePermissionsAndRefreshAndFlush: {
+ m_memory->ProtectRegion(*m_impl, virt_addr, num_pages * PageSize,
+ ConvertToMemoryPermission(properties.perm));
+ R_SUCCEED();
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+ size_t num_pages, const KPageGroup& page_group,
+ const KPageProperties properties, OperationType operation,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ ASSERT(num_pages > 0);
+ ASSERT(num_pages == page_group.GetNumPages());
+
+ // As we don't allocate page entries in guest memory, we don't need to allocate them from
+ // the page list, and so it goes unused (along with page properties).
+
+ switch (operation) {
+ case OperationType::MapGroup:
+ case OperationType::MapFirstGroup: {
+ // We want to maintain a new reference to every page in the group.
+ KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+
+ for (const auto& node : page_group) {
+ const size_t size{node.GetNumPages() * PageSize};
+
+ // Map the pages.
+ m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
+ ConvertToMemoryPermission(properties.perm));
+
+ virt_addr += size;
+ }
+
+ // We succeeded! We want to persist the reference to the pages.
+ spg.CancelClose();
+
+ R_SUCCEED();
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) {
+ while (page_list->Peek()) {
+ [[maybe_unused]] auto page = page_list->Pop();
+
+ // TODO: Free page entries once they are allocated in guest memory.
+ // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+ // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+ // this->GetPageTableManager().Free(page);
+ }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
new file mode 100644
index 000000000..556d230b3
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -0,0 +1,760 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "common/page_table.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
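+// Controls how block merge tracking is adjusted at the head, body, and tail of an
+// operated range; see KMemoryBlockDisableMergeAttribute.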
+enum class DisableMergeAttribute : u8 {
+ None = (0U << 0),
+
+ DisableHead = (1U << 0),
+ DisableHeadAndBody = (1U << 1),
+ EnableHeadAndBody = (1U << 2),
+ DisableTail = (1U << 3),
+ EnableTail = (1U << 4),
+ EnableAndMergeHeadBodyTail = (1U << 5),
+
+ EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+ DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
+
+struct KPageProperties {
+ KMemoryPermission perm;
+ bool io;
+ bool uncached;
+ DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
+class KResourceLimit;
+class KSystemResource;
+
+class KPageTableBase {
+ YUZU_NON_COPYABLE(KPageTableBase);
+ YUZU_NON_MOVEABLE(KPageTableBase);
+
+public:
+ using TraversalEntry = Common::PageTable::TraversalEntry;
+ using TraversalContext = Common::PageTable::TraversalContext;
+
+ class MemoryRange {
+ private:
+ KernelCore& m_kernel;
+ KPhysicalAddress m_address;
+ size_t m_size;
+ bool m_heap;
+
+ public:
+ explicit MemoryRange(KernelCore& kernel)
+ : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
+
+ void Set(KPhysicalAddress address, size_t size, bool heap) {
+ m_address = address;
+ m_size = size;
+ m_heap = heap;
+ }
+
+ KPhysicalAddress GetAddress() const {
+ return m_address;
+ }
+ size_t GetSize() const {
+ return m_size;
+ }
+ bool IsHeap() const {
+ return m_heap;
+ }
+
+ void Open();
+ void Close();
+ };
+
+protected:
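+ // Fill patterns written to freshly mapped heap, ipc, and stack pages.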
+ enum MemoryFillValue : u8 {
+ MemoryFillValue_Zero = 0,
+ MemoryFillValue_Stack = 'X',
+ MemoryFillValue_Ipc = 'Y',
+ MemoryFillValue_Heap = 'Z',
+ };
+
+ enum class OperationType {
+ Map = 0,
+ MapGroup = 1,
+ MapFirstGroup = 2,
+ Unmap = 3,
+ ChangePermissions = 4,
+ ChangePermissionsAndRefresh = 5,
+ ChangePermissionsAndRefreshAndFlush = 6,
+ Separate = 7,
+ };
+
+ static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
+ static constexpr size_t RegionAlignment = 2_MiB;
+ static_assert(RegionAlignment == KernelAslrAlignment);
+
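+ // Intrusive singly-linked list of page-aligned nodes; updates stage page-table
+ // pages here until FinalizeUpdate consumes them.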
+ struct PageLinkedList {
+ private:
+ struct Node {
+ Node* m_next;
+ std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+ };
+ static_assert(std::is_trivial_v<Node>);
+
+ private:
+ Node* m_root{};
+
+ public:
+ constexpr PageLinkedList() : m_root(nullptr) {}
+
+ void Push(Node* n) {
+ ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+ n->m_next = m_root;
+ m_root = n;
+ }
+
+ Node* Peek() const {
+ return m_root;
+ }
+
+ Node* Pop() {
+ Node* const r = m_root;
+
+ m_root = r->m_next;
+ r->m_next = nullptr;
+
+ return r;
+ }
+ };
+ static_assert(std::is_trivially_destructible_v<PageLinkedList>);
+
+ static constexpr auto DefaultMemoryIgnoreAttr =
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+
+ static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
+ switch (static_cast<Svc::CreateProcessFlag>(as_type &
+ Svc::CreateProcessFlag::AddressSpaceMask)) {
+ case Svc::CreateProcessFlag::AddressSpace64Bit:
+ return 39;
+ case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+ return 36;
+ case Svc::CreateProcessFlag::AddressSpace32Bit:
+ case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+ return 32;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+private:
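+ // RAII helper that collects pages freed during an update and hands them to
+ // FinalizeUpdate when it goes out of scope.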
+ class KScopedPageTableUpdater {
+ private:
+ KPageTableBase* m_pt;
+ PageLinkedList m_ll;
+
+ public:
+ explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
+ explicit KScopedPageTableUpdater(KPageTableBase& pt)
+ : KScopedPageTableUpdater(std::addressof(pt)) {}
+ ~KScopedPageTableUpdater() {
+ m_pt->FinalizeUpdate(this->GetPageList());
+ }
+
+ PageLinkedList* GetPageList() {
+ return std::addressof(m_ll);
+ }
+ };
+
+private:
+ KernelCore& m_kernel;
+ Core::System& m_system;
+ KProcessAddress m_address_space_start{};
+ KProcessAddress m_address_space_end{};
+ KProcessAddress m_heap_region_start{};
+ KProcessAddress m_heap_region_end{};
+ KProcessAddress m_current_heap_end{};
+ KProcessAddress m_alias_region_start{};
+ KProcessAddress m_alias_region_end{};
+ KProcessAddress m_stack_region_start{};
+ KProcessAddress m_stack_region_end{};
+ KProcessAddress m_kernel_map_region_start{};
+ KProcessAddress m_kernel_map_region_end{};
+ KProcessAddress m_alias_code_region_start{};
+ KProcessAddress m_alias_code_region_end{};
+ KProcessAddress m_code_region_start{};
+ KProcessAddress m_code_region_end{};
+ size_t m_max_heap_size{};
+ size_t m_mapped_physical_memory_size{};
+ size_t m_mapped_unsafe_physical_memory{};
+ size_t m_mapped_insecure_memory{};
+ size_t m_mapped_ipc_server_memory{};
+ mutable KLightLock m_general_lock;
+ mutable KLightLock m_map_physical_memory_lock;
+ KLightLock m_device_map_lock;
+ std::unique_ptr<Common::PageTable> m_impl{};
+ Core::Memory::Memory* m_memory{};
+ KMemoryBlockManager m_memory_block_manager{};
+ u32 m_allocate_option{};
+ u32 m_address_space_width{};
+ bool m_is_kernel{};
+ bool m_enable_aslr{};
+ bool m_enable_device_address_space_merge{};
+ KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+ KBlockInfoManager* m_block_info_manager{};
+ KResourceLimit* m_resource_limit{};
+ const KMemoryRegion* m_cached_physical_linear_region{};
+ const KMemoryRegion* m_cached_physical_heap_region{};
+ MemoryFillValue m_heap_fill_value{};
+ MemoryFillValue m_ipc_fill_value{};
+ MemoryFillValue m_stack_fill_value{};
+
+public:
+ explicit KPageTableBase(KernelCore& kernel);
+ ~KPageTableBase();
+
+ Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
+ Core::Memory::Memory& memory);
+ Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+ bool enable_device_address_space_merge, bool from_back,
+ KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit, Core::Memory::Memory& memory,
+ KProcessAddress aslr_space_start);
+
+ void Finalize();
+
+ bool IsKernel() const {
+ return m_is_kernel;
+ }
+ bool IsAslrEnabled() const {
+ return m_enable_aslr;
+ }
+
+ bool Contains(KProcessAddress addr) const {
+ return m_address_space_start <= addr && addr <= m_address_space_end - 1;
+ }
+
+ bool Contains(KProcessAddress addr, size_t size) const {
+ return m_address_space_start <= addr && addr < addr + size &&
+ addr + size - 1 <= m_address_space_end - 1;
+ }
+
+ bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+ return this->Contains(addr, size) && m_alias_region_start <= addr &&
+ addr + size - 1 <= m_alias_region_end - 1;
+ }
+
+ bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+ return this->Contains(addr, size) && m_heap_region_start <= addr &&
+ addr + size - 1 <= m_heap_region_end - 1;
+ }
+
+ bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+ // Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the
+ // alias code region.
+ return this->CanContain(addr, size, Svc::MemoryState::AliasCode);
+ }
+
+ KScopedLightLock AcquireDeviceMapLock() {
+ return KScopedLightLock(m_device_map_lock);
+ }
+
+ KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+ size_t GetRegionSize(Svc::MemoryState state) const;
+ bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+ KProcessAddress GetRegionAddress(KMemoryState state) const {
+ return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+ size_t GetRegionSize(KMemoryState state) const {
+ return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ return this->CanContain(addr, size,
+ static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+
+public:
+ Core::Memory::Memory& GetMemory() {
+ return *m_memory;
+ }
+
+ Core::Memory::Memory& GetMemory() const {
+ return *m_memory;
+ }
+
+ Common::PageTable& GetImpl() {
+ return *m_impl;
+ }
+
+ Common::PageTable& GetImpl() const {
+ return *m_impl;
+ }
+
+ size_t GetNumGuardPages() const {
+ return this->IsKernel() ? 1 : 4;
+ }
+
+protected:
+ // NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions
+ // in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived
+ // class, and this avoids unnecessary virtual function calls.
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+ KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties,
+ OperationType operation, bool reuse_ll);
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+ const KPageGroup& page_group, const KPageProperties properties,
+ OperationType operation, bool reuse_ll);
+ void FinalizeUpdate(PageLinkedList* page_list);
+
+ bool IsLockedByCurrentThread() const {
+ return m_general_lock.IsLockedByCurrentThread();
+ }
+
+ bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+ m_cached_physical_linear_region, phys_addr);
+ }
+
+ bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+ m_cached_physical_linear_region, phys_addr, size);
+ }
+
+ bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr);
+ }
+
+ bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr, size);
+ }
+
+ bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr);
+ }
+
+ bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+ return (m_address_space_start <= addr) &&
+ (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+ (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
+ }
+
+private:
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const;
+
+ Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+ perm, attr_mask, attr));
+ }
+
+ Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+ state_mask, state, perm_mask, perm, attr_mask, attr,
+ ignore_attr));
+ }
+ Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+ attr_mask, attr, ignore_attr));
+ }
+
+ Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr,
+ size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageGroup* pg);
+
+ Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+ KProcessAddress address) const;
+
+ Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+ Svc::MemoryState state) const;
+
+ Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm);
+ Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
+ void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg);
+
+ Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+
+ Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+
+ Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr,
+ size_t size, KMemoryState state, KMemoryPermission perm);
+ Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size,
+ KMemoryState state);
+ Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size,
+ KMemoryState state);
+
+ Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size, KMemoryPermission test_perm,
+ KMemoryState dst_state);
+ Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
+ KMemoryPermission test_perm, KMemoryState dst_state,
+ KPageTableBase& src_page_table, bool send);
+ void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
+ size_t size, KMemoryPermission prot_perm);
+
+ size_t GetSize(KMemoryState state) const;
+
+ bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return this->GetImpl().GetPhysicalAddress(out, virt_addr);
+ }
+
+public:
+ bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+ // Validate pre-conditions.
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Acquire exclusive access to the table while doing address translation.
+ KScopedLightLock lk(m_general_lock);
+
+ return this->GetPhysicalAddressLocked(out, virt_addr);
+ }
+
+ KBlockInfoManager* GetBlockInfoManager() const {
+ return m_block_info_manager;
+ }
+
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr);
+ Result SetHeapSize(KProcessAddress* out, size_t size);
+ Result SetMaxHeapSize(size_t size);
+ Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const;
+ Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const;
+ Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+ R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static));
+ }
+ Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+ R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io));
+ }
+ Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+ Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping, Svc::MemoryPermission perm);
+ Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping);
+ Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+ Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
+ Result MapInsecureMemory(KProcessAddress address, size_t size);
+ Result UnmapInsecureMemory(KProcessAddress address, size_t size);
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+ region_num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+ Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+ Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
+ Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
+
+ Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state);
+
+ Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state);
+
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
+
+ Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
+ Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
+
+ Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned);
+ Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address,
+ size_t size);
+
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
+
+ Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm);
+ Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size);
+ Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+
+ Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address,
+ size_t size);
+
+ Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state,
+ KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr);
+ Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ void* buffer);
+ Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr,
+ size_t size, KMemoryState dst_state_mask,
+ KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KPageTableBase& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send);
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
+
+ Result MapPhysicalMemory(KProcessAddress address, size_t size);
+ Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
+
+ Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+ Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+
+ Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt,
+ KProcessAddress src_address);
+
+public:
+ KProcessAddress GetAddressSpaceStart() const {
+ return m_address_space_start;
+ }
+ KProcessAddress GetHeapRegionStart() const {
+ return m_heap_region_start;
+ }
+ KProcessAddress GetAliasRegionStart() const {
+ return m_alias_region_start;
+ }
+ KProcessAddress GetStackRegionStart() const {
+ return m_stack_region_start;
+ }
+ KProcessAddress GetKernelMapRegionStart() const {
+ return m_kernel_map_region_start;
+ }
+ KProcessAddress GetCodeRegionStart() const {
+ return m_code_region_start;
+ }
+ KProcessAddress GetAliasCodeRegionStart() const {
+ return m_alias_code_region_start;
+ }
+
+ size_t GetAddressSpaceSize() const {
+ return m_address_space_end - m_address_space_start;
+ }
+ size_t GetHeapRegionSize() const {
+ return m_heap_region_end - m_heap_region_start;
+ }
+ size_t GetAliasRegionSize() const {
+ return m_alias_region_end - m_alias_region_start;
+ }
+ size_t GetStackRegionSize() const {
+ return m_stack_region_end - m_stack_region_start;
+ }
+ size_t GetKernelMapRegionSize() const {
+ return m_kernel_map_region_end - m_kernel_map_region_start;
+ }
+ size_t GetCodeRegionSize() const {
+ return m_code_region_end - m_code_region_start;
+ }
+ size_t GetAliasCodeRegionSize() const {
+ return m_alias_code_region_end - m_alias_code_region_start;
+ }
+
+ size_t GetNormalMemorySize() const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
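+        // Normal memory is the currently committed heap plus any mapped physical memory.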
+ return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
+ }
+
+ size_t GetCodeSize() const;
+ size_t GetCodeDataSize() const;
+ size_t GetAliasCodeSize() const;
+ size_t GetAliasCodeDataSize() const;
+
+ u32 GetAllocateOption() const {
+ return m_allocate_option;
+ }
+
+ u32 GetAddressSpaceWidth() const {
+ return m_address_space_width;
+ }
+
+public:
+ // Linear mapped
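+    // Helpers for translating between physical and virtual addresses within the kernel's
+    // linearly-mapped region, where translation is effectively a fixed offset rather than
+    // a page-table walk.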
+ static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+ return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+ }
+
+ static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel,
+ KVirtualAddress addr) {
+ return kernel.MemoryLayout().GetLinearPhysicalAddress(addr);
+ }
+
+ static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel,
+ KPhysicalAddress addr) {
+ return kernel.MemoryLayout().GetLinearVirtualAddress(addr);
+ }
+
+ // Heap
+ static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+ return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+ }
+
+ static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) {
+ return GetLinearMappedPhysicalAddress(kernel, addr);
+ }
+
+ static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) {
+ return GetLinearMappedVirtualAddress(kernel, addr);
+ }
+
+ // Member heap
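+    // Convenience wrappers over the static helpers above, bound to this table's kernel
+    // instance.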
+ u8* GetHeapVirtualPointer(KPhysicalAddress addr) {
+ return GetHeapVirtualPointer(m_kernel, addr);
+ }
+
+ KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
+ return GetHeapPhysicalAddress(m_kernel, addr);
+ }
+
+ KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
+ return GetHeapVirtualAddress(m_kernel, addr);
+ }
+
+ // TODO: GetPageTableVirtualAddress
+ // TODO: GetPageTablePhysicalAddress
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 7fa34d693..6c29eb72c 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1,515 +1,598 @@
-// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <algorithm>
-#include <bitset>
-#include <ctime>
-#include <memory>
#include <random>
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/code_set.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
+#include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/k_worker_task_manager.h"
namespace Kernel {
-namespace {
-/**
- * Sets up the primary application thread
- *
- * @param system The system instance to create the main thread under.
- * @param owner_process The parent process for the main thread
- * @param priority The priority to give the main thread
- */
-void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
- KProcessAddress stack_top) {
- const KProcessAddress entry_point = owner_process.GetEntryPoint();
- ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
-
- KThread* thread = KThread::Create(system.Kernel());
- SCOPE_EXIT({ thread->Close(); });
-
- ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
- owner_process.GetIdealCoreId(),
- std::addressof(owner_process))
- .IsSuccess());
-
- // Register 1 must be a handle to the main thread
- Handle thread_handle{};
- owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread);
-
- thread->GetContext32().cpu_registers[0] = 0;
- thread->GetContext64().cpu_registers[0] = 0;
- thread->GetContext32().cpu_registers[1] = thread_handle;
- thread->GetContext64().cpu_registers[1] = thread_handle;
-
- if (system.DebuggerEnabled()) {
- thread->RequestSuspend(SuspendType::Debug);
- }
- // Run our thread.
- void(thread->Run());
-}
-} // Anonymous namespace
+namespace {
-Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit) {
- auto& kernel = system.Kernel();
+Result TerminateChildren(KernelCore& kernel, KProcess* process,
+ const KThread* thread_to_not_terminate) {
+ // Request that all children threads terminate.
+ {
+ KScopedLightLock proc_lk(process->GetListLock());
+ KScopedSchedulerLock sl(kernel);
+
+ if (thread_to_not_terminate != nullptr &&
+ process->GetPinnedThread(GetCurrentCoreId(kernel)) == thread_to_not_terminate) {
+            // NOTE: Here Nintendo unpins the current thread instead of thread_to_not_terminate.
+            // This is valid because the only caller that passes a non-nullptr argument passes
+            // GetCurrentThreadPointer(), but it is worth noting because it looks incorrect at
+            // first glance.
+ process->UnpinCurrentThread();
+ }
- process->name = std::move(process_name);
- process->m_resource_limit = res_limit;
- process->m_system_resource_address = 0;
- process->m_state = State::Created;
- process->m_program_id = 0;
- process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
- : kernel.CreateNewUserProcessID();
- process->m_capabilities.InitializeForMetadatalessProcess();
- process->m_is_initialized = true;
+ auto& thread_list = process->GetThreadList();
+ for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
+ if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
+ if (thread->GetState() != ThreadState::Terminated) {
+ thread->RequestTerminate();
+ }
+ }
+ }
+ }
- std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue()
- : static_cast<u32>(std::time(nullptr)));
- std::uniform_int_distribution<u64> distribution;
- std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(),
- [&] { return distribution(rng); });
+ // Wait for all children threads to terminate.
+ while (true) {
+ // Get the next child.
+ KThread* cur_child = nullptr;
+ {
+ KScopedLightLock proc_lk(process->GetListLock());
+
+ auto& thread_list = process->GetThreadList();
+ for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
+ if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
+ if (thread->GetState() != ThreadState::Terminated) {
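+                        // Open() can fail if the thread is already being destroyed; if so,
+                        // skip it and keep scanning.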
+ if (thread->Open()) {
+ cur_child = thread;
+ break;
+ }
+ }
+ }
+ }
+ }
- kernel.AppendNewProcess(process);
+ // If we didn't find any non-terminated children, we're done.
+ if (cur_child == nullptr) {
+ break;
+ }
- // Clear remaining fields.
- process->m_num_running_threads = 0;
- process->m_is_signaled = false;
- process->m_exception_thread = nullptr;
- process->m_is_suspended = false;
- process->m_schedule_count = 0;
- process->m_is_handle_table_initialized = false;
- process->m_is_hbl = false;
+ // Terminate and close the thread.
+ SCOPE_EXIT({ cur_child->Close(); });
- // Open a reference to the resource limit.
- process->m_resource_limit->Open();
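+        // Propagate the result only if it indicates that our own termination was requested
+        // while we were waiting on the child.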
+ if (const Result terminate_result = cur_child->Terminate();
+ ResultTerminationRequested == terminate_result) {
+ R_THROW(terminate_result);
+ }
+ }
R_SUCCEED();
}
-void KProcess::DoWorkerTaskImpl() {
- UNIMPLEMENTED();
-}
-
-KResourceLimit* KProcess::GetResourceLimit() const {
- return m_resource_limit;
-}
+class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
+private:
+ KThread** m_exception_thread;
-void KProcess::IncrementRunningThreadCount() {
- ASSERT(m_num_running_threads.load() >= 0);
- ++m_num_running_threads;
-}
+public:
+ explicit ThreadQueueImplForKProcessEnterUserException(KernelCore& kernel, KThread** t)
+ : KThreadQueue(kernel), m_exception_thread(t) {}
-void KProcess::DecrementRunningThreadCount() {
- ASSERT(m_num_running_threads.load() > 0);
+ virtual void EndWait(KThread* waiting_thread, Result wait_result) override {
+ // Set the exception thread.
+ *m_exception_thread = waiting_thread;
- if (const auto prev = m_num_running_threads--; prev == 1) {
- // TODO(bunnei): Process termination to be implemented when multiprocess is supported.
+ // Invoke the base end wait handler.
+ KThreadQueue::EndWait(waiting_thread, wait_result);
}
-}
-u64 KProcess::GetTotalPhysicalMemoryAvailable() {
- const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
- m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size +
- m_main_thread_stack_size};
- if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
- capacity != pool_size) {
- LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
- }
- if (capacity < m_memory_usage_capacity) {
- return capacity;
+ virtual void CancelWait(KThread* waiting_thread, Result wait_result,
+ bool cancel_timer_task) override {
+ // Remove the thread as a waiter on its mutex owner.
+ waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
+
+ // Invoke the base cancel wait handler.
+ KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
- return m_memory_usage_capacity;
-}
+};
-u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
- return this->GetTotalPhysicalMemoryAvailable() - this->GetSystemResourceSize();
+void GenerateRandom(std::span<u64> out_random) {
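+    // NOTE: This is deterministic when a seed override is enabled in settings; otherwise it
+    // is seeded from wall-clock time, so the generated entropy is not cryptographically
+    // strong.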
+ std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue()
+ : static_cast<u32>(std::time(nullptr)));
+ std::uniform_int_distribution<u64> distribution;
+ std::generate(out_random.begin(), out_random.end(), [&] { return distribution(rng); });
}
-u64 KProcess::GetTotalPhysicalMemoryUsed() {
- return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() +
- this->GetSystemResourceSize();
-}
+} // namespace
-u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
- return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize();
-}
+void KProcess::Finalize() {
+ // Delete the process local region.
+ this->DeleteThreadLocalRegion(m_plr_address);
-bool KProcess::ReleaseUserException(KThread* thread) {
- KScopedSchedulerLock sl{m_kernel};
+ // Get the used memory size.
+ const size_t used_memory_size = this->GetUsedNonSystemUserPhysicalMemorySize();
- if (m_exception_thread == thread) {
- m_exception_thread = nullptr;
+ // Finalize the page table.
+ m_page_table.Finalize();
- // Remove waiter thread.
- bool has_waiters{};
- if (KThread* next = thread->RemoveKernelWaiterByKey(
- std::addressof(has_waiters),
- reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)));
- next != nullptr) {
- next->EndWait(ResultSuccess);
+ // Finish using our system resource.
+ if (m_system_resource) {
+ if (m_system_resource->IsSecureResource()) {
+ // Finalize optimized memory. If memory wasn't optimized, this is a no-op.
+ m_kernel.MemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
}
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
-
- return true;
- } else {
- return false;
+ m_system_resource->Close();
+ m_system_resource = nullptr;
}
-}
-
-void KProcess::PinCurrentThread(s32 core_id) {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
- // Get the current thread.
- KThread* cur_thread =
- m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+ // Free all shared memory infos.
+ {
+ auto it = m_shared_memory_list.begin();
+ while (it != m_shared_memory_list.end()) {
+ KSharedMemoryInfo* info = std::addressof(*it);
+ KSharedMemory* shmem = info->GetSharedMemory();
- // If the thread isn't terminated, pin it.
- if (!cur_thread->IsTerminationRequested()) {
- // Pin it.
- this->PinThread(core_id, cur_thread);
- cur_thread->Pin(core_id);
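+            // Each reference on the info pairs with a reference on the shared memory, so
+            // drop one shmem reference per info reference; the trailing Close() releases
+            // the reference paired with the final info reference.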
+ while (!info->Close()) {
+ shmem->Close();
+ }
+ shmem->Close();
- // An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ it = m_shared_memory_list.erase(it);
+ KSharedMemoryInfo::Free(m_kernel, info);
+ }
}
-}
-void KProcess::UnpinCurrentThread(s32 core_id) {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
-
- // Get the current thread.
- KThread* cur_thread =
- m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+ // Our thread local page list must be empty at this point.
+ ASSERT(m_partially_used_tlp_tree.empty());
+ ASSERT(m_fully_used_tlp_tree.empty());
- // Unpin it.
- cur_thread->Unpin();
- this->UnpinThread(core_id, cur_thread);
+ // Release memory to the resource limit.
+ if (m_resource_limit != nullptr) {
+ ASSERT(used_memory_size >= m_memory_release_hint);
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, used_memory_size,
+ used_memory_size - m_memory_release_hint);
+ m_resource_limit->Close();
+ }
- // An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ // Perform inherited finalization.
+ KSynchronizationObject::Finalize();
}
-void KProcess::UnpinThread(KThread* thread) {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
-
- // Get the thread's core id.
- const auto core_id = thread->GetActiveCore();
+Result KProcess::Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
+ bool is_real) {
+ // TODO: remove this special case
+ if (is_real) {
+ // Create and clear the process local region.
+ R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
+ this->GetMemory().ZeroBlock(m_plr_address, Svc::ThreadLocalRegionSize);
+ }
- // Unpin it.
- this->UnpinThread(core_id, thread);
- thread->Unpin();
+ // Copy in the name from parameters.
+ static_assert(sizeof(params.name) < sizeof(m_name));
+ std::memcpy(m_name.data(), params.name.data(), sizeof(params.name));
+ m_name[sizeof(params.name)] = 0;
+
+ // Set misc fields.
+ m_state = State::Created;
+ m_main_thread_stack_size = 0;
+ m_used_kernel_memory_size = 0;
+ m_ideal_core_id = 0;
+ m_flags = params.flags;
+ m_version = params.version;
+ m_program_id = params.program_id;
+ m_code_address = params.code_address;
+ m_code_size = params.code_num_pages * PageSize;
+ m_is_application = True(params.flags & Svc::CreateProcessFlag::IsApplication);
+
+ // Set thread fields.
+ for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ m_running_threads[i] = nullptr;
+ m_pinned_threads[i] = nullptr;
+ m_running_thread_idle_counts[i] = 0;
+ m_running_thread_switch_counts[i] = 0;
+ }
- // An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
-}
+ // Set max memory based on address space type.
+ switch ((params.flags & Svc::CreateProcessFlag::AddressSpaceMask)) {
+ case Svc::CreateProcessFlag::AddressSpace32Bit:
+ case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+ case Svc::CreateProcessFlag::AddressSpace64Bit:
+ m_max_process_memory = m_page_table.GetHeapRegionSize();
+ break;
+ case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+ m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
+ break;
+ default:
+ UNREACHABLE();
+ }
-Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
- [[maybe_unused]] size_t size) {
- // Lock ourselves, to prevent concurrent access.
- KScopedLightLock lk(m_state_lock);
+ // Generate random entropy.
+ GenerateRandom(m_entropy);
- // Try to find an existing info for the memory.
- KSharedMemoryInfo* shemen_info = nullptr;
- const auto iter = std::find_if(
- m_shared_memory_list.begin(), m_shared_memory_list.end(),
- [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
- if (iter != m_shared_memory_list.end()) {
- shemen_info = *iter;
- }
+ // Clear remaining fields.
+ m_num_running_threads = 0;
+ m_num_process_switches = 0;
+ m_num_thread_switches = 0;
+ m_num_fpu_switches = 0;
+ m_num_supervisor_calls = 0;
+ m_num_ipc_messages = 0;
- if (shemen_info == nullptr) {
- shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
- R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);
+ m_is_signaled = false;
+ m_exception_thread = nullptr;
+ m_is_suspended = false;
+ m_memory_release_hint = 0;
+ m_schedule_count = 0;
+ m_is_handle_table_initialized = false;
- shemen_info->Initialize(shmem);
- m_shared_memory_list.push_back(shemen_info);
- }
+ // Open a reference to our resource limit.
+ m_resource_limit = res_limit;
+ m_resource_limit->Open();
- // Open a reference to the shared memory and its info.
- shmem->Open();
- shemen_info->Open();
+ // We're initialized!
+ m_is_initialized = true;
R_SUCCEED();
}
-void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
- [[maybe_unused]] size_t size) {
- // Lock ourselves, to prevent concurrent access.
- KScopedLightLock lk(m_state_lock);
+Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
+ std::span<const u32> caps, KResourceLimit* res_limit,
+ KMemoryManager::Pool pool, bool immortal) {
+ ASSERT(res_limit != nullptr);
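+    // Check that the code size computation cannot overflow.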
+ ASSERT((params.code_num_pages * PageSize) / PageSize ==
+ static_cast<size_t>(params.code_num_pages));
+
+ // Set members.
+ m_memory_pool = pool;
+ m_is_default_application_system_resource = false;
+ m_is_immortal = immortal;
+
+ // Setup our system resource.
+ if (const size_t system_resource_num_pages = params.system_resource_num_pages;
+ system_resource_num_pages != 0) {
+ // Create a secure system resource.
+ KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
+ R_UNLESS(secure_resource != nullptr, ResultOutOfResource);
+
+ ON_RESULT_FAILURE {
+ secure_resource->Close();
+ };
+
+ // Initialize the secure resource.
+ R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, res_limit,
+ m_memory_pool));
+
+ // Set our system resource.
+ m_system_resource = secure_resource;
+ } else {
+ // Use the system-wide system resource.
+ const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
+ m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
+ : m_kernel.GetSystemSystemResource());
+
+ m_is_default_application_system_resource = is_app;
- KSharedMemoryInfo* shemen_info = nullptr;
- const auto iter = std::find_if(
- m_shared_memory_list.begin(), m_shared_memory_list.end(),
- [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
- if (iter != m_shared_memory_list.end()) {
- shemen_info = *iter;
+ // Open reference to the system resource.
+ m_system_resource->Open();
}
- ASSERT(shemen_info != nullptr);
+ // Ensure we clean up our secure resource, if we fail.
+ ON_RESULT_FAILURE {
+ m_system_resource->Close();
+ m_system_resource = nullptr;
+ };
- if (shemen_info->Close()) {
- m_shared_memory_list.erase(iter);
- KSharedMemoryInfo::Free(m_kernel, shemen_info);
+ // Setup page table.
+ {
+ const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
+ const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
+ const bool enable_das_merge =
+ False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
+ R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+ params.code_address, params.code_num_pages * PageSize,
+ m_system_resource, res_limit, this->GetMemory(), 0));
}
+ ON_RESULT_FAILURE_2 {
+ m_page_table.Finalize();
+ };
- // Close a reference to the shared memory.
- shmem->Close();
-}
+ // Ensure we can insert the code region.
+ R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
+ KMemoryState::Code),
+ ResultInvalidMemoryRegion);
-void KProcess::RegisterThread(KThread* thread) {
- KScopedLightLock lk{m_list_lock};
+ // Map the code region.
+ R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState::Code,
+ KMemoryPermission::KernelRead));
- m_thread_list.push_back(thread);
-}
+ // Initialize capabilities.
+ R_TRY(m_capabilities.InitializeForKip(caps, std::addressof(m_page_table)));
-void KProcess::UnregisterThread(KThread* thread) {
- KScopedLightLock lk{m_list_lock};
+ // Initialize the process id.
+ m_process_id = m_kernel.CreateNewUserProcessID();
+ ASSERT(InitialProcessIdMin <= m_process_id);
+ ASSERT(m_process_id <= InitialProcessIdMax);
- m_thread_list.remove(thread);
-}
+ // Initialize the rest of the process.
+ R_TRY(this->Initialize(params, res_limit, true));
-u64 KProcess::GetFreeThreadCount() const {
- if (m_resource_limit != nullptr) {
- const auto current_value =
- m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
- const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
- return limit_value - current_value;
- } else {
- return 0;
- }
+ // We succeeded!
+ R_SUCCEED();
}
-Result KProcess::Reset() {
- // Lock the process and the scheduler.
- KScopedLightLock lk(m_state_lock);
- KScopedSchedulerLock sl{m_kernel};
+Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
+ std::span<const u32> user_caps, KResourceLimit* res_limit,
+ KMemoryManager::Pool pool, KProcessAddress aslr_space_start) {
+ ASSERT(res_limit != nullptr);
- // Validate that we're in a state that we can reset.
- R_UNLESS(m_state != State::Terminated, ResultInvalidState);
- R_UNLESS(m_is_signaled, ResultInvalidState);
+ // Set members.
+ m_memory_pool = pool;
+ m_is_default_application_system_resource = false;
+ m_is_immortal = false;
- // Clear signaled.
- m_is_signaled = false;
- R_SUCCEED();
-}
+ // Get the memory sizes.
+ const size_t code_num_pages = params.code_num_pages;
+ const size_t system_resource_num_pages = params.system_resource_num_pages;
+ const size_t code_size = code_num_pages * PageSize;
+ const size_t system_resource_size = system_resource_num_pages * PageSize;
-Result KProcess::SetActivity(ProcessActivity activity) {
- // Lock ourselves and the scheduler.
- KScopedLightLock lk{m_state_lock};
- KScopedLightLock list_lk{m_list_lock};
- KScopedSchedulerLock sl{m_kernel};
+ // Reserve memory for our code resource.
+ KScopedResourceReservation memory_reservation(
+ res_limit, Svc::LimitableResource::PhysicalMemoryMax, code_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
- // Validate our state.
- R_UNLESS(m_state != State::Terminating, ResultInvalidState);
- R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+ // Setup our system resource.
+ if (system_resource_num_pages != 0) {
+ // Create a secure system resource.
+ KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
+ R_UNLESS(secure_resource != nullptr, ResultOutOfResource);
- // Either pause or resume.
- if (activity == ProcessActivity::Paused) {
- // Verify that we're not suspended.
- R_UNLESS(!m_is_suspended, ResultInvalidState);
+ ON_RESULT_FAILURE {
+ secure_resource->Close();
+ };
- // Suspend all threads.
- for (auto* thread : this->GetThreadList()) {
- thread->RequestSuspend(SuspendType::Process);
- }
+ // Initialize the secure resource.
+ R_TRY(secure_resource->Initialize(system_resource_size, res_limit, m_memory_pool));
+
+ // Set our system resource.
+ m_system_resource = secure_resource;
- // Set ourselves as suspended.
- this->SetSuspended(true);
} else {
- ASSERT(activity == ProcessActivity::Runnable);
+ // Use the system-wide system resource.
+ const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
+ m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
+ : m_kernel.GetSystemSystemResource());
- // Verify that we're suspended.
- R_UNLESS(m_is_suspended, ResultInvalidState);
+ m_is_default_application_system_resource = is_app;
- // Resume all threads.
- for (auto* thread : this->GetThreadList()) {
- thread->Resume(SuspendType::Process);
- }
+ // Open reference to the system resource.
+ m_system_resource->Open();
+ }
- // Set ourselves as resumed.
- this->SetSuspended(false);
+ // Ensure we clean up our secure resource, if we fail.
+ ON_RESULT_FAILURE {
+ m_system_resource->Close();
+ m_system_resource = nullptr;
+ };
+
+ // Setup page table.
+ {
+ const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
+ const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
+ const bool enable_das_merge =
+ False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
+ R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+ params.code_address, code_size, m_system_resource, res_limit,
+ this->GetMemory(), aslr_space_start));
+ }
+ ON_RESULT_FAILURE_2 {
+ m_page_table.Finalize();
+ };
+
+ // Ensure we can insert the code region.
+ R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
+ ResultInvalidMemoryRegion);
+
+ // Map the code region.
+ R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState::Code,
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped));
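+    // (The code pages start kernel-readable and not mapped for userspace; the loader
+    // presumably reprotects each segment later via SetProcessMemoryPermission.)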
+
+ // Initialize capabilities.
+ R_TRY(m_capabilities.InitializeForUser(user_caps, std::addressof(m_page_table)));
+
+ // Initialize the process id.
+ m_process_id = m_kernel.CreateNewUserProcessID();
+ ASSERT(ProcessIdMin <= m_process_id);
+ ASSERT(m_process_id <= ProcessIdMax);
+
+ // If we should optimize memory allocations, do so.
+ if (m_system_resource->IsSecureResource() &&
+ True(params.flags & Svc::CreateProcessFlag::OptimizeMemoryAllocation)) {
+ R_TRY(m_kernel.MemoryManager().InitializeOptimizedMemory(m_process_id, pool));
}
+ // Initialize the rest of the process.
+ R_TRY(this->Initialize(params, res_limit, true));
+
+ // We succeeded, so commit our memory reservation.
+ memory_reservation.Commit();
R_SUCCEED();
}
-Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
- bool is_hbl) {
- m_program_id = metadata.GetTitleID();
- m_ideal_core = metadata.GetMainThreadCore();
- m_is_64bit_process = metadata.Is64BitProgram();
- m_system_resource_size = metadata.GetSystemResourceSize();
- m_image_size = code_size;
- m_is_hbl = is_hbl;
+void KProcess::DoWorkerTaskImpl() {
+ // Terminate child threads.
+ TerminateChildren(m_kernel, this, nullptr);
- if (metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is39Bit) {
- // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large.
- // However, some (buggy) programs/libraries like skyline incorrectly depend on the
- // existence of ASLR pages before the entry point, so we will adjust the load address
- // to point to about 2GiB into the ASLR region.
- m_code_address = 0x8000'0000;
- } else {
- // All other processes can be mapped at the beginning of the code region.
- if (metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is36Bit) {
- m_code_address = 0x800'0000;
- } else {
- m_code_address = 0x20'0000;
- }
+ // Finalize the handle table, if we're not immortal.
+ if (!m_is_immortal && m_is_handle_table_initialized) {
+ this->FinalizeHandleTable();
}
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size);
- if (!memory_reservation.Succeeded()) {
- LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
- code_size + m_system_resource_size);
- R_RETURN(ResultLimitReached);
- }
- // Initialize process address space
- if (const Result result{m_page_table.InitializeForProcess(
- metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
- this->GetEntryPoint(), code_size, std::addressof(m_kernel.GetAppSystemResource()),
- m_resource_limit, m_kernel.System().ApplicationMemory())};
- result.IsError()) {
- R_RETURN(result);
- }
-
- // Map process code region
- if (const Result result{m_page_table.MapProcessCode(this->GetEntryPoint(), code_size / PageSize,
- KMemoryState::Code,
- KMemoryPermission::None)};
- result.IsError()) {
- R_RETURN(result);
- }
-
- // Initialize process capabilities
- const auto& caps{metadata.GetKernelCapabilities()};
- if (const Result result{
- m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)};
- result.IsError()) {
- R_RETURN(result);
- }
-
- // Set memory usage capacity
- switch (metadata.GetAddressSpaceType()) {
- case FileSys::ProgramAddressSpaceType::Is32Bit:
- case FileSys::ProgramAddressSpaceType::Is36Bit:
- case FileSys::ProgramAddressSpaceType::Is39Bit:
- m_memory_usage_capacity =
- m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart();
- break;
+ // Finish termination.
+ this->FinishTermination();
+}
- case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
- m_memory_usage_capacity =
- (m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) +
- (m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart());
- break;
+Result KProcess::StartTermination() {
+ // Finalize the handle table when we're done, if the process isn't immortal.
+ SCOPE_EXIT({
+ if (!m_is_immortal) {
+ this->FinalizeHandleTable();
+ }
+ });
- default:
- ASSERT(false);
- break;
- }
+ // Terminate child threads other than the current one.
+ R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
+}
- // Create TLS region
- R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
- memory_reservation.Commit();
+void KProcess::FinishTermination() {
+ // Only allow termination to occur if the process isn't immortal.
+ if (!m_is_immortal) {
+ // Release resource limit hint.
+ if (m_resource_limit != nullptr) {
+ m_memory_release_hint = this->GetUsedNonSystemUserPhysicalMemorySize();
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, 0,
+ m_memory_release_hint);
+ }
+
+ // Change state.
+ {
+ KScopedSchedulerLock sl(m_kernel);
+ this->ChangeState(State::Terminated);
+ }
- R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize()));
+ // Close.
+ this->Close();
+ }
}
-void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
- ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess);
- m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
+void KProcess::Exit() {
+    // Determine whether we need to start terminating.
+ bool needs_terminate = false;
+ {
+ KScopedLightLock lk(m_state_lock);
+ KScopedSchedulerLock sl(m_kernel);
+
+ ASSERT(m_state != State::Created);
+ ASSERT(m_state != State::CreatedAttached);
+ ASSERT(m_state != State::Crashed);
+ ASSERT(m_state != State::Terminated);
+ if (m_state == State::Running || m_state == State::RunningAttached ||
+ m_state == State::DebugBreak) {
+ this->ChangeState(State::Terminating);
+ needs_terminate = true;
+ }
+ }
- const std::size_t heap_capacity{m_memory_usage_capacity -
- (m_main_thread_stack_size + m_image_size)};
- ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError());
+ // If we need to start termination, do so.
+ if (needs_terminate) {
+ this->StartTermination();
- this->ChangeState(State::Running);
+ // Register the process as a work task.
+ m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
+ }
- SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top);
+ // Exit the current thread.
+ GetCurrentThread(m_kernel).Exit();
}
-void KProcess::PrepareForTermination() {
- this->ChangeState(State::Terminating);
+Result KProcess::Terminate() {
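+    // NOTE: Unlike Exit(), Terminate() may be invoked on a Crashed process, and it finishes
+    // termination synchronously when StartTermination() succeeds.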
+ // Determine whether we need to start terminating.
+ bool needs_terminate = false;
+ {
+ KScopedLightLock lk(m_state_lock);
- const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto* thread : in_thread_list) {
- if (thread->GetOwnerProcess() != this)
- continue;
+ // Check whether we're allowed to terminate.
+ R_UNLESS(m_state != State::Created, ResultInvalidState);
+ R_UNLESS(m_state != State::CreatedAttached, ResultInvalidState);
- if (thread == GetCurrentThreadPointer(m_kernel))
- continue;
+ KScopedSchedulerLock sl(m_kernel);
- // TODO(Subv): When are the other running/ready threads terminated?
- ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
- "Exiting processes with non-waiting threads is currently unimplemented");
+ if (m_state == State::Running || m_state == State::RunningAttached ||
+ m_state == State::Crashed || m_state == State::DebugBreak) {
+ this->ChangeState(State::Terminating);
+ needs_terminate = true;
+ }
+ }
- thread->Exit();
+ // If we need to terminate, do so.
+ if (needs_terminate) {
+ // Start termination.
+ if (R_SUCCEEDED(this->StartTermination())) {
+ // Finish termination.
+ this->FinishTermination();
+ } else {
+ // Register the process as a work task.
+ m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit,
+ this);
}
- };
+ }
- stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());
+ R_SUCCEED();
+}
- this->DeleteThreadLocalRegion(m_plr_address);
- m_plr_address = 0;
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
+ // Lock ourselves, to prevent concurrent access.
+ KScopedLightLock lk(m_state_lock);
- if (m_resource_limit) {
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax,
- m_main_thread_stack_size + m_image_size);
+ // Try to find an existing info for the memory.
+ KSharedMemoryInfo* info = nullptr;
+ for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
+ if (it->GetSharedMemory() == shmem) {
+ info = std::addressof(*it);
+ break;
+ }
}
- this->ChangeState(State::Terminated);
-}
+ // If we didn't find an info, create one.
+ if (info == nullptr) {
+ // Allocate a new info.
+ info = KSharedMemoryInfo::Allocate(m_kernel);
+ R_UNLESS(info != nullptr, ResultOutOfResource);
-void KProcess::Finalize() {
- // Free all shared memory infos.
- {
- auto it = m_shared_memory_list.begin();
- while (it != m_shared_memory_list.end()) {
- KSharedMemoryInfo* info = *it;
- KSharedMemory* shmem = info->GetSharedMemory();
+ // Initialize the info and add it to our list.
+ info->Initialize(shmem);
+ m_shared_memory_list.push_back(*info);
+ }
- while (!info->Close()) {
- shmem->Close();
- }
+ // Open a reference to the shared memory and its info.
+ shmem->Open();
+ info->Open();
- shmem->Close();
+ R_SUCCEED();
+}
- it = m_shared_memory_list.erase(it);
- KSharedMemoryInfo::Free(m_kernel, info);
+void KProcess::RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
+ // Lock ourselves, to prevent concurrent access.
+ KScopedLightLock lk(m_state_lock);
+
+ // Find an existing info for the memory.
+ KSharedMemoryInfo* info = nullptr;
+ auto it = m_shared_memory_list.begin();
+ for (; it != m_shared_memory_list.end(); ++it) {
+ if (it->GetSharedMemory() == shmem) {
+ info = std::addressof(*it);
+ break;
}
}
+ ASSERT(info != nullptr);
- // Release memory to the resource limit.
- if (m_resource_limit != nullptr) {
- m_resource_limit->Close();
- m_resource_limit = nullptr;
+ // Close a reference to the info and its memory.
+ if (info->Close()) {
+ m_shared_memory_list.erase(it);
+ KSharedMemoryInfo::Free(m_kernel, info);
}
- // Finalize the page table.
- m_page_table.Finalize();
-
- // Perform inherited finalization.
- KSynchronizationObject::Finalize();
+ shmem->Close();
}
Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
@@ -518,7 +601,7 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
// See if we can get a region from a partially used TLP.
{
- KScopedSchedulerLock sl{m_kernel};
+ KScopedSchedulerLock sl(m_kernel);
if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
tlr = it->Reserve();
@@ -538,7 +621,9 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
// Allocate a new page.
tlp = KThreadLocalPage::Allocate(m_kernel);
R_UNLESS(tlp != nullptr, ResultOutOfMemory);
- auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });
+ ON_RESULT_FAILURE {
+ KThreadLocalPage::Free(m_kernel, tlp);
+ };
// Initialize the new page.
R_TRY(tlp->Initialize(m_kernel, this));
@@ -549,7 +634,7 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
// Insert into our tree.
{
- KScopedSchedulerLock sl{m_kernel};
+ KScopedSchedulerLock sl(m_kernel);
if (tlp->IsAllUsed()) {
m_fully_used_tlp_tree.insert(*tlp);
} else {
@@ -558,7 +643,6 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
}
// We succeeded!
- tlp_guard.Cancel();
*out = tlr;
R_SUCCEED();
}
@@ -568,7 +652,7 @@ Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
// Release the region.
{
- KScopedSchedulerLock sl{m_kernel};
+ KScopedSchedulerLock sl(m_kernel);
// Try to find the page in the partially used list.
auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
@@ -611,95 +695,213 @@ Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
R_SUCCEED();
}
-bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
- const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
- return wp.type == DebugWatchpointType::None;
- })};
+bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ return rl->Reserve(which, value);
+ } else {
+ return true;
+ }
+}
- if (watch == m_watchpoints.end()) {
- return false;
+bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ return rl->Reserve(which, value, timeout);
+ } else {
+ return true;
}
+}
- watch->start_address = addr;
- watch->end_address = addr + size;
- watch->type = type;
+void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ rl->Release(which, value);
+ }
+}
- for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
- page += PageSize) {
- m_debug_page_refcounts[page]++;
- this->GetMemory().MarkRegionDebug(page, PageSize, true);
+void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ rl->Release(which, value, hint);
}
+}
- return true;
+void KProcess::IncrementRunningThreadCount() {
+ ASSERT(m_num_running_threads.load() >= 0);
+
+ ++m_num_running_threads;
}
-bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
- const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
- return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
- })};
+void KProcess::DecrementRunningThreadCount() {
+ ASSERT(m_num_running_threads.load() > 0);
- if (watch == m_watchpoints.end()) {
+ if (const auto prev = m_num_running_threads--; prev == 1) {
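+        // The last running thread has stopped; begin terminating the process.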
+ this->Terminate();
+ }
+}
+
+bool KProcess::EnterUserException() {
+ // Get the current thread.
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+ ASSERT(this == cur_thread->GetOwnerProcess());
+
+ // Check that we haven't already claimed the exception thread.
+ if (m_exception_thread == cur_thread) {
return false;
}
- watch->start_address = 0;
- watch->end_address = 0;
- watch->type = DebugWatchpointType::None;
+ // Create the wait queue we'll be using.
+ ThreadQueueImplForKProcessEnterUserException wait_queue(m_kernel,
+ std::addressof(m_exception_thread));
- for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
- page += PageSize) {
- m_debug_page_refcounts[page]--;
- if (!m_debug_page_refcounts[page]) {
- this->GetMemory().MarkRegionDebug(page, PageSize, false);
+ // Claim the exception thread.
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl(m_kernel);
+
+ // Check that we're not terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ return false;
}
+
+ // If we don't have an exception thread, we can just claim it directly.
+ if (m_exception_thread == nullptr) {
+ m_exception_thread = cur_thread;
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ return true;
+ }
+
+ // Otherwise, we need to wait until we don't have an exception thread.
+
+ // Add the current thread as a waiter on the current exception thread.
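+        // (The low bit appears to tag this as a kernel address key; the pointer is aligned,
+        // so the bit cannot collide with a real address value.)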
+ cur_thread->SetKernelAddressKey(
+ reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
+ m_exception_thread->AddWaiter(cur_thread);
+
+ // Wait to claim the exception thread.
+ cur_thread->BeginWait(std::addressof(wait_queue));
}
- return true;
+ // If our wait didn't end due to thread termination, we succeeded.
+ return ResultTerminationRequested != cur_thread->GetWaitResult();
}
-void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
- const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
- Svc::MemoryPermission permission) {
- m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
- };
+bool KProcess::LeaveUserException() {
+ return this->ReleaseUserException(GetCurrentThreadPointer(m_kernel));
+}
- this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
+bool KProcess::ReleaseUserException(KThread* thread) {
+ KScopedSchedulerLock sl(m_kernel);
- ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
- ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
- ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
+ if (m_exception_thread == thread) {
+ m_exception_thread = nullptr;
+
+ // Remove waiter thread.
+ bool has_waiters;
+ if (KThread* next = thread->RemoveKernelWaiterByKey(
+ std::addressof(has_waiters),
+ reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
+ next != nullptr) {
+ next->EndWait(ResultSuccess);
+ }
+
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+
+ return true;
+ } else {
+ return false;
+ }
}
-bool KProcess::IsSignaled() const {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
- return m_is_signaled;
+void KProcess::RegisterThread(KThread* thread) {
+ KScopedLightLock lk(m_list_lock);
+
+ m_thread_list.push_back(*thread);
}
-KProcess::KProcess(KernelCore& kernel)
- : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()},
- m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()},
- m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {}
+void KProcess::UnregisterThread(KThread* thread) {
+ KScopedLightLock lk(m_list_lock);
-KProcess::~KProcess() = default;
+ m_thread_list.erase(m_thread_list.iterator_to(*thread));
+}
+
+size_t KProcess::GetUsedUserPhysicalMemorySize() const {
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
+ const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
+
+ return norm_size + other_size + sec_size;
+}
-void KProcess::ChangeState(State new_state) {
- if (m_state == new_state) {
- return;
+size_t KProcess::GetTotalUserPhysicalMemorySize() const {
+ // Get the amount of free and used size.
+ const size_t free_size =
+ m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
+ const size_t max_size = m_max_process_memory;
+
+ // Determine used size.
+ // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
+ // GetUsedUserPhysicalMemorySize().
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
+ const size_t sec_size = this->GetRequiredSecureMemorySize();
+ const size_t used_size = norm_size + other_size + sec_size;
+
+    // NOTE: These function calls recalculate the used size, introducing a race; it is unclear
+    // why Nintendo does it this way.
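+    // The result is clamped so that a process never observes more memory than its configured
+    // maximum process memory.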
+ if (used_size + free_size > max_size) {
+ return max_size;
+ } else {
+ return free_size + this->GetUsedUserPhysicalMemorySize();
}
+}
+
+size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
- m_state = new_state;
- m_is_signaled = true;
- this->NotifyAvailable();
+ return norm_size + other_size;
}
-Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
+ // Get the amount of free and used size.
+ const size_t free_size =
+ m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
+ const size_t max_size = m_max_process_memory;
+
+ // Determine used size.
+ // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
+ // GetUsedUserPhysicalMemorySize().
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
+ const size_t sec_size = this->GetRequiredSecureMemorySize();
+ const size_t used_size = norm_size + other_size + sec_size;
+
+    // NOTE: These function calls recalculate the used size, introducing a race; it is unclear
+    // why Nintendo does it this way.
+ if (used_size + free_size > max_size) {
+ return max_size - this->GetRequiredSecureMemorySizeNonDefault();
+ } else {
+ return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
+ }
+}
+
+Result KProcess::Run(s32 priority, size_t stack_size) {
+ // Lock ourselves, to prevent concurrent access.
+ KScopedLightLock lk(m_state_lock);
+
+ // Validate that we're in a state where we can initialize.
+ const auto state = m_state;
+ R_UNLESS(state == State::Created || state == State::CreatedAttached, ResultInvalidState);
+
+ // Place a tentative reservation of a thread for this process.
+ KScopedResourceReservation thread_reservation(this, Svc::LimitableResource::ThreadCountMax);
+ R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);
+
// Ensure that we haven't already allocated stack.
ASSERT(m_main_thread_stack_size == 0);
// Ensure that we're allocating a valid stack.
stack_size = Common::AlignUp(stack_size, PageSize);
- // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
- R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory);
+ R_UNLESS(stack_size + m_code_size <= m_max_process_memory, ResultOutOfMemory);
+ R_UNLESS(stack_size + m_code_size >= m_code_size, ResultOutOfMemory);
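+    // (The second check guards against unsigned overflow in stack_size + m_code_size.)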
// Place a tentative reservation of memory for our new stack.
KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
@@ -707,21 +909,370 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
// Allocate and map our stack.
+ KProcessAddress stack_top = 0;
if (stack_size) {
KProcessAddress stack_bottom;
R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
- m_main_thread_stack_top = stack_bottom + stack_size;
+ stack_top = stack_bottom + stack_size;
m_main_thread_stack_size = stack_size;
}
+ // Ensure our stack is safe to clean up on exit.
+ ON_RESULT_FAILURE {
+ if (m_main_thread_stack_size) {
+ ASSERT(R_SUCCEEDED(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size,
+ m_main_thread_stack_size / PageSize,
+ KMemoryState::Stack)));
+ m_main_thread_stack_size = 0;
+ }
+ };
+
+ // Set our maximum heap size.
+ R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory -
+ (m_main_thread_stack_size + m_code_size)));
+
+ // Initialize our handle table.
+ R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
+ ON_RESULT_FAILURE_2 {
+ this->FinalizeHandleTable();
+ };
+
+ // Create a new thread for the process.
+ KThread* main_thread = KThread::Create(m_kernel);
+ R_UNLESS(main_thread != nullptr, ResultOutOfResource);
+ SCOPE_EXIT({ main_thread->Close(); });
+
+ // Initialize the thread.
+ R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
+ stack_top, priority, m_ideal_core_id, this));
+
+ // Register the thread, and commit our reservation.
+ KThread::Register(m_kernel, main_thread);
+ thread_reservation.Commit();
+
+ // Add the thread to our handle table.
+ Handle thread_handle;
+ R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));
+
+ // Set the thread arguments.
+ main_thread->GetContext32().cpu_registers[0] = 0;
+ main_thread->GetContext64().cpu_registers[0] = 0;
+ main_thread->GetContext32().cpu_registers[1] = thread_handle;
+ main_thread->GetContext64().cpu_registers[1] = thread_handle;
+
+ // Update our state.
+ this->ChangeState((state == State::Created) ? State::Running : State::RunningAttached);
+ ON_RESULT_FAILURE_2 {
+ this->ChangeState(state);
+ };
+
+ // Suspend for debug, if we should.
+ if (m_kernel.System().DebuggerEnabled()) {
+ main_thread->RequestSuspend(SuspendType::Debug);
+ }
+
+ // Run our thread.
+ R_TRY(main_thread->Run());
+
+ // Open a reference to represent that we're running.
+ this->Open();
+
// We succeeded! Commit our memory reservation.
mem_reservation.Commit();
R_SUCCEED();
}
+
+Result KProcess::Reset() {
+ // Lock the process and the scheduler.
+ KScopedLightLock lk(m_state_lock);
+ KScopedSchedulerLock sl(m_kernel);
+
+    // Validate that we're in a state that can be reset.
+ R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+ R_UNLESS(m_is_signaled, ResultInvalidState);
+
+ // Clear signaled.
+ m_is_signaled = false;
+ R_SUCCEED();
+}
+
+Result KProcess::SetActivity(Svc::ProcessActivity activity) {
+ // Lock ourselves and the scheduler.
+ KScopedLightLock lk(m_state_lock);
+ KScopedLightLock list_lk(m_list_lock);
+ KScopedSchedulerLock sl(m_kernel);
+
+ // Validate our state.
+ R_UNLESS(m_state != State::Terminating, ResultInvalidState);
+ R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+
+ // Either pause or resume.
+ if (activity == Svc::ProcessActivity::Paused) {
+ // Verify that we're not suspended.
+ R_UNLESS(!m_is_suspended, ResultInvalidState);
+
+ // Suspend all threads.
+ auto end = this->GetThreadList().end();
+ for (auto it = this->GetThreadList().begin(); it != end; ++it) {
+ it->RequestSuspend(SuspendType::Process);
+ }
+
+ // Set ourselves as suspended.
+ this->SetSuspended(true);
+ } else {
+ ASSERT(activity == Svc::ProcessActivity::Runnable);
+
+ // Verify that we're suspended.
+ R_UNLESS(m_is_suspended, ResultInvalidState);
+
+ // Resume all threads.
+ auto end = this->GetThreadList().end();
+ for (auto it = this->GetThreadList().begin(); it != end; ++it) {
+ it->Resume(SuspendType::Process);
+ }
+
+ // Set ourselves as resumed.
+ this->SetSuspended(false);
+ }
+
+ R_SUCCEED();
+}
+
+void KProcess::PinCurrentThread() {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(m_kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+
+ // If the thread isn't terminated, pin it.
+ if (!cur_thread->IsTerminationRequested()) {
+ // Pin it.
+ this->PinThread(core_id, cur_thread);
+ cur_thread->Pin(core_id);
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ }
+}
+
+void KProcess::UnpinCurrentThread() {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(m_kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+
+ // Unpin it.
+ cur_thread->Unpin();
+ this->UnpinThread(core_id, cur_thread);
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+}
+
+void KProcess::UnpinThread(KThread* thread) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Get the thread's core id.
+ const auto core_id = thread->GetActiveCore();
+
+ // Unpin it.
+ this->UnpinThread(core_id, thread);
+ thread->Unpin();
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+}
+
+Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids,
+ s32 max_out_count) {
+ // TODO: use current memory reference
+ auto& memory = m_kernel.System().ApplicationMemory();
+
+ // Lock the list.
+ KScopedLightLock lk(m_list_lock);
+
+ // Iterate over the list.
+ s32 count = 0;
+ auto end = this->GetThreadList().end();
+ for (auto it = this->GetThreadList().begin(); it != end; ++it) {
+ // If we're within array bounds, write the id.
+ if (count < max_out_count) {
+ // Get the thread id.
+ KThread* thread = std::addressof(*it);
+ const u64 id = thread->GetId();
+
+ // Copy the id to userland.
+ memory.Write64(out_thread_ids + count * sizeof(u64), id);
+ }
+
+ // Increment the count.
+ ++count;
+ }
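+    // The count includes every thread, letting callers detect a truncated write.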
+
+ // We successfully iterated the list.
+ *out_num_threads = count;
+ R_SUCCEED();
+}
+
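+// Process switching is a no-op for the emulator: there is no hardware address
+// space to install when the scheduler moves between processes.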
+void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
+
+KProcess::KProcess(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
+ m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
+ m_handle_table{kernel} {}
+
+KProcess::~KProcess() = default;
+
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
+ KProcessAddress aslr_space_start, bool is_hbl) {
+ // Create a resource limit for the process.
+ const auto physical_memory_size =
+ m_kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
+ auto* res_limit =
+ Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
+
+ // Ensure we maintain a clean state on exit.
+ SCOPE_EXIT({ res_limit->Close(); });
+
+ // Declare flags and code address.
+ Svc::CreateProcessFlag flag{};
+ u64 code_address{};
+
+ // We are an application.
+ flag |= Svc::CreateProcessFlag::IsApplication;
+
+ // If we are 64-bit, create as such.
+ if (metadata.Is64BitProgram()) {
+ flag |= Svc::CreateProcessFlag::Is64Bit;
+ }
+
+ // Set the address space type and code address.
+ switch (metadata.GetAddressSpaceType()) {
+ case FileSys::ProgramAddressSpaceType::Is39Bit:
+ flag |= Svc::CreateProcessFlag::AddressSpace64Bit;
+
+ // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large.
+ // However, some (buggy) programs/libraries like skyline incorrectly depend on the
+ // existence of ASLR pages before the entry point, so we will adjust the load address
+ // to point to about 2GiB into the ASLR region.
+ code_address = 0x8000'0000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is36Bit:
+ flag |= Svc::CreateProcessFlag::AddressSpace64BitDeprecated;
+ code_address = 0x800'0000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is32Bit:
+ flag |= Svc::CreateProcessFlag::AddressSpace32Bit;
+ code_address = 0x20'0000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
+ flag |= Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias;
+ code_address = 0x20'0000;
+ break;
+ }
+
+ Svc::CreateProcessParameter params{
+ .name = {},
+ .version = {},
+ .program_id = metadata.GetTitleID(),
+ .code_address = code_address + GetInteger(aslr_space_start),
+ .code_num_pages = static_cast<s32>(code_size / PageSize),
+ .flags = flag,
+ .reslimit = Svc::InvalidHandle,
+ .system_resource_num_pages = static_cast<s32>(metadata.GetSystemResourceSize() / PageSize),
+ };
+
+ // Set the process name.
+ const auto& name = metadata.GetName();
+ static_assert(sizeof(params.name) <= sizeof(name));
+ std::memcpy(params.name.data(), name.data(), sizeof(params.name));
+
+ // Initialize for application process.
+ R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit,
+ KMemoryManager::Pool::Application, aslr_space_start));
+
+ // Assign remaining properties.
+ m_is_hbl = is_hbl;
+ m_ideal_core_id = metadata.GetMainThreadCore();
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
+ const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
+ Svc::MemoryPermission permission) {
+ m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+ };
+
+ this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
+
+ ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
+ ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
+ ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
+
+#ifdef HAS_NCE
+ if (Settings::IsNceEnabled()) {
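+        // Make the code and patch regions executable on the host, and reprotect
+        // the patch segment to None so the guest cannot access it directly.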
+ auto& buffer = m_kernel.System().DeviceMemory().buffer;
+ const auto& code = code_set.CodeSegment();
+ const auto& patch = code_set.PatchSegment();
+ buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
+ buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+ ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
+ }
+#endif
+}
+
+bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
+ const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
+ return wp.type == DebugWatchpointType::None;
+ })};
+
+ if (watch == m_watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = addr;
+ watch->end_address = addr + size;
+ watch->type = type;
+
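+    // Mark each page the watchpoint touches as debug memory, refcounting pages so
+    // overlapping watchpoints do not unmark a page that is still covered.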
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
+ m_debug_page_refcounts[page]++;
+ this->GetMemory().MarkRegionDebug(page, PageSize, true);
+ }
+
+ return true;
+}
+
+bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
+ const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
+ return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
+ })};
+
+ if (watch == m_watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = 0;
+ watch->end_address = 0;
+ watch->type = DebugWatchpointType::None;
+
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
+ m_debug_page_refcounts[page]--;
+ if (!m_debug_page_refcounts[page]) {
+ this->GetMemory().MarkRegionDebug(page, PageSize, false);
+ }
+ }
+
+ return true;
+}
+
Core::Memory::Memory& KProcess::GetMemory() const {
// TODO: per-process memory
return m_kernel.System().ApplicationMemory();
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 146e07a57..d8cd0fdde 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -1,59 +1,24 @@
-// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <array>
-#include <cstddef>
-#include <list>
#include <map>
-#include <string>
+
+#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
-#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_process_page_table.h"
+#include "core/hle/kernel/k_system_resource.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/kernel/k_worker_task.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/slab_helpers.h"
-#include "core/hle/result.h"
-
-namespace Core {
-namespace Memory {
-class Memory;
-};
-
-class System;
-} // namespace Core
-
-namespace FileSys {
-class ProgramMetadata;
-}
namespace Kernel {
-class KernelCore;
-class KResourceLimit;
-class KThread;
-class KSharedMemoryInfo;
-class TLSPage;
-
-struct CodeSet;
-
-enum class MemoryRegion : u16 {
- APPLICATION = 1,
- SYSTEM = 2,
- BASE = 3,
-};
-
-enum class ProcessActivity : u32 {
- Runnable,
- Paused,
-};
-
enum class DebugWatchpointType : u8 {
None = 0,
Read = 1 << 0,
@@ -72,9 +37,6 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public:
- explicit KProcess(KernelCore& kernel);
- ~KProcess() override;
-
enum class State {
Created = static_cast<u32>(Svc::ProcessState::Created),
CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
@@ -86,470 +48,502 @@ public:
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
};
- enum : u64 {
- /// Lowest allowed process ID for a kernel initial process.
- InitialKIPIDMin = 1,
- /// Highest allowed process ID for a kernel initial process.
- InitialKIPIDMax = 80,
+ using ThreadList = Common::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
- /// Lowest allowed process ID for a userland process.
- ProcessIDMin = 81,
- /// Highest allowed process ID for a userland process.
- ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
- };
+ static constexpr size_t AslrAlignment = 2_MiB;
- // Used to determine how process IDs are assigned.
- enum class ProcessType {
- KernelInternal,
- Userland,
- };
-
- static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
+public:
+ static constexpr u64 InitialProcessIdMin = 1;
+ static constexpr u64 InitialProcessIdMax = 0x50;
- static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit);
+ static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
+ static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
- /// Gets a reference to the process' page table.
- KPageTable& GetPageTable() {
- return m_page_table;
- }
+private:
+ using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
+ using TLPTree =
+ Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
+ using TLPIterator = TLPTree::iterator;
- /// Gets const a reference to the process' page table.
- const KPageTable& GetPageTable() const {
- return m_page_table;
- }
+private:
+ KProcessPageTable m_page_table;
+ std::atomic<size_t> m_used_kernel_memory_size{};
+ TLPTree m_fully_used_tlp_tree{};
+ TLPTree m_partially_used_tlp_tree{};
+ s32 m_ideal_core_id{};
+ KResourceLimit* m_resource_limit{};
+ KSystemResource* m_system_resource{};
+ size_t m_memory_release_hint{};
+ State m_state{};
+ KLightLock m_state_lock;
+ KLightLock m_list_lock;
+ KConditionVariable m_cond_var;
+ KAddressArbiter m_address_arbiter;
+ std::array<u64, 4> m_entropy{};
+ bool m_is_signaled{};
+ bool m_is_initialized{};
+ bool m_is_application{};
+ bool m_is_default_application_system_resource{};
+ bool m_is_hbl{};
+ std::array<char, 13> m_name{};
+ std::atomic<u16> m_num_running_threads{};
+ Svc::CreateProcessFlag m_flags{};
+ KMemoryManager::Pool m_memory_pool{};
+ s64 m_schedule_count{};
+ KCapabilities m_capabilities{};
+ u64 m_program_id{};
+ u64 m_process_id{};
+ KProcessAddress m_code_address{};
+ size_t m_code_size{};
+ size_t m_main_thread_stack_size{};
+ size_t m_max_process_memory{};
+ u32 m_version{};
+ KHandleTable m_handle_table;
+ KProcessAddress m_plr_address{};
+ KThread* m_exception_thread{};
+ ThreadList m_thread_list{};
+ SharedMemoryInfoList m_shared_memory_list{};
+ bool m_is_suspended{};
+ bool m_is_immortal{};
+ bool m_is_handle_table_initialized{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_switch_counts{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
+ std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
+ std::map<KProcessAddress, u64> m_debug_page_refcounts{};
+ std::atomic<s64> m_cpu_time{};
+ std::atomic<s64> m_num_process_switches{};
+ std::atomic<s64> m_num_thread_switches{};
+ std::atomic<s64> m_num_fpu_switches{};
+ std::atomic<s64> m_num_supervisor_calls{};
+ std::atomic<s64> m_num_ipc_messages{};
+ std::atomic<s64> m_num_ipc_replies{};
+ std::atomic<s64> m_num_ipc_receives{};
+#ifdef HAS_NCE
+ std::unordered_map<u64, u64> m_post_handlers{};
+#endif
- /// Gets a reference to the process' handle table.
- KHandleTable& GetHandleTable() {
- return m_handle_table;
- }
+private:
+ Result StartTermination();
+ void FinishTermination();
- /// Gets a const reference to the process' handle table.
- const KHandleTable& GetHandleTable() const {
- return m_handle_table;
+ void PinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(m_pinned_threads[core_id] == nullptr);
+ m_pinned_threads[core_id] = thread;
}
- /// Gets a reference to process's memory.
- Core::Memory::Memory& GetMemory() const;
-
- Result SignalToAddress(KProcessAddress address) {
- return m_condition_var.SignalToAddress(address);
+ void UnpinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(m_pinned_threads[core_id] == thread);
+ m_pinned_threads[core_id] = nullptr;
}
- Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
- return m_condition_var.WaitForAddress(handle, address, tag);
- }
+public:
+ explicit KProcess(KernelCore& kernel);
+ ~KProcess() override;
- void SignalConditionVariable(u64 cv_key, int32_t count) {
- return m_condition_var.Signal(cv_key, count);
- }
+ Result Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
+ bool is_real);
- Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
- R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
- }
+ Result Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
+ std::span<const u32> caps, KResourceLimit* res_limit,
+ KMemoryManager::Pool pool, bool immortal);
+ Result Initialize(const Svc::CreateProcessParameter& params, std::span<const u32> user_caps,
+ KResourceLimit* res_limit, KMemoryManager::Pool pool,
+ KProcessAddress aslr_space_start);
+ void Exit();
- Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
- s32 count) {
- R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
+ const char* GetName() const {
+ return m_name.data();
}
- Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
- s64 timeout) {
- R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+ u64 GetProgramId() const {
+ return m_program_id;
}
- KProcessAddress GetProcessLocalRegionAddress() const {
- return m_plr_address;
+ u64 GetProcessId() const {
+ return m_process_id;
}
- /// Gets the current status of the process
State GetState() const {
return m_state;
}
- /// Gets the unique ID that identifies this particular process.
- u64 GetProcessId() const {
- return m_process_id;
+ u64 GetCoreMask() const {
+ return m_capabilities.GetCoreMask();
+ }
+ u64 GetPhysicalCoreMask() const {
+ return m_capabilities.GetPhysicalCoreMask();
+ }
+ u64 GetPriorityMask() const {
+ return m_capabilities.GetPriorityMask();
}
- /// Gets the program ID corresponding to this process.
- u64 GetProgramId() const {
- return m_program_id;
+ s32 GetIdealCoreId() const {
+ return m_ideal_core_id;
+ }
+ void SetIdealCoreId(s32 core_id) {
+ m_ideal_core_id = core_id;
}
- KProcessAddress GetEntryPoint() const {
- return m_code_address;
+ bool CheckThreadPriority(s32 prio) const {
+ return ((1ULL << prio) & this->GetPriorityMask()) != 0;
}
- /// Gets the resource limit descriptor for this process
- KResourceLimit* GetResourceLimit() const;
+ u32 GetCreateProcessFlags() const {
+ return static_cast<u32>(m_flags);
+ }
- /// Gets the ideal CPU core ID for this process
- u8 GetIdealCoreId() const {
- return m_ideal_core;
+ bool Is64Bit() const {
+ return True(m_flags & Svc::CreateProcessFlag::Is64Bit);
}
- /// Checks if the specified thread priority is valid.
- bool CheckThreadPriority(s32 prio) const {
- return ((1ULL << prio) & GetPriorityMask()) != 0;
+ KProcessAddress GetEntryPoint() const {
+ return m_code_address;
}
- /// Gets the bitmask of allowed cores that this process' threads can run on.
- u64 GetCoreMask() const {
- return m_capabilities.GetCoreMask();
+ size_t GetMainStackSize() const {
+ return m_main_thread_stack_size;
}
- /// Gets the bitmask of allowed thread priorities.
- u64 GetPriorityMask() const {
- return m_capabilities.GetPriorityMask();
+ KMemoryManager::Pool GetMemoryPool() const {
+ return m_memory_pool;
}
- /// Gets the amount of secure memory to allocate for memory management.
- u32 GetSystemResourceSize() const {
- return m_system_resource_size;
+ u64 GetRandomEntropy(size_t i) const {
+ return m_entropy[i];
}
- /// Gets the amount of secure memory currently in use for memory management.
- u32 GetSystemResourceUsage() const {
- // On hardware, this returns the amount of system resource memory that has
- // been used by the kernel. This is problematic for Yuzu to emulate, because
- // system resource memory is used for page tables -- and yuzu doesn't really
- // have a way to calculate how much memory is required for page tables for
- // the current process at any given time.
- // TODO: Is this even worth implementing? Games may retrieve this value via
- // an SDK function that gets used + available system resource size for debug
- // or diagnostic purposes. However, it seems unlikely that a game would make
- // decisions based on how much system memory is dedicated to its page tables.
- // Is returning a value other than zero wise?
- return 0;
+ bool IsApplication() const {
+ return m_is_application;
}
- /// Whether this process is an AArch64 or AArch32 process.
- bool Is64BitProcess() const {
- return m_is_64bit_process;
+ bool IsDefaultApplicationSystemResource() const {
+ return m_is_default_application_system_resource;
}
bool IsSuspended() const {
return m_is_suspended;
}
-
void SetSuspended(bool suspended) {
m_is_suspended = suspended;
}
- /// Gets the total running time of the process instance in ticks.
- u64 GetCPUTimeTicks() const {
- return m_total_process_running_time_ticks;
+ Result Terminate();
+
+ bool IsTerminated() const {
+ return m_state == State::Terminated;
}
- /// Updates the total running time, adding the given ticks to it.
- void UpdateCPUTimeTicks(u64 ticks) {
- m_total_process_running_time_ticks += ticks;
+ bool IsPermittedSvc(u32 svc_id) const {
+ return m_capabilities.IsPermittedSvc(svc_id);
}
- /// Gets the process schedule count, used for thread yielding
- s64 GetScheduledCount() const {
- return m_schedule_count;
+ bool IsPermittedInterrupt(s32 interrupt_id) const {
+ return m_capabilities.IsPermittedInterrupt(interrupt_id);
}
- /// Increments the process schedule count, used for thread yielding.
- void IncrementScheduledCount() {
- ++m_schedule_count;
+ bool IsPermittedDebug() const {
+ return m_capabilities.IsPermittedDebug();
}
- void IncrementRunningThreadCount();
- void DecrementRunningThreadCount();
+ bool CanForceDebug() const {
+ return m_capabilities.CanForceDebug();
+ }
- void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
- m_running_threads[core] = thread;
- m_running_thread_idle_counts[core] = idle_count;
+ bool IsHbl() const {
+ return m_is_hbl;
}
- void ClearRunningThread(KThread* thread) {
- for (size_t i = 0; i < m_running_threads.size(); ++i) {
- if (m_running_threads[i] == thread) {
- m_running_threads[i] = nullptr;
- }
- }
+ u32 GetAllocateOption() const {
+ return m_page_table.GetAllocateOption();
}
- [[nodiscard]] KThread* GetRunningThread(s32 core) const {
- return m_running_threads[core];
+ ThreadList& GetThreadList() {
+ return m_thread_list;
+ }
+ const ThreadList& GetThreadList() const {
+ return m_thread_list;
}
+ bool EnterUserException();
+ bool LeaveUserException();
bool ReleaseUserException(KThread* thread);
- [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
+ KThread* GetPinnedThread(s32 core_id) const {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return m_pinned_threads[core_id];
}
- /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
- u64 GetRandomEntropy(std::size_t index) const {
- return m_random_entropy.at(index);
+ const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
+ return m_capabilities.GetSvcPermissions();
}
- /// Retrieves the total physical memory available to this process in bytes.
- u64 GetTotalPhysicalMemoryAvailable();
-
- /// Retrieves the total physical memory available to this process in bytes,
- /// without the size of the personal system resource heap added to it.
- u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
-
- /// Retrieves the total physical memory used by this process in bytes.
- u64 GetTotalPhysicalMemoryUsed();
+ KResourceLimit* GetResourceLimit() const {
+ return m_resource_limit;
+ }
- /// Retrieves the total physical memory used by this process in bytes,
- /// without the size of the personal system resource heap added to it.
- u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
+ bool ReserveResource(Svc::LimitableResource which, s64 value);
+ bool ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout);
+ void ReleaseResource(Svc::LimitableResource which, s64 value);
+ void ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint);
- /// Gets the list of all threads created with this process as their owner.
- std::list<KThread*>& GetThreadList() {
- return m_thread_list;
+ KLightLock& GetStateLock() {
+ return m_state_lock;
+ }
+ KLightLock& GetListLock() {
+ return m_list_lock;
}
- /// Registers a thread as being created under this process,
- /// adding it to this process' thread list.
- void RegisterThread(KThread* thread);
+ KProcessPageTable& GetPageTable() {
+ return m_page_table;
+ }
+ const KProcessPageTable& GetPageTable() const {
+ return m_page_table;
+ }
- /// Unregisters a thread from this process, removing it
- /// from this process' thread list.
- void UnregisterThread(KThread* thread);
+ KHandleTable& GetHandleTable() {
+ return m_handle_table;
+ }
+ const KHandleTable& GetHandleTable() const {
+ return m_handle_table;
+ }
- /// Retrieves the number of available threads for this process.
- u64 GetFreeThreadCount() const;
-
- /// Clears the signaled state of the process if and only if it's signaled.
- ///
- /// @pre The process must not be already terminated. If this is called on a
- /// terminated process, then ResultInvalidState will be returned.
- ///
- /// @pre The process must be in a signaled state. If this is called on a
- /// process instance that is not signaled, ResultInvalidState will be
- /// returned.
- Result Reset();
+ size_t GetUsedUserPhysicalMemorySize() const;
+ size_t GetTotalUserPhysicalMemorySize() const;
+ size_t GetUsedNonSystemUserPhysicalMemorySize() const;
+ size_t GetTotalNonSystemUserPhysicalMemorySize() const;
- /**
- * Loads process-specifics configuration info with metadata provided
- * by an executable.
- *
- * @param metadata The provided metadata to load process specific info from.
- *
- * @returns ResultSuccess if all relevant metadata was able to be
- * loaded and parsed. Otherwise, an error code is returned.
- */
- Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
- bool is_hbl);
-
- /**
- * Starts the main application thread for this process.
- *
- * @param main_thread_priority The priority for the main thread.
- * @param stack_size The stack size for the main thread in bytes.
- */
- void Run(s32 main_thread_priority, u64 stack_size);
-
- /**
- * Prepares a process for termination by stopping all of its threads
- * and clearing any other resources.
- */
- void PrepareForTermination();
+ Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+ void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
- void LoadModule(CodeSet code_set, KProcessAddress base_addr);
+ Result CreateThreadLocalRegion(KProcessAddress* out);
+ Result DeleteThreadLocalRegion(KProcessAddress addr);
- bool IsInitialized() const override {
- return m_is_initialized;
+ KProcessAddress GetProcessLocalRegionAddress() const {
+ return m_plr_address;
}
- static void PostDestroy(uintptr_t arg) {}
-
- void Finalize() override;
-
- u64 GetId() const override {
- return GetProcessId();
+ KThread* GetExceptionThread() const {
+ return m_exception_thread;
}
- bool IsHbl() const {
- return m_is_hbl;
+ void AddCpuTime(s64 diff) {
+ m_cpu_time += diff;
+ }
+ s64 GetCpuTime() {
+ return m_cpu_time.load();
}
- bool IsSignaled() const override;
-
- void DoWorkerTaskImpl();
+ s64 GetScheduledCount() const {
+ return m_schedule_count;
+ }
+ void IncrementScheduledCount() {
+ ++m_schedule_count;
+ }
- Result SetActivity(ProcessActivity activity);
+ void IncrementRunningThreadCount();
+ void DecrementRunningThreadCount();
- void PinCurrentThread(s32 core_id);
- void UnpinCurrentThread(s32 core_id);
- void UnpinThread(KThread* thread);
+ size_t GetRequiredSecureMemorySizeNonDefault() const {
+ if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->CalculateRequiredSecureMemorySize();
+ }
- KLightLock& GetStateLock() {
- return m_state_lock;
+ return 0;
}
- Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
- void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
-
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Thread-local storage management
-
- // Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
+ size_t GetRequiredSecureMemorySize() const {
+ if (m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->CalculateRequiredSecureMemorySize();
+ }
- // Frees a used TLS slot identified by the given address
- Result DeleteThreadLocalRegion(KProcessAddress addr);
+ return 0;
+ }
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Debug watchpoint management
+ size_t GetTotalSystemResourceSize() const {
+ if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->GetSize();
+ }
- // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
- bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
+ return 0;
+ }
- // Attempts to remove the watchpoint specified by the given parameters.
- bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
+ size_t GetUsedSystemResourceSize() const {
+ if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->GetUsedSize();
+ }
- const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
- return m_watchpoints;
+ return 0;
}
- const std::string& GetName() {
- return name;
+ void SetRunningThread(s32 core, KThread* thread, u64 idle_count, u64 switch_count) {
+ m_running_threads[core] = thread;
+ m_running_thread_idle_counts[core] = idle_count;
+ m_running_thread_switch_counts[core] = switch_count;
}
-private:
- void PinThread(s32 core_id, KThread* thread) {
- ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
- ASSERT(thread != nullptr);
- ASSERT(m_pinned_threads[core_id] == nullptr);
- m_pinned_threads[core_id] = thread;
+ void ClearRunningThread(KThread* thread) {
+ for (size_t i = 0; i < m_running_threads.size(); ++i) {
+ if (m_running_threads[i] == thread) {
+ m_running_threads[i] = nullptr;
+ }
+ }
}
- void UnpinThread(s32 core_id, KThread* thread) {
- ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
- ASSERT(thread != nullptr);
- ASSERT(m_pinned_threads[core_id] == thread);
- m_pinned_threads[core_id] = nullptr;
+ const KSystemResource& GetSystemResource() const {
+ return *m_system_resource;
}
- void FinalizeHandleTable() {
- // Finalize the table.
- m_handle_table.Finalize();
-
- // Note that the table is finalized.
- m_is_handle_table_initialized = false;
+ const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
+ return m_system_resource->GetMemoryBlockSlabManager();
+ }
+ const KBlockInfoManager& GetBlockInfoManager() const {
+ return m_system_resource->GetBlockInfoManager();
+ }
+ const KPageTableManager& GetPageTableManager() const {
+ return m_system_resource->GetPageTableManager();
}
- void ChangeState(State new_state);
-
- /// Allocates the main thread stack for the process, given the stack size in bytes.
- Result AllocateMainThreadStack(std::size_t stack_size);
-
- /// Memory manager for this process
- KPageTable m_page_table;
+ KThread* GetRunningThread(s32 core) const {
+ return m_running_threads[core];
+ }
+ u64 GetRunningThreadIdleCount(s32 core) const {
+ return m_running_thread_idle_counts[core];
+ }
+ u64 GetRunningThreadSwitchCount(s32 core) const {
+ return m_running_thread_switch_counts[core];
+ }
- /// Current status of the process
- State m_state{};
+ void RegisterThread(KThread* thread);
+ void UnregisterThread(KThread* thread);
- /// The ID of this process
- u64 m_process_id = 0;
+ Result Run(s32 priority, size_t stack_size);
- /// Title ID corresponding to the process
- u64 m_program_id = 0;
+ Result Reset();
- /// Specifies additional memory to be reserved for the process's memory management by the
- /// system. When this is non-zero, secure memory is allocated and used for page table allocation
- /// instead of using the normal global page tables/memory block management.
- u32 m_system_resource_size = 0;
+ void SetDebugBreak() {
+ if (m_state == State::RunningAttached) {
+ this->ChangeState(State::DebugBreak);
+ }
+ }
- /// Resource limit descriptor for this process
- KResourceLimit* m_resource_limit{};
+ void SetAttached() {
+ if (m_state == State::DebugBreak) {
+ this->ChangeState(State::RunningAttached);
+ }
+ }
- KVirtualAddress m_system_resource_address{};
+ Result SetActivity(Svc::ProcessActivity activity);
- /// The ideal CPU core for this process, threads are scheduled on this core by default.
- u8 m_ideal_core = 0;
+ void PinCurrentThread();
+ void UnpinCurrentThread();
+ void UnpinThread(KThread* thread);
- /// Contains the parsed process capability descriptors.
- ProcessCapabilities m_capabilities;
+ void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
+ return m_cond_var.Signal(cv_key, count);
+ }
- /// Whether or not this process is AArch64, or AArch32.
- /// By default, we currently assume this is true, unless otherwise
- /// specified by metadata provided to the process during loading.
- bool m_is_64bit_process = true;
+ Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
+ R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
+ }
- /// Total running time for the process in ticks.
- std::atomic<u64> m_total_process_running_time_ticks = 0;
+ Result SignalAddressArbiter(uintptr_t address, Svc::SignalType signal_type, s32 value,
+ s32 count) {
+ R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
+ }
- /// Per-process handle table for storing created object handles in.
- KHandleTable m_handle_table;
+ Result WaitAddressArbiter(uintptr_t address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
+ R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+ }
- /// Per-process address arbiter.
- KAddressArbiter m_address_arbiter;
+ Result GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids, s32 max_out_count);
- /// The per-process mutex lock instance used for handling various
- /// forms of services, such as lock arbitration, and condition
- /// variable related facilities.
- KConditionVariable m_condition_var;
+ static void Switch(KProcess* cur_process, KProcess* next_process);
- /// Address indicating the location of the process' dedicated TLS region.
- KProcessAddress m_plr_address = 0;
+#ifdef HAS_NCE
+ std::unordered_map<u64, u64>& GetPostHandlers() noexcept {
+ return m_post_handlers;
+ }
+#endif
- /// Address indicating the location of the process's entry point.
- KProcessAddress m_code_address = 0;
+public:
+ // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
+ bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
- /// Random values for svcGetInfo RandomEntropy
- std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
+ // Attempts to remove the watchpoint specified by the given parameters.
+ bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
- /// List of threads that are running with this process as their owner.
- std::list<KThread*> m_thread_list;
+ const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
+ return m_watchpoints;
+ }
- /// List of shared memory that are running with this process as their owner.
- std::list<KSharedMemoryInfo*> m_shared_memory_list;
+public:
+ Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
+ KProcessAddress aslr_space_start, bool is_hbl);
- /// Address of the top of the main thread's stack
- KProcessAddress m_main_thread_stack_top{};
+ void LoadModule(CodeSet code_set, KProcessAddress base_addr);
- /// Size of the main thread's stack
- std::size_t m_main_thread_stack_size{};
+ Core::Memory::Memory& GetMemory() const;
- /// Memory usage capacity for the process
- std::size_t m_memory_usage_capacity{};
+public:
+ // Overridden parent functions.
+ bool IsInitialized() const override {
+ return m_is_initialized;
+ }
- /// Process total image size
- std::size_t m_image_size{};
+ static void PostDestroy(uintptr_t arg) {}
- /// Schedule count of this process
- s64 m_schedule_count{};
+ void Finalize() override;
- size_t m_memory_release_hint{};
+ u64 GetIdImpl() const {
+ return this->GetProcessId();
+ }
+ u64 GetId() const override {
+ return this->GetIdImpl();
+ }
- std::string name{};
+    bool IsSignaled() const override {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ return m_is_signaled;
+ }
- bool m_is_signaled{};
- bool m_is_suspended{};
- bool m_is_immortal{};
- bool m_is_handle_table_initialized{};
- bool m_is_initialized{};
- bool m_is_hbl{};
+ void DoWorkerTaskImpl();
- std::atomic<u16> m_num_running_threads{};
+private:
+ void ChangeState(State new_state) {
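+        // Any real state transition signals the process and wakes any waiters.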
+ if (m_state != new_state) {
+ m_state = new_state;
+ m_is_signaled = true;
+ this->NotifyAvailable();
+ }
+ }
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
- std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
- std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
- std::map<KProcessAddress, u64> m_debug_page_refcounts;
+ Result InitializeHandleTable(s32 size) {
+ // Try to initialize the handle table.
+ R_TRY(m_handle_table.Initialize(size));
- KThread* m_exception_thread{};
+ // We succeeded, so note that we did.
+ m_is_handle_table_initialized = true;
+ R_SUCCEED();
+ }
- KLightLock m_state_lock;
- KLightLock m_list_lock;
+ void FinalizeHandleTable() {
+ // Finalize the table.
+ m_handle_table.Finalize();
- using TLPTree =
- Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
- using TLPIterator = TLPTree::iterator;
- TLPTree m_fully_used_tlp_tree;
- TLPTree m_partially_used_tlp_tree;
+ // Note that the table is finalized.
+ m_is_handle_table_initialized = false;
+ }
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h
new file mode 100644
index 000000000..9e40f68bc
--- /dev/null
+++ b/src/core/hle/kernel/k_process_page_table.h
@@ -0,0 +1,481 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Core {
+class ARM_Interface;
+}
+
+namespace Kernel {
+
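+// Thin wrapper that owns a process's KPageTable and forwards each operation to
+// it, exposing only the surface area the rest of the kernel needs.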
+class KProcessPageTable {
+private:
+ KPageTable m_page_table;
+
+public:
+    explicit KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {}
+
+ Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge,
+ bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit, Core::Memory::Memory& memory,
+ KProcessAddress aslr_space_start) {
+ R_RETURN(m_page_table.InitializeForProcess(
+ as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size,
+ system_resource, resource_limit, memory, aslr_space_start));
+ }
+
+ void Finalize() {
+ m_page_table.Finalize();
+ }
+
+ Core::Memory::Memory& GetMemory() {
+ return m_page_table.GetMemory();
+ }
+
+ Core::Memory::Memory& GetMemory() const {
+ return m_page_table.GetMemory();
+ }
+
+ Common::PageTable& GetImpl() {
+ return m_page_table.GetImpl();
+ }
+
+ Common::PageTable& GetImpl() const {
+ return m_page_table.GetImpl();
+ }
+
+ size_t GetNumGuardPages() const {
+ return m_page_table.GetNumGuardPages();
+ }
+
+ KScopedLightLock AcquireDeviceMapLock() {
+ return m_page_table.AcquireDeviceMapLock();
+ }
+
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
+ }
+
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
+ }
+
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr) {
+ R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
+ }
+
+ Result SetHeapSize(KProcessAddress* out, size_t size) {
+ R_RETURN(m_page_table.SetHeapSize(out, size));
+ }
+
+ Result SetMaxHeapSize(size_t size) {
+ R_RETURN(m_page_table.SetMaxHeapSize(size));
+ }
+
+ Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const {
+ R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
+ }
+
+ Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) {
+ R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
+ }
+
+ Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+ R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
+ }
+
+ Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+ R_RETURN(m_page_table.QueryIoMapping(out, address, size));
+ }
+
+ Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
+ }
+
+ Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
+ }
+
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
+ }
+
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
+ }
+
+ Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
+ }
+
+ Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping, Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
+ }
+
+ Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping) {
+ R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
+ }
+
+ Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
+ }
+
+ Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapRegion(region_type, perm));
+ }
+
+ Result MapInsecureMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapInsecureMemory(address, size));
+ }
+
+ Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
+ }
+
+ Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
+ }
+
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) {
+ R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
+ }
+
+ Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
+ R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
+ }
+
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state,
+ perm_mask, perm, attr_mask, attr));
+ }
+
+ Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
+ }
+
+ Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size));
+ }
+
+ Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state) {
+ R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state));
+ }
+
+ Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size));
+ }
+
+ Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state) {
+ R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state));
+ }
+
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap) {
+ R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm,
+ is_aligned, check_heap));
+ }
+
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
+ R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
+ }
+
+ Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
+ }
+
+ Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
+ }
+
+ Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned) {
+ R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm,
+ is_aligned));
+ }
+
+ Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
+ }
+
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
+ }
+
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
+ }
+
+ Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
+ }
+
+ Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+ R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
+ }
+
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
+ }
+
+ Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+ R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
+ }
+
+ Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
+ }
+
+ Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask,
+ src_state, src_test_perm, src_attr_mask,
+ src_attr));
+ }
+
+ Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state,
+ KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask,
+ src_state, src_test_perm, src_attr_mask,
+ src_attr));
+ }
+
+ Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr) {
+ R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state,
+ dst_test_perm, dst_attr_mask, dst_attr,
+ src_addr));
+ }
+
+ Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ void* src_addr) {
+ R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask,
+ dst_state, dst_test_perm, dst_attr_mask,
+ dst_attr, src_addr));
+ }
+
+ Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr,
+ size_t size, KMemoryState dst_state_mask,
+ KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(
+ dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+ dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+ src_attr_mask, src_attr));
+ }
+
+ Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+ dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+ src_attr_mask, src_attr));
+ }
+
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KProcessPageTable& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send) {
+ R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table,
+ test_perm, dst_state, send));
+ }
+
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
+ R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
+ }
+
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
+ R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
+ }
+
+ Result MapPhysicalMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapPhysicalMemory(address, size));
+ }
+
+ Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
+ }
+
+ Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
+ }
+
+ Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
+ }
+
+ Result UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+ KProcessPageTable& src_page_table, KProcessAddress src_address) {
+ R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table,
+ src_address));
+ }
+
+ bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) {
+ return m_page_table.GetPhysicalAddress(out, address);
+ }
+
+ bool Contains(KProcessAddress addr, size_t size) const {
+ return m_page_table.Contains(addr, size);
+ }
+
+ bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInAliasRegion(addr, size);
+ }
+ bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInHeapRegion(addr, size);
+ }
+ bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInUnsafeAliasRegion(addr, size);
+ }
+
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ return m_page_table.CanContain(addr, size, state);
+ }
+
+ KProcessAddress GetAddressSpaceStart() const {
+ return m_page_table.GetAddressSpaceStart();
+ }
+ KProcessAddress GetHeapRegionStart() const {
+ return m_page_table.GetHeapRegionStart();
+ }
+ KProcessAddress GetAliasRegionStart() const {
+ return m_page_table.GetAliasRegionStart();
+ }
+ KProcessAddress GetStackRegionStart() const {
+ return m_page_table.GetStackRegionStart();
+ }
+ KProcessAddress GetKernelMapRegionStart() const {
+ return m_page_table.GetKernelMapRegionStart();
+ }
+ KProcessAddress GetCodeRegionStart() const {
+ return m_page_table.GetCodeRegionStart();
+ }
+ KProcessAddress GetAliasCodeRegionStart() const {
+ return m_page_table.GetAliasCodeRegionStart();
+ }
+
+ size_t GetAddressSpaceSize() const {
+ return m_page_table.GetAddressSpaceSize();
+ }
+ size_t GetHeapRegionSize() const {
+ return m_page_table.GetHeapRegionSize();
+ }
+ size_t GetAliasRegionSize() const {
+ return m_page_table.GetAliasRegionSize();
+ }
+ size_t GetStackRegionSize() const {
+ return m_page_table.GetStackRegionSize();
+ }
+ size_t GetKernelMapRegionSize() const {
+ return m_page_table.GetKernelMapRegionSize();
+ }
+ size_t GetCodeRegionSize() const {
+ return m_page_table.GetCodeRegionSize();
+ }
+ size_t GetAliasCodeRegionSize() const {
+ return m_page_table.GetAliasCodeRegionSize();
+ }
+
+ size_t GetNormalMemorySize() const {
+ return m_page_table.GetNormalMemorySize();
+ }
+
+ size_t GetCodeSize() const {
+ return m_page_table.GetCodeSize();
+ }
+ size_t GetCodeDataSize() const {
+ return m_page_table.GetCodeDataSize();
+ }
+
+ size_t GetAliasCodeSize() const {
+ return m_page_table.GetAliasCodeSize();
+ }
+ size_t GetAliasCodeDataSize() const {
+ return m_page_table.GetAliasCodeDataSize();
+ }
+
+ u32 GetAllocateOption() const {
+ return m_page_table.GetAllocateOption();
+ }
+
+ u32 GetAddressSpaceWidth() const {
+ return m_page_table.GetAddressSpaceWidth();
+ }
+
+ KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) {
+ return m_page_table.GetHeapPhysicalAddress(address);
+ }
+
+ u8* GetHeapVirtualPointer(KPhysicalAddress address) {
+ return m_page_table.GetHeapVirtualPointer(address);
+ }
+
+ KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) {
+ return m_page_table.GetHeapVirtualAddress(address);
+ }
+
+ KBlockInfoManager* GetBlockInfoManager() {
+ return m_page_table.GetBlockInfoManager();
+ }
+
+ KPageTable& GetBasePageTable() {
+ return m_page_table;
+ }
+
+ const KPageTable& GetBasePageTable() const {
+ return m_page_table;
+ }
+};
+
+} // namespace Kernel
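The KProcessPageTable introduced above is a pure forwarding facade: every member delegates to the wrapped KPageTable, so kernel code can be written against the process-level interface without touching the base page table. A minimal usage sketch, assuming only the accessors declared in this header (LogHeapBounds is a hypothetical helper, not part of the patch):

    // Hypothetical caller: query heap bounds through the facade and check the
    // invariant that the heap region lies inside the tracked address space.
    void LogHeapBounds(Kernel::KProcessPageTable& pt) {
        const auto heap_start = pt.GetHeapRegionStart();
        const size_t heap_size = pt.GetHeapRegionSize();
        ASSERT(pt.Contains(heap_start, heap_size));
    }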
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d8143c650..1bce63a56 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -190,7 +190,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
if (m_state.should_count_idle) {
if (highest_thread != nullptr) [[likely]] {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
- process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
+ process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, 0);
}
} else {
m_state.idle_count++;
@@ -356,7 +356,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(m_core_id, tick_diff);
if (cur_process != nullptr) {
- cur_process->UpdateCPUTimeTicks(tick_diff);
+ cur_process->AddCpuTime(tick_diff);
}
m_last_context_switch_time = cur_tick;
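Both scheduler hunks concern CPU-time accounting: each context switch credits the tick delta since the previous switch to the outgoing thread and, through the renamed KProcess::AddCpuTime, to its owning process. A self-contained sketch of that bookkeeping with stand-in types (not the kernel's actual scheduler state):

    #include <cstdint>

    struct CpuTimeAccounting {
        std::int64_t last_context_switch_tick{};
        std::int64_t thread_cpu_time{};
        std::int64_t process_cpu_time{};

        void OnContextSwitch(std::int64_t cur_tick) {
            const std::int64_t tick_diff = cur_tick - last_context_switch_tick;
            thread_cpu_time += tick_diff;  // KThread::AddCpuTime
            process_cpu_time += tick_diff; // KProcess::AddCpuTime (was UpdateCPUTimeTicks)
            last_context_switch_tick = cur_tick;
        }
    };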
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index c64ceb530..3ea653163 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) {
if (event != nullptr) {
// // Get the client process/page table.
// KProcess *client_process = client_thread->GetOwnerProcess();
- // KPageTable *client_page_table = std::addressof(client_process->PageTable());
+ // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable());
// // If we need to, reply with an async error.
// if (R_FAILED(client_result)) {
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index e6c8d589a..b51941faf 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -1,25 +1,100 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "core/core.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_resource.h"
namespace Kernel {
Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
KMemoryManager::Pool pool) {
- // Unimplemented
- UNREACHABLE();
+ // Set members.
+ m_resource_limit = resource_limit;
+ m_resource_size = size;
+ m_resource_pool = pool;
+
+ // Determine required size for our secure resource.
+ const size_t secure_size = this->CalculateRequiredSecureMemorySize();
+
+ // Reserve memory for our secure resource.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, secure_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate secure memory.
+ R_TRY(KSystemControl::AllocateSecureMemory(m_kernel, std::addressof(m_resource_address),
+ m_resource_size, static_cast<u32>(m_resource_pool)));
+ ASSERT(m_resource_address != 0);
+
+ // Ensure we clean up the secure memory if we fail past this point.
+ ON_RESULT_FAILURE {
+ KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
+ static_cast<u32>(m_resource_pool));
+ };
+
+ // Check that our allocation is bigger than the reference counts needed for it.
+ const size_t rc_size =
+ Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
+ R_UNLESS(m_resource_size > rc_size, ResultOutOfMemory);
+
+ // Get resource pointer.
+ KPhysicalAddress resource_paddr =
+ KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address);
+ auto* resource =
+ m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
+
+ // Initialize slab heaps.
+ m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size,
+ PageSize);
+ m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, resource);
+ m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+ m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+
+ // Initialize managers.
+ m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager),
+ std::addressof(m_page_table_heap));
+ m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager),
+ std::addressof(m_memory_block_heap));
+ m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager),
+ std::addressof(m_block_info_heap));
+
+ // Set our managers.
+ this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);
+
+ // Commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Open reference to our resource limit.
+ m_resource_limit->Open();
+
+ // Set ourselves as initialized.
+ m_is_initialized = true;
+
+ R_SUCCEED();
}
void KSecureSystemResource::Finalize() {
- // Unimplemented
- UNREACHABLE();
+ // Check that we have no outstanding allocations.
+ ASSERT(m_memory_block_slab_manager.GetUsed() == 0);
+ ASSERT(m_block_info_manager.GetUsed() == 0);
+ ASSERT(m_page_table_manager.GetUsed() == 0);
+
+ // Free our secure memory.
+ KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
+ static_cast<u32>(m_resource_pool));
+
+ // Release the memory reservation.
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ this->CalculateRequiredSecureMemorySize());
+
+ // Close reference to our resource limit.
+ m_resource_limit->Close();
}
size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
KMemoryManager::Pool pool) {
- // Unimplemented
- UNREACHABLE();
+ return KSystemControl::CalculateRequiredSecureMemorySize(size, static_cast<u32>(pool));
}
} // namespace Kernel
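KSecureSystemResource::Initialize follows the reserve-allocate-commit idiom: the scoped reservation rolls itself back unless Commit() is reached, and ON_RESULT_FAILURE undoes the secure-memory allocation if a later step fails. A minimal sketch of the reservation half, assuming hypothetical TryReserve/Unreserve primitives (this is not the kernel's KScopedResourceReservation):

    #include <cstddef>

    class ScopedReservation {
    public:
        explicit ScopedReservation(std::size_t amount)
            : m_amount{amount}, m_succeeded{TryReserve(amount)} {}
        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                Unreserve(m_amount); // roll back on any early-out
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; }

    private:
        static bool TryReserve(std::size_t amount); // hypothetical primitive
        static void Unreserve(std::size_t amount);  // hypothetical primitive
        std::size_t m_amount;
        bool m_succeeded;
        bool m_committed{false};
    };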
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 7df8fd7f7..a6deb50ec 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -122,16 +122,15 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
case ThreadType::Main:
ASSERT(arg == 0);
[[fallthrough]];
- case ThreadType::HighPriority:
- [[fallthrough]];
- case ThreadType::Dummy:
- [[fallthrough]];
case ThreadType::User:
ASSERT(((owner == nullptr) ||
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
(owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
break;
+ case ThreadType::HighPriority:
+ case ThreadType::Dummy:
+ break;
case ThreadType::Kernel:
UNIMPLEMENTED();
break;
@@ -216,6 +215,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
// Setup the TLS, if needed.
if (type == ThreadType::User) {
R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
+ owner->GetMemory().ZeroBlock(m_tls_address, Svc::ThreadLocalRegionSize);
}
m_parent = owner;
@@ -403,7 +403,7 @@ void KThread::StartTermination() {
if (m_parent != nullptr) {
m_parent->ReleaseUserException(this);
if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
- m_parent->UnpinCurrentThread(m_core_id);
+ m_parent->UnpinCurrentThread();
}
}
@@ -415,10 +415,6 @@ void KThread::StartTermination() {
m_parent->ClearRunningThread(this);
}
- // Signal.
- m_signaled = true;
- KSynchronizationObject::NotifyAvailable();
-
// Clear previous thread in KScheduler.
KScheduler::ClearPreviousThread(m_kernel, this);
@@ -437,6 +433,13 @@ void KThread::FinishTermination() {
}
}
+ // Acquire the scheduler lock.
+ KScopedSchedulerLock sl{m_kernel};
+
+ // Signal.
+ m_signaled = true;
+ KSynchronizationObject::NotifyAvailable();
+
// Close the thread.
this->Close();
}
@@ -820,7 +823,7 @@ void KThread::CloneFpuStatus() {
ASSERT(this->GetOwnerProcess() != nullptr);
ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
- if (this->GetOwnerProcess()->Is64BitProcess()) {
+ if (this->GetOwnerProcess()->Is64Bit()) {
// Clone FPSR and FPCR.
ThreadContext64 cur_ctx{};
m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
@@ -923,7 +926,7 @@ Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
// If we're not terminating, get the thread's user context.
if (!this->IsTerminationRequested()) {
- if (m_parent->Is64BitProcess()) {
+ if (m_parent->Is64Bit()) {
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
auto context = GetContext64();
context.pstate &= 0xFF0FFE20;
@@ -1174,6 +1177,9 @@ Result KThread::Run() {
owner->IncrementRunningThreadCount();
}
+ // Open a reference, now that we're running.
+ this->Open();
+
// Set our state and finish.
this->SetState(ThreadState::Runnable);
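The k_thread.cpp hunks move the final signal out of StartTermination and into FinishTermination, where it is published while holding the scheduler lock. The point of the reordering, sketched with standard-library primitives (an analogy only; the kernel uses KScopedSchedulerLock, not a mutex):

    #include <condition_variable>
    #include <mutex>

    struct Waitable {
        std::mutex m;
        std::condition_variable cv;
        bool signaled = false;

        void FinishTermination() {
            std::scoped_lock lk{m}; // analogous to KScopedSchedulerLock
            signaled = true;        // publish the state under the lock...
            cv.notify_all();        // ...so a waiter can never miss the final wake-up
        }

        void Wait() {
            std::unique_lock lk{m};
            cv.wait(lk, [this] { return signaled; });
        }
    };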
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index d178c2453..e9ca5dfca 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -655,6 +655,21 @@ public:
return m_stack_top;
}
+public:
+ // TODO: This shouldn't be defined in kernel namespace
+ struct NativeExecutionParameters {
+ u64 tpidr_el0{};
+ u64 tpidrro_el0{};
+ void* native_context{};
+ std::atomic<u32> lock{1};
+ bool is_running{};
+ u32 magic{Common::MakeMagic('Y', 'U', 'Z', 'U')};
+ };
+
+ NativeExecutionParameters& GetNativeExecutionParameters() {
+ return m_native_execution_parameters;
+ }
+
private:
KThread* RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
bool is_kernel_address_key);
@@ -721,6 +736,7 @@ private:
// For core KThread implementation
ThreadContext32 m_thread_context_32{};
ThreadContext64 m_thread_context_64{};
+ Common::IntrusiveListNode m_process_list_node;
Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
s32 m_priority{};
using ConditionVariableThreadTreeTraits =
@@ -913,6 +929,7 @@ private:
ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
uintptr_t m_argument{};
KProcessAddress m_stack_top{};
+ NativeExecutionParameters m_native_execution_parameters{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
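The new NativeExecutionParameters block tags each thread with a magic value built by Common::MakeMagic. A stand-alone equivalent, assuming yuzu's usual definition (first character in the lowest byte):

    #include <cstdint>

    constexpr std::uint32_t MakeMagic(char a, char b, char c, char d) {
        return static_cast<std::uint32_t>(a) | (static_cast<std::uint32_t>(b) << 8) |
               (static_cast<std::uint32_t>(c) << 16) | (static_cast<std::uint32_t>(d) << 24);
    }
    // 'Y' = 0x59, 'U' = 0x55, 'Z' = 0x5A, so the field reads "YUZU" byte-wise.
    static_assert(MakeMagic('Y', 'U', 'Z', 'U') == 0x555A5559);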
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 2c45b4232..a632d1634 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
- const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr);
- ASSERT(phys_addr);
+ KPhysicalAddress phys_addr{};
+ ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr));
// Unmap the page.
R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 24433d32b..4a1559291 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -101,35 +101,31 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id]->Initialize((*application_process).Is64BitProcess());
+ cores[core_id]->Initialize((*application_process).Is64Bit());
system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
}
}
- void CloseApplicationProcess() {
- KProcess* old_process = application_process.exchange(nullptr);
- if (old_process == nullptr) {
- return;
- }
-
- // old_process->Close();
- // TODO: The process should be destroyed based on accurate ref counting after
- // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- old_process->Finalize();
- old_process->Destroy();
+ void TerminateApplicationProcess() {
+ application_process.load()->Terminate();
}
void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
- process_list.clear();
-
CloseServices();
+ auto* old_process = application_process.exchange(nullptr);
+ if (old_process) {
+ old_process->Close();
+ }
+
+ process_list.clear();
+
next_object_id = 0;
- next_kernel_process_id = KProcess::InitialKIPIDMin;
- next_user_process_id = KProcess::ProcessIDMin;
+ next_kernel_process_id = KProcess::InitialProcessIdMin;
+ next_user_process_id = KProcess::ProcessIdMin;
next_thread_id = 1;
global_handle_table->Finalize();
@@ -176,8 +172,6 @@ struct KernelCore::Impl {
}
}
- CloseApplicationProcess();
-
// Track kernel objects that were not freed on shutdown
{
std::scoped_lock lk{registered_objects_lock};
@@ -344,6 +338,8 @@ struct KernelCore::Impl {
// Create the system page table managers.
app_system_resource = std::make_unique<KSystemResource>(kernel);
sys_system_resource = std::make_unique<KSystemResource>(kernel);
+ KAutoObject::Create(std::addressof(*app_system_resource));
+ KAutoObject::Create(std::addressof(*sys_system_resource));
// Set the managers for the system resources.
app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
@@ -792,8 +788,8 @@ struct KernelCore::Impl {
std::mutex registered_in_use_objects_lock;
std::atomic<u32> next_object_id{0};
- std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
- std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
+ std::atomic<u64> next_kernel_process_id{KProcess::InitialProcessIdMin};
+ std::atomic<u64> next_user_process_id{KProcess::ProcessIdMin};
std::atomic<u64> next_thread_id{1};
// Lists all processes that exist in the current session.
@@ -924,10 +920,6 @@ const KProcess* KernelCore::ApplicationProcess() const {
return impl->application_process;
}
-void KernelCore::CloseApplicationProcess() {
- impl->CloseApplicationProcess();
-}
-
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list;
}
@@ -1128,8 +1120,8 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
std::function<void()> func) {
// Make a new process.
KProcess* process = KProcess::Create(*this);
- ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
- GetSystemResourceLimit())));
+ ASSERT(R_SUCCEEDED(
+ process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
SCOPE_EXIT({ process->Close(); });
@@ -1156,8 +1148,8 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
// Make a new process.
KProcess* process = KProcess::Create(*this);
- ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
- GetSystemResourceLimit())));
+ ASSERT(R_SUCCEEDED(
+ process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
SCOPE_EXIT({ process->Close(); });
@@ -1266,7 +1258,8 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
void KernelCore::SuspendApplication(bool suspended) {
const bool should_suspend{exception_exited || suspended};
- const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
+ const auto activity =
+ should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
// Get the application process.
KScopedAutoObject<KProcess> process = ApplicationProcess();
@@ -1300,6 +1293,8 @@ void KernelCore::SuspendApplication(bool suspended) {
}
void KernelCore::ShutdownCores() {
+ impl->TerminateApplicationProcess();
+
KScopedSchedulerLock lk{*this};
for (auto* thread : impl->shutdown_threads) {
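The kernel.cpp changes retire the manual Finalize()/Destroy() teardown in favor of plain reference counting: Terminate() stops the application process and the final Close() frees it. The underlying discipline, sketched without kernel types (KAutoObject is the real mechanism; this class is illustrative):

    #include <atomic>

    class RefCounted {
    public:
        virtual ~RefCounted() = default;
        void Open() { m_refs.fetch_add(1, std::memory_order_relaxed); }
        void Close() {
            // The last Close() destroys the object; nobody calls Destroy() by hand.
            if (m_refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                delete this;
            }
        }

    private:
        std::atomic<int> m_refs{1}; // creation grants the first reference
    };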
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index d5b08eeb5..d8086c0ea 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -134,9 +134,6 @@ public:
/// Retrieves a const pointer to the application process.
const KProcess* ApplicationProcess() const;
- /// Closes the application process.
- void CloseApplicationProcess();
-
/// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const;
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 5ee869fa2..073039825 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -1,8 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/settings.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#ifdef HAS_NCE
+#include "core/arm/nce/arm_nce.h"
+#endif
#include "core/core.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
@@ -14,7 +18,8 @@ PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KSchedu
: m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
- // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
+ // an NCE interface or a 32-bit instance of Dynarmic. This should be abstracted out to a CPU
+ // manager.
auto& kernel = system.Kernel();
m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
system, kernel.IsMulticore(),
@@ -28,6 +33,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KSchedu
PhysicalCore::~PhysicalCore() = default;
void PhysicalCore::Initialize(bool is_64_bit) {
+#if defined(HAS_NCE)
+ if (Settings::IsNceEnabled()) {
+ m_arm_interface = std::make_unique<Core::ARM_NCE>(m_system, m_system.Kernel().IsMulticore(),
+ m_core_index);
+ return;
+ }
+#endif
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
auto& kernel = m_system.Kernel();
if (!is_64_bit) {
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
deleted file mode 100644
index 773319ad8..000000000
--- a/src/core/hle/kernel/process_capability.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <bit>
-
-#include "common/bit_util.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-namespace {
-
-// clang-format off
-
-// Shift offsets for kernel capability types.
-enum : u32 {
- CapabilityOffset_PriorityAndCoreNum = 3,
- CapabilityOffset_Syscall = 4,
- CapabilityOffset_MapPhysical = 6,
- CapabilityOffset_MapIO = 7,
- CapabilityOffset_MapRegion = 10,
- CapabilityOffset_Interrupt = 11,
- CapabilityOffset_ProgramType = 13,
- CapabilityOffset_KernelVersion = 14,
- CapabilityOffset_HandleTableSize = 15,
- CapabilityOffset_Debug = 16,
-};
-
-// Combined mask of all parameters that may be initialized only once.
-constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
- (1U << CapabilityOffset_ProgramType) |
- (1U << CapabilityOffset_KernelVersion) |
- (1U << CapabilityOffset_HandleTableSize) |
- (1U << CapabilityOffset_Debug);
-
-// Packed kernel version indicating 10.4.0
-constexpr u32 PackedKernelVersion = 0x520000;
-
-// Indicates possible types of capabilities that can be specified.
-enum class CapabilityType : u32 {
- Unset = 0U,
- PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
- Syscall = (1U << CapabilityOffset_Syscall) - 1,
- MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1,
- MapIO = (1U << CapabilityOffset_MapIO) - 1,
- MapRegion = (1U << CapabilityOffset_MapRegion) - 1,
- Interrupt = (1U << CapabilityOffset_Interrupt) - 1,
- ProgramType = (1U << CapabilityOffset_ProgramType) - 1,
- KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1,
- HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1,
- Debug = (1U << CapabilityOffset_Debug) - 1,
- Ignorable = 0xFFFFFFFFU,
-};
-
-// clang-format on
-
-constexpr CapabilityType GetCapabilityType(u32 value) {
- return static_cast<CapabilityType>((~value & (value + 1)) - 1);
-}
-
-u32 GetFlagBitOffset(CapabilityType type) {
- const auto value = static_cast<u32>(type);
- return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
-}
-
-} // Anonymous namespace
-
-Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
- Clear();
-
- // Allow all cores and priorities.
- core_mask = 0xF;
- priority_mask = 0xFFFFFFFFFFFFFFFF;
- kernel_version = PackedKernelVersion;
-
- return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
- Clear();
-
- return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-void ProcessCapabilities::InitializeForMetadatalessProcess() {
- // Allow all cores and priorities
- core_mask = 0xF;
- priority_mask = 0xFFFFFFFFFFFFFFFF;
- kernel_version = PackedKernelVersion;
-
- // Allow all system calls and interrupts.
- svc_capabilities.set();
- interrupt_capabilities.set();
-
- // Allow using the maximum possible amount of handles
- handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
-
- // Allow all debugging capabilities.
- is_debuggable = true;
- can_force_debug = true;
-}
-
-Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table) {
- u32 set_flags = 0;
- u32 set_svc_bits = 0;
-
- for (std::size_t i = 0; i < num_capabilities; ++i) {
- const u32 descriptor = capabilities[i];
- const auto type = GetCapabilityType(descriptor);
-
- if (type == CapabilityType::MapPhysical) {
- i++;
-
- // The MapPhysical type uses two descriptor flags for its parameters.
- // If there's only one, then there's a problem.
- if (i >= num_capabilities) {
- LOG_ERROR(Kernel, "Invalid combination! i={}", i);
- return ResultInvalidCombination;
- }
-
- const auto size_flags = capabilities[i];
- if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
- LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
- return ResultInvalidCombination;
- }
-
- const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
- if (result.IsError()) {
- LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
- descriptor, size_flags);
- return result;
- }
- } else {
- const auto result =
- ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
- if (result.IsError()) {
- LOG_ERROR(
- Kernel,
- "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
- set_flags, set_svc_bits, descriptor);
- return result;
- }
- }
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table) {
- const auto type = GetCapabilityType(flag);
-
- if (type == CapabilityType::Unset) {
- return ResultInvalidArgument;
- }
-
- // Bail early on ignorable entries; as one would expect,
- // ignorable descriptors can be ignored.
- if (type == CapabilityType::Ignorable) {
- return ResultSuccess;
- }
-
- // Ensure that the given flag hasn't already been initialized before.
- // If it has been, then bail.
- const u32 flag_length = GetFlagBitOffset(type);
- const u32 set_flag = 1U << flag_length;
- if ((set_flag & set_flags & InitializeOnceMask) != 0) {
- LOG_ERROR(Kernel,
- "Attempted to initialize flags that may only be initialized once. set_flags={}",
- set_flags);
- return ResultInvalidCombination;
- }
- set_flags |= set_flag;
-
- switch (type) {
- case CapabilityType::PriorityAndCoreNum:
- return HandlePriorityCoreNumFlags(flag);
- case CapabilityType::Syscall:
- return HandleSyscallFlags(set_svc_bits, flag);
- case CapabilityType::MapIO:
- return HandleMapIOFlags(flag, page_table);
- case CapabilityType::MapRegion:
- return HandleMapRegionFlags(flag, page_table);
- case CapabilityType::Interrupt:
- return HandleInterruptFlags(flag);
- case CapabilityType::ProgramType:
- return HandleProgramTypeFlags(flag);
- case CapabilityType::KernelVersion:
- return HandleKernelVersionFlags(flag);
- case CapabilityType::HandleTableSize:
- return HandleHandleTableFlags(flag);
- case CapabilityType::Debug:
- return HandleDebugFlags(flag);
- default:
- break;
- }
-
- LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
- return ResultInvalidArgument;
-}
-
-void ProcessCapabilities::Clear() {
- svc_capabilities.reset();
- interrupt_capabilities.reset();
-
- core_mask = 0;
- priority_mask = 0;
-
- handle_table_size = 0;
- kernel_version = 0;
-
- program_type = ProgramType::SysModule;
-
- is_debuggable = false;
- can_force_debug = false;
-}
-
-Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
- if (priority_mask != 0 || core_mask != 0) {
- LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
- priority_mask, core_mask);
- return ResultInvalidArgument;
- }
-
- const u32 core_num_min = (flags >> 16) & 0xFF;
- const u32 core_num_max = (flags >> 24) & 0xFF;
- if (core_num_min > core_num_max) {
- LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
- core_num_min, core_num_max);
- return ResultInvalidCombination;
- }
-
- const u32 priority_min = (flags >> 10) & 0x3F;
- const u32 priority_max = (flags >> 4) & 0x3F;
- if (priority_min > priority_max) {
- LOG_ERROR(Kernel,
- "Priority min is greater than priority max! priority_min={}, priority_max={}",
- priority_min, priority_max);
- return ResultInvalidCombination;
- }
-
- // The switch only has 4 usable cores.
- if (core_num_max >= 4) {
- LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
- return ResultInvalidCoreId;
- }
-
- const auto make_mask = [](u64 min, u64 max) {
- const u64 range = max - min + 1;
- const u64 mask = (1ULL << range) - 1;
-
- return mask << min;
- };
-
- core_mask = make_mask(core_num_min, core_num_max);
- priority_mask = make_mask(priority_min, priority_max);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
- const u32 index = flags >> 29;
- const u32 svc_bit = 1U << index;
-
- // If we've already set this svc before, bail.
- if ((set_svc_bits & svc_bit) != 0) {
- return ResultInvalidCombination;
- }
- set_svc_bits |= svc_bit;
-
- const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
- for (u32 i = 0; i < 24; ++i) {
- const u32 svc_number = index * 24 + i;
-
- if ((svc_mask & (1U << i)) == 0) {
- continue;
- }
-
- svc_capabilities[svc_number] = true;
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
- KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
- constexpr u32 interrupt_ignore_value = 0x3FF;
- const u32 interrupt0 = (flags >> 12) & 0x3FF;
- const u32 interrupt1 = (flags >> 22) & 0x3FF;
-
- for (u32 interrupt : {interrupt0, interrupt1}) {
- if (interrupt == interrupt_ignore_value) {
- continue;
- }
-
- // NOTE:
- // This should be checking a generic interrupt controller value
- // as part of the calculation; however, given that we don't currently
- // emulate that, it's sufficient to mark every interrupt as defined.
-
- if (interrupt >= interrupt_capabilities.size()) {
- LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
- interrupt);
- return ResultOutOfRange;
- }
-
- interrupt_capabilities[interrupt] = true;
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
- const u32 reserved = flags >> 17;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
- // Yes, the internal member variable is checked in the actual kernel here.
- // This might look odd for options that are only allowed to be initialized
- // just once, however the kernel has a separate initialization function for
- // kernel processes and userland processes. The kernel variant sets this
- // member variable ahead of time.
-
- const u32 major_version = kernel_version >> 19;
-
- if (major_version != 0 || flags < 0x80000) {
- LOG_ERROR(Kernel,
- "Kernel version is non zero or flags are too small! major_version={}, flags={}",
- major_version, flags);
- return ResultInvalidArgument;
- }
-
- kernel_version = flags;
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
- const u32 reserved = flags >> 26;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
- const u32 reserved = flags >> 19;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- is_debuggable = (flags & 0x20000) != 0;
- can_force_debug = (flags & 0x40000) != 0;
- return ResultSuccess;
-}
-
-} // namespace Kernel
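The deleted GetCapabilityType relied on a classic bit trick: for any word v, (~v & (v + 1)) isolates the lowest clear bit, so subtracting 1 yields a mask of the trailing set bits, which is exactly how capability types are tagged (N trailing ones select type N). A self-contained check of the trick:

    #include <cstdint>

    constexpr std::uint32_t TrailingOnesMask(std::uint32_t v) {
        return (~v & (v + 1)) - 1;
    }
    static_assert(TrailingOnesMask(0b0111) == 0b0111);      // PriorityAndCoreNum
    static_assert(TrailingOnesMask(0b01111) == 0b01111);    // Syscall
    static_assert(TrailingOnesMask(0xDEAD0007u) == 0b0111); // upper bits are ignored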
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
deleted file mode 100644
index ff05dc5ff..000000000
--- a/src/core/hle/kernel/process_capability.h
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <bitset>
-
-#include "common/common_types.h"
-
-union Result;
-
-namespace Kernel {
-
-class KPageTable;
-
-/// The possible types of programs that may be indicated
-/// by the program type capability descriptor.
-enum class ProgramType {
- SysModule,
- Application,
- Applet,
-};
-
-/// Handles kernel capability descriptors that are provided by
-/// application metadata. These descriptors provide information
- /// that alters certain parameters for the kernel process instance
-/// that will run said application (or applet).
-///
-/// Capabilities are a sequence of flag descriptors, that indicate various
-/// configurations and constraints for a particular process.
-///
-/// Flag types are indicated by a sequence of set low bits. E.g. the
-/// types are indicated with the low bits as follows (where x indicates "don't care"):
-///
-/// - Priority and core mask : 0bxxxxxxxxxxxx0111
-/// - Allowed service call mask: 0bxxxxxxxxxxx01111
-/// - Map physical memory : 0bxxxxxxxxx0111111
-/// - Map IO memory : 0bxxxxxxxx01111111
-/// - Interrupts : 0bxxxx011111111111
-/// - Application type : 0bxx01111111111111
-/// - Kernel version : 0bx011111111111111
-/// - Handle table size : 0b0111111111111111
-/// - Debugger flags : 0b1111111111111111
-///
- /// Each mask is essentially (1 << bit_offset) - 1. E.g. the first entry in the
- /// above list is simply bit 3 (value 8 -> 0b1000) minus one (7 -> 0b0111).
-///
-/// An example of a bit layout (using the map physical layout):
-/// <example>
-/// The MapPhysical type indicates a sequence entry pair of:
-///
-/// [initial, memory_flags], where:
-///
-/// initial:
-/// bits:
-/// 7-24: Starting page to map memory at.
-/// 25 : Indicates if the memory should be mapped as read only.
-///
-/// memory_flags:
-/// bits:
-/// 7-20 : Number of pages to map
-/// 21-25: Seems to be reserved (still checked against though)
-/// 26 : Whether or not the memory being mapped is IO memory, or physical memory
-/// </example>
-///
-class ProcessCapabilities {
-public:
- using InterruptCapabilities = std::bitset<1024>;
- using SyscallCapabilities = std::bitset<192>;
-
- ProcessCapabilities() = default;
- ProcessCapabilities(const ProcessCapabilities&) = delete;
- ProcessCapabilities(ProcessCapabilities&&) = default;
-
- ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
- ProcessCapabilities& operator=(ProcessCapabilities&&) = default;
-
- /// Initializes this process capabilities instance for a kernel process.
- ///
- /// @param capabilities The capabilities to parse
- /// @param num_capabilities The number of capabilities to parse.
- /// @param page_table The memory manager to use for handling any mapping-related
- /// operations (such as mapping IO memory, etc).
- ///
- /// @returns ResultSuccess if this capabilities instance was able to be initialized,
- /// otherwise, an error code upon failure.
- ///
- Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Initializes this process capabilities instance for a userland process.
- ///
- /// @param capabilities The capabilities to parse.
- /// @param num_capabilities The total number of capabilities to parse.
- /// @param page_table The memory manager to use for handling any mapping-related
- /// operations (such as mapping IO memory, etc).
- ///
- /// @returns ResultSuccess if this capabilities instance was able to be initialized,
- /// otherwise, an error code upon failure.
- ///
- Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Initializes this process capabilities instance for a process that does not
- /// have any metadata to parse.
- ///
- /// This is necessary, as we allow running raw executables, and the internal
- /// kernel process capabilities also determine what CPU cores the process is
- /// allowed to run on, and what priorities are allowed for threads. It also
- /// determines the max handle table size, what the program type is, whether or
- /// not the process can be debugged, or whether it's possible for a process to
- /// forcibly debug another process.
- ///
- /// Given the above, this essentially enables all capabilities across the board
- /// for the process. It allows the process to:
- ///
- /// - Run on any core
- /// - Use any thread priority
- /// - Use the maximum number of handles a process is allowed to.
- /// - Be debuggable
- /// - Forcibly debug other processes.
- ///
- /// Note that this is not a behavior that the kernel allows a process to do via
- /// a single function like this. This is yuzu-specific behavior to handle
- /// executables with no capability descriptors whatsoever to derive behavior from.
- /// It being yuzu-specific is why this is also not the default behavior and not
- /// done by default in the constructor.
- ///
- void InitializeForMetadatalessProcess();
-
- /// Gets the allowable core mask
- u64 GetCoreMask() const {
- return core_mask;
- }
-
- /// Gets the allowable priority mask
- u64 GetPriorityMask() const {
- return priority_mask;
- }
-
- /// Gets the SVC access permission bits
- const SyscallCapabilities& GetServiceCapabilities() const {
- return svc_capabilities;
- }
-
- /// Gets the valid interrupt bits.
- const InterruptCapabilities& GetInterruptCapabilities() const {
- return interrupt_capabilities;
- }
-
- /// Gets the program type for this process.
- ProgramType GetProgramType() const {
- return program_type;
- }
-
- /// Gets the total number of allowable handles for the process' handle table.
- s32 GetHandleTableSize() const {
- return handle_table_size;
- }
-
- /// Gets the kernel version value.
- u32 GetKernelVersion() const {
- return kernel_version;
- }
-
- /// Whether or not this process can be debugged.
- bool IsDebuggable() const {
- return is_debuggable;
- }
-
- /// Whether or not this process can forcibly debug another
- /// process, even if that process is not considered debuggable.
- bool CanForceDebug() const {
- return can_force_debug;
- }
-
-private:
- /// Attempts to parse a given sequence of capability descriptors.
- ///
- /// @param capabilities The sequence of capability descriptors to parse.
- /// @param num_capabilities The number of descriptors within the given sequence.
- /// @param page_table The memory manager that will perform any memory
- /// mapping if necessary.
- ///
- /// @return ResultSuccess if no errors occur, otherwise an error code.
- ///
- Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Attempts to parse a capability descriptor that is only represented by a
- /// single flag set.
- ///
- /// @param set_flags Running set of flags that are used to catch
- /// flags being initialized more than once when they shouldn't be.
- /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
- /// @param flag The flag to attempt to parse.
- /// @param page_table The memory manager that will perform any memory
- /// mapping if necessary.
- ///
- /// @return ResultSuccess if no errors occurred, otherwise an error code.
- ///
- Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table);
-
- /// Clears the internal state of this process capability instance. Necessary
- /// to have a sane starting point due to us allowing running executables without
- /// configuration metadata. We assume a process is not going to have metadata,
- /// and if it turns out that the process does, in fact, have metadata, then
- /// we attempt to parse it. Thus, we need this to reset data members back to
- /// a good state.
- ///
- /// DO NOT ever make this a public member function. This isn't an invariant
- /// anything external should depend upon (and if anything comes to rely on it,
- /// you should immediately be questioning the design of that thing, not this
- /// class. If the kernel itself can run without depending on behavior like that,
- /// then so can yuzu).
- ///
- void Clear();
-
- /// Handles flags related to the priority and core number capability flags.
- Result HandlePriorityCoreNumFlags(u32 flags);
-
- /// Handles flags related to determining the allowable SVC mask.
- Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
-
- /// Handles flags related to mapping physical memory pages.
- Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
-
- /// Handles flags related to mapping IO pages.
- Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
-
- /// Handles flags related to mapping physical memory regions.
- Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
-
- /// Handles flags related to the interrupt capability flags.
- Result HandleInterruptFlags(u32 flags);
-
- /// Handles flags related to the program type.
- Result HandleProgramTypeFlags(u32 flags);
-
- /// Handles flags related to the handle table size.
- Result HandleHandleTableFlags(u32 flags);
-
- /// Handles flags related to the kernel version capability flags.
- Result HandleKernelVersionFlags(u32 flags);
-
- /// Handles flags related to debug-specific capabilities.
- Result HandleDebugFlags(u32 flags);
-
- SyscallCapabilities svc_capabilities;
- InterruptCapabilities interrupt_capabilities;
-
- u64 core_mask = 0;
- u64 priority_mask = 0;
-
- s32 handle_table_size = 0;
- u32 kernel_version = 0;
-
- ProgramType program_type = ProgramType::SysModule;
-
- bool is_debuggable = false;
- bool can_force_debug = false;
-};
-
-} // namespace Kernel
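For reference, the PriorityAndCoreNum descriptor that the deleted parser consumed packs priority_max into bits 4-9, priority_min into bits 10-15, core_num_min into bits 16-23 and core_num_max into bits 24-31, on top of the 0b0111 type tag. A worked encode with hypothetical values (cores 0-3, priorities 28-44), following the field layout of the deleted HandlePriorityCoreNumFlags:

    #include <cstdint>

    constexpr std::uint32_t EncodePriorityAndCoreNum(std::uint32_t prio_max,
                                                     std::uint32_t prio_min,
                                                     std::uint32_t core_min,
                                                     std::uint32_t core_max) {
        return 0b0111u | (prio_max << 4) | (prio_min << 10) | (core_min << 16) |
               (core_max << 24);
    }
    static_assert(EncodePriorityAndCoreNum(44, 28, 0, 3) == 0x030072C7);
    // Decoding recovers the fields exactly as the deleted parser did:
    static_assert(((0x030072C7u >> 10) & 0x3F) == 28); // priority_min
    static_assert(((0x030072C7u >> 24) & 0xFF) == 3);  // core_num_max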
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 871d541d4..b76683969 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -4426,7 +4426,7 @@ void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+ if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
Call64(system, imm);
} else {
Call32(system, imm);
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index f99964028..ada998772 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -86,20 +86,19 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::TotalMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailable();
+ *result = process->GetTotalUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::UsedMemorySize:
- *result = process->GetTotalPhysicalMemoryUsed();
+ *result = process->GetUsedUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::SystemResourceSizeTotal:
- *result = process->GetSystemResourceSize();
+ *result = process->GetTotalSystemResourceSize();
R_SUCCEED();
case InfoType::SystemResourceSizeUsed:
- LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
- *result = process->GetSystemResourceUsage();
+ *result = process->GetUsedSystemResourceSize();
R_SUCCEED();
case InfoType::ProgramId:
@@ -111,20 +110,29 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::TotalNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+ *result = process->GetTotalNonSystemUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::UsedNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
+ *result = process->GetUsedNonSystemUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::IsApplication:
LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
- *result = true;
+ *result = process->IsApplication();
R_SUCCEED();
case InfoType::FreeThreadCount:
- *result = process->GetFreeThreadCount();
+ if (KResourceLimit* resource_limit = process->GetResourceLimit();
+ resource_limit != nullptr) {
+ const auto current_value =
+ resource_limit->GetCurrentValue(Svc::LimitableResource::ThreadCountMax);
+ const auto limit_value =
+ resource_limit->GetLimitValue(Svc::LimitableResource::ThreadCountMax);
+ *result = limit_value - current_value;
+ } else {
+ *result = 0;
+ }
R_SUCCEED();
default:
@@ -161,7 +169,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
case InfoType::RandomEntropy:
R_UNLESS(handle == 0, ResultInvalidHandle);
- R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);
+ R_UNLESS(info_sub_id < 4, ResultInvalidCombination);
*result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
R_SUCCEED();
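FreeThreadCount is now derived from the process's resource limit rather than a process-side counter: the free count is the ThreadCountMax limit value minus its current value. The arithmetic, as a stand-alone sketch (values illustrative):

    #include <cstdint>

    // Stand-in computation; the real operands come from KResourceLimit.
    constexpr std::int64_t FreeThreadCount(std::int64_t limit_value,
                                           std::int64_t current_value) {
        return limit_value - current_value;
    }
    static_assert(FreeThreadCount(64, 12) == 52); // 64 allowed, 12 in use -> 52 free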
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
index 1d7bc4246..5f0833fcb 100644
--- a/src/core/hle/kernel/svc/svc_lock.cpp
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -17,7 +17,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u3
R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
- R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
+ R_RETURN(KConditionVariable::WaitForAddress(system.Kernel(), thread_handle, address, tag));
}
/// Unlock a mutex
@@ -28,7 +28,7 @@ Result ArbitrateUnlock(Core::System& system, u64 address) {
R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
- R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
+ R_RETURN(KConditionVariable::SignalToAddress(system.Kernel(), address));
}
Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 97f1210de..4ca62860d 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
+Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr,
+ u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
R_THROW(ResultInvalidAddress);
@@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Set the memory attribute.
- R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
+ R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
+ static_cast<KMemoryAttribute>(attr)));
}
/// Maps a memory range into a different range.
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index d3545f232..793e9f8d0 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
// Set the heap size.
- R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size));
+ KProcessAddress address{};
+ R_TRY(GetCurrentProcess(system.Kernel())
+ .GetPageTable()
+ .SetHeapSize(std::addressof(address), size));
+
+ // We succeeded.
+ *out_address = GetInteger(address);
+ R_SUCCEED();
}
/// Maps memory at a desired address
@@ -46,7 +53,7 @@ Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
auto& page_table{current_process->GetPageTable()};
- if (current_process->GetSystemResourceSize() == 0) {
+ if (current_process->GetTotalSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
R_THROW(ResultInvalidState);
}
@@ -95,7 +102,7 @@ Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
auto& page_table{current_process->GetPageTable()};
- if (current_process->GetSystemResourceSize() == 0) {
+ if (current_process->GetTotalSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
R_THROW(ResultInvalidState);
}
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index 07cd48175..e1427947b 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
R_THROW(ResultInvalidCurrentMemory);
}
- R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
- KPageTable::ICacheInvalidationStrategy::InvalidateAll));
+ R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size));
}
Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 51af06e97..816dcb8d0 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
}
auto& current_memory{GetCurrentMemory(system.Kernel())};
- const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()};
- current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+ KMemoryInfo mem_info;
+ R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address));
- //! This is supposed to be part of the QueryInfo call.
- *out_page_info = {};
+ const auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+ current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info));
R_SUCCEED();
}
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 8ebc1bd1c..6c79cfd8d 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -132,7 +132,7 @@ void SynchronizePreemptionState(Core::System& system) {
GetCurrentThread(kernel).ClearInterruptFlag();
// Unpin the current thread.
- cur_process->UnpinCurrentThread(core_id);
+ cur_process->UnpinCurrentThread();
}
}
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 933b82e30..755fd62b5 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -85,10 +85,6 @@ Result StartThread(Core::System& system, Handle thread_handle) {
// Try to start the thread.
R_TRY(thread->Run());
- // If we succeeded, persist a reference to the thread.
- thread->Open();
- system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
-
R_SUCCEED();
}
@@ -99,7 +95,6 @@ void ExitThread(Core::System& system) {
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
- system.Kernel().UnregisterInUseObject(current_thread);
}
/// Sleep the current thread
@@ -260,7 +255,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_
auto list_iter = thread_list.cbegin();
for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
- memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
+ memory.Write64(out_thread_ids, list_iter->GetThreadId());
out_thread_ids += sizeof(u64);
}
diff --git a/src/core/hle/kernel/svc_generator.py b/src/core/hle/kernel/svc_generator.py
index 7fcbb1ba1..5531faac6 100644
--- a/src/core/hle/kernel/svc_generator.py
+++ b/src/core/hle/kernel/svc_generator.py
@@ -592,7 +592,7 @@ void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+ if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
Call64(system, imm);
} else {
Call32(system, imm);
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 251e6013c..50de02e36 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -604,13 +604,57 @@ enum class ProcessActivity : u32 {
Paused,
};
+enum class CreateProcessFlag : u32 {
+ // Is 64 bit?
+ Is64Bit = (1 << 0),
+
+ // What kind of address space?
+ AddressSpaceShift = 1,
+ AddressSpaceMask = (7 << AddressSpaceShift),
+ AddressSpace32Bit = (0 << AddressSpaceShift),
+ AddressSpace64BitDeprecated = (1 << AddressSpaceShift),
+ AddressSpace32BitWithoutAlias = (2 << AddressSpaceShift),
+ AddressSpace64Bit = (3 << AddressSpaceShift),
+
+ // Should JIT debug be done on crash?
+ EnableDebug = (1 << 4),
+
+ // Should ASLR be enabled for the process?
+ EnableAslr = (1 << 5),
+
+ // Is the process an application?
+ IsApplication = (1 << 6),
+
+ // 4.x deprecated: Should use secure memory?
+ DeprecatedUseSecureMemory = (1 << 7),
+
+ // 5.x+ Pool partition type.
+ PoolPartitionShift = 7,
+ PoolPartitionMask = (0xF << PoolPartitionShift),
+ PoolPartitionApplication = (0 << PoolPartitionShift),
+ PoolPartitionApplet = (1 << PoolPartitionShift),
+ PoolPartitionSystem = (2 << PoolPartitionShift),
+ PoolPartitionSystemNonSecure = (3 << PoolPartitionShift),
+
+ // 7.x+ Should memory allocation be optimized? This requires IsApplication.
+ OptimizeMemoryAllocation = (1 << 11),
+
+ // 11.x+ DisableDeviceAddressSpaceMerge.
+ DisableDeviceAddressSpaceMerge = (1 << 12),
+
+ // Mask of all flags.
+ All = Is64Bit | AddressSpaceMask | EnableDebug | EnableAslr | IsApplication |
+ PoolPartitionMask | OptimizeMemoryAllocation | DisableDeviceAddressSpaceMerge,
+};
+DECLARE_ENUM_FLAG_OPERATORS(CreateProcessFlag);
+
struct CreateProcessParameter {
std::array<char, 12> name;
u32 version;
u64 program_id;
u64 code_address;
s32 code_num_pages;
- u32 flags;
+ CreateProcessFlag flags;
Handle reslimit;
s32 system_resource_num_pages;
};
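Because CreateProcessParameter::flags is now strongly typed, DECLARE_ENUM_FLAG_OPERATORS supplies the bitwise operators and flag sets compose directly. An illustrative combination for a typical 64-bit application (chosen for this example, not taken from any real title):

    using Kernel::Svc::CreateProcessFlag;

    // 64-bit code, full 64-bit address space layout, ASLR on, application pool.
    constexpr auto TypicalApplicationFlags =
        CreateProcessFlag::Is64Bit | CreateProcessFlag::AddressSpace64Bit |
        CreateProcessFlag::EnableAslr | CreateProcessFlag::IsApplication |
        CreateProcessFlag::PoolPartitionApplication;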