path: root/src/core/hle
author    liamwhite <liamwhite@users.noreply.github.com> 2022-11-05 18:25:29 +0100
committer GitHub <noreply@github.com> 2022-11-05 18:25:29 +0100
commit    4c198bbf06fe7b72d4718cf3571e99b6169e9f47 (patch)
tree      3f091be57ce7198ee53b721b160a454ef6dee913 /src/core/hle
parent    Merge pull request #9189 from vonchenplus/stupid (diff)
parent    core: hle: kernel: Address review comments. (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h |    4
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp             |   74
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.h               |    2
-rw-r--r--  src/core/hle/kernel/k_class_token.cpp                    |    3
-rw-r--r--  src/core/hle/kernel/k_class_token.h                      |    6
-rw-r--r--  src/core/hle/kernel/k_debug.h                            |   20
-rw-r--r--  src/core/hle/kernel/k_dynamic_page_manager.h             |   67
-rw-r--r--  src/core/hle/kernel/k_dynamic_resource_manager.h         |    3
-rw-r--r--  src/core/hle/kernel/k_event_info.h                       |   64
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp                   |   33
-rw-r--r--  src/core/hle/kernel/k_handle_table.h                     |  106
-rw-r--r--  src/core/hle/kernel/k_memory_block.h                     |  110
-rw-r--r--  src/core/hle/kernel/k_memory_layout.cpp                  |   10
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h                    |   19
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp                 |  270
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h                   |  259
-rw-r--r--  src/core/hle/kernel/k_memory_region_type.h               |  123
-rw-r--r--  src/core/hle/kernel/k_page_bitmap.h                      |  243
-rw-r--r--  src/core/hle/kernel/k_page_buffer.h                      |   14
-rw-r--r--  src/core/hle/kernel/k_page_group.h                       |   86
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp                      |   86
-rw-r--r--  src/core/hle/kernel/k_page_heap.h                        |   39
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp                     | 1129
-rw-r--r--  src/core/hle/kernel/k_page_table.h                       |  142
-rw-r--r--  src/core/hle/kernel/k_page_table_manager.h               |   55
-rw-r--r--  src/core/hle/kernel/k_page_table_slab_heap.h             |   93
-rw-r--r--  src/core/hle/kernel/k_process.cpp                        |    4
-rw-r--r--  src/core/hle/kernel/k_system_resource.cpp                |   26
-rw-r--r--  src/core/hle/kernel/k_system_resource.h                  |  137
-rw-r--r--  src/core/hle/kernel/kernel.cpp                           |  127
-rw-r--r--  src/core/hle/kernel/kernel.h                             |   23
-rw-r--r--  src/core/hle/kernel/slab_helpers.h                       |   78
-rw-r--r--  src/core/hle/kernel/svc.cpp                              |    2
-rw-r--r--  src/core/hle/kernel/svc_results.h                        |    1
-rw-r--r--  src/core/hle/kernel/svc_types.h                          |   40
-rw-r--r--  src/core/hle/result.h                                    |   13
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp             |    6
37 files changed, 2780 insertions(+), 737 deletions(-)
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index fe375769e..4b717d091 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -9,6 +9,10 @@ namespace Kernel::Board::Nintendo::Nx {
class KSystemControl {
public:
+ // This can be overridden as needed.
+ static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB
+
+public:
class Init {
public:
// Initialization.
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 477e4e407..aa2dddcc6 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -10,7 +10,9 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_debug.h"
#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
@@ -22,6 +24,7 @@
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
+#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -44,7 +47,10 @@ namespace Kernel::Init {
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
##__VA_ARGS__) \
- HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
+ HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
+ HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
+ HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
+ HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)
namespace {
@@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
+constexpr size_t SlabcountKSessionRequestMappings = 40;
-constexpr size_t SlabCountExtraKThread = 160;
+constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;
+
+namespace test {
+
+static_assert(KernelPageBufferHeapSize ==
+ 2 * PageSize + (SlabCountKProcess + SlabCountKThread +
+ (SlabCountKProcess + SlabCountKThread) / 8) *
+ PageSize);
+static_assert(KernelPageBufferAdditionalSize ==
+ (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
+
+} // namespace test
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
@@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
}
size_t CalculateSlabHeapGapSize() {
- constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
+ constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
@@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
.num_KDebug = SlabCountKDebug,
.num_KIoPool = SlabCountKIoPool,
.num_KIoRegion = SlabCountKIoRegion,
+ .num_KSessionRequestMappings = SlabcountKSessionRequestMappings,
};
}
@@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
return size;
}
-void InitializeKPageBufferSlabHeap(Core::System& system) {
- auto& kernel = system.Kernel();
-
- const auto& counts = kernel.SlabResourceCounts();
- const size_t num_pages =
- counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
- const size_t slab_size = num_pages * PageSize;
-
- // Reserve memory from the system resource limit.
- ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
-
- // Allocate memory for the slab.
- constexpr auto AllocateOption = KMemoryManager::EncodeOption(
- KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
- const PAddr slab_address =
- kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
- ASSERT(slab_address != 0);
-
- // Initialize the slabheap.
- KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
- slab_size);
-}
-
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
@@ -258,3 +254,29 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
} // namespace Kernel::Init
+
+namespace Kernel {
+
+void KPageBufferSlabHeap::Initialize(Core::System& system) {
+ auto& kernel = system.Kernel();
+ const auto& counts = kernel.SlabResourceCounts();
+ const size_t num_pages =
+ counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+ const size_t slab_size = num_pages * PageSize;
+
+ // Reserve memory from the system resource limit.
+ ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
+
+ // Allocate memory for the slab.
+ constexpr auto AllocateOption = KMemoryManager::EncodeOption(
+ KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
+ const PAddr slab_address =
+ kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
+ ASSERT(slab_address != 0);
+
+ // Initialize the slabheap.
+ KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
+ slab_size);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
index 13be63c87..5e22821bc 100644
--- a/src/core/hle/kernel/init/init_slab_setup.h
+++ b/src/core/hle/kernel/init/init_slab_setup.h
@@ -33,11 +33,11 @@ struct KSlabResourceCounts {
size_t num_KDebug;
size_t num_KIoPool;
size_t num_KIoRegion;
+ size_t num_KSessionRequestMappings;
};
void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
-void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
} // namespace Kernel::Init
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 10265c23c..a850db3c4 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject,
// static_assert(std::is_final_v<KCodeMemory> &&
// std::is_base_of_v<KAutoObject, KCodeMemory>);
+static_assert(std::is_base_of_v<KAutoObject, KSystemResource>);
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index ab20e00ff..e75b1c035 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
@@ -10,6 +10,8 @@ namespace Kernel {
class KAutoObject;
+class KSystemResource;
+
class KClassTokenGenerator {
public:
using TokenBaseType = u16;
@@ -58,7 +60,7 @@ private:
if constexpr (std::is_same<T, KAutoObject>::value) {
static_assert(T::ObjectType == ObjectType::KAutoObject);
return 0;
- } else if constexpr (!std::is_final<T>::value) {
+ } else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
T::ObjectType < ObjectType::BaseClassesEnd);
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
@@ -108,6 +110,8 @@ public:
KSessionRequest,
KCodeMemory,
+ KSystemResource,
+
// NOTE: True order for these has not been determined yet.
KAlpha,
KBeta,
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
new file mode 100644
index 000000000..e3a0689c8
--- /dev/null
+++ b/src/core/hle/kernel/k_debug.h
@@ -0,0 +1,20 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> {
+ KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
+
+public:
+ explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+
+ static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index 9076c8fa3..ac80d60a1 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -3,6 +3,8 @@
#pragma once
+#include <vector>
+
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
@@ -33,28 +35,36 @@ public:
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
- Result Initialize(VAddr addr, size_t sz) {
+ Result Initialize(VAddr memory, size_t size, size_t align) {
// We need to have positive size.
- R_UNLESS(sz > 0, ResultOutOfMemory);
- m_backing_memory.resize(sz);
+ R_UNLESS(size > 0, ResultOutOfMemory);
+ m_backing_memory.resize(size);
+
+ // Set addresses.
+ m_address = memory;
+ m_aligned_address = Common::AlignDown(memory, align);
- // Calculate management overhead.
- const size_t management_size =
- KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
- const size_t allocatable_size = sz - management_size;
+ // Calculate extents.
+ const size_t managed_size = m_address + size - m_aligned_address;
+ const size_t overhead_size = Common::AlignUp(
+ KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
+ sizeof(PageBuffer));
+ R_UNLESS(overhead_size < size, ResultOutOfMemory);
// Set tracking fields.
- m_address = addr;
- m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
- m_count = allocatable_size / sizeof(PageBuffer);
- R_UNLESS(m_count > 0, ResultOutOfMemory);
+ m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
+ m_count = m_size / sizeof(PageBuffer);
// Clear the management region.
- u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
- std::memset(management_ptr, 0, management_size);
+ u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
+ std::memset(management_ptr, 0, overhead_size);
// Initialize the bitmap.
- m_page_bitmap.Initialize(management_ptr, m_count);
+ const size_t allocatable_region_size =
+ (m_address + size - overhead_size) - m_aligned_address;
+ ASSERT(allocatable_region_size >= sizeof(PageBuffer));
+
+ m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));
// Free the pages to the bitmap.
for (size_t i = 0; i < m_count; i++) {
@@ -62,7 +72,8 @@ public:
std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
// Set the bit for the free page.
- m_page_bitmap.SetBit(i);
+ m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
+ sizeof(PageBuffer));
}
R_SUCCEED();
@@ -101,7 +112,28 @@ public:
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));
- return GetPointer<PageBuffer>(m_address) + offset;
+ return GetPointer<PageBuffer>(m_aligned_address) + offset;
+ }
+
+ PageBuffer* Allocate(size_t count) {
+ // Take the lock.
+ // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ KScopedSpinLock lk(m_lock);
+
+ // Find a random free block.
+ s64 soffset = m_page_bitmap.FindFreeRange(count);
+ if (soffset < 0) [[likely]] {
+ return nullptr;
+ }
+
+ const size_t offset = static_cast<size_t>(soffset);
+
+ // Update our tracking.
+ m_page_bitmap.ClearRange(offset, count);
+ m_used += count;
+ m_peak = std::max(m_peak, m_used);
+
+ return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
void Free(PageBuffer* pb) {
@@ -113,7 +145,7 @@ public:
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
- size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
+ size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
@@ -127,6 +159,7 @@ private:
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
+ VAddr m_aligned_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
index 1ce517e8e..b6a27d648 100644
--- a/src/core/hle/kernel/k_dynamic_resource_manager.h
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
@@ -6,6 +6,7 @@
#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_page_group.h"
namespace Kernel {
@@ -51,8 +52,10 @@ private:
DynamicSlabType* m_slab_heap{};
};
+class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {};
class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
+using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;
using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event_info.h b/src/core/hle/kernel/k_event_info.h
new file mode 100644
index 000000000..25b3ff594
--- /dev/null
+++ b/src/core/hle/kernel/k_event_info.h
@@ -0,0 +1,64 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+
+#include <boost/intrusive/list.hpp>
+
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Kernel {
+
+class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> {
+public:
+ struct InfoCreateThread {
+ u32 thread_id{};
+ uintptr_t tls_address{};
+ };
+
+ struct InfoExitProcess {
+ Svc::ProcessExitReason reason{};
+ };
+
+ struct InfoExitThread {
+ Svc::ThreadExitReason reason{};
+ };
+
+ struct InfoException {
+ Svc::DebugException exception_type{};
+ s32 exception_data_count{};
+ uintptr_t exception_address{};
+ std::array<uintptr_t, 4> exception_data{};
+ };
+
+ struct InfoSystemCall {
+ s64 tick{};
+ s32 id{};
+ };
+
+public:
+ KEventInfo() = default;
+ ~KEventInfo() = default;
+
+public:
+ Svc::DebugEvent event{};
+ u32 thread_id{};
+ u32 flags{};
+ bool is_attached{};
+ bool continue_flag{};
+ bool ignore_continue{};
+ bool close_once{};
+ union {
+ InfoCreateThread create_thread;
+ InfoExitProcess exit_process;
+ InfoExitThread exit_thread;
+ InfoException exception;
+ InfoSystemCall system_call;
+ } info{};
+ KThread* debug_thread{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index e830ca46e..1c7a766c8 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -5,14 +5,11 @@
namespace Kernel {
-KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
-KHandleTable::~KHandleTable() = default;
-
Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
std::swap(m_table_size, saved_table_size);
@@ -25,28 +22,28 @@ Result KHandleTable::Finalize() {
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
- if (Svc::IsPseudoHandle(handle)) {
+ if (Svc::IsPseudoHandle(handle)) [[unlikely]] {
return false;
}
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
- if (handle_pack.reserved != 0) {
+ if (handle_pack.reserved != 0) [[unlikely]] {
return false;
}
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
- if (this->IsValidHandle(handle)) {
+ if (this->IsValidHandle(handle)) [[likely]] {
const auto index = handle_pack.index;
obj = m_objects[index];
@@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) {
}
// Close the object.
- kernel.UnregisterInUseObject(obj);
+ m_kernel.UnregisterInUseObject(obj);
obj->Close();
return true;
}
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
@@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
}
- return ResultSuccess;
+ R_SUCCEED();
}
Result KHandleTable::Reserve(Handle* out_handle) {
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
*out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
- return ResultSuccess;
+ R_SUCCEED();
}
void KHandleTable::Unreserve(Handle handle) {
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
- if (index < m_table_size) {
+ if (index < m_table_size) [[likely]] {
// NOTE: This code does not check the linear id.
ASSERT(m_objects[index] == nullptr);
this->FreeEntry(index);
@@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) {
}
void KHandleTable::Register(Handle handle, KAutoObject* obj) {
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
- if (index < m_table_size) {
+ if (index < m_table_size) [[likely]] {
// Set the entry.
ASSERT(m_objects[index] == nullptr);
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 0864a737c..65cae3b27 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -21,33 +21,38 @@ namespace Kernel {
class KernelCore;
class KHandleTable {
-public:
YUZU_NON_COPYABLE(KHandleTable);
YUZU_NON_MOVEABLE(KHandleTable);
+public:
static constexpr size_t MaxTableSize = 1024;
- explicit KHandleTable(KernelCore& kernel_);
- ~KHandleTable();
+public:
+ explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}
Result Initialize(s32 size) {
+ // Check that the table size is valid.
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
+ // Lock.
+ KScopedDisableDispatch dd{m_kernel};
+ KScopedSpinLock lk(m_lock);
+
// Initialize all fields.
m_max_count = 0;
- m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
+ m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
// Free all entries.
- for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
+ for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
- m_entry_infos[i].next_free_index = i - 1;
+ m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
m_free_head_index = i;
}
- return ResultSuccess;
+ R_SUCCEED();
}
size_t GetTableSize() const {
@@ -66,13 +71,13 @@ public:
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
- if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
+ if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
return obj->DynamicCast<T*>();
} else {
return nullptr;
@@ -85,13 +90,13 @@ public:
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
- auto* const cur_process = kernel.CurrentProcess();
+ auto* const cur_process = m_kernel.CurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == Svc::PseudoHandle::CurrentThread) {
- auto* const cur_thread = GetCurrentThreadPointer(kernel);
+ auto* const cur_thread = GetCurrentThreadPointer(m_kernel);
ASSERT(cur_thread != nullptr);
return cur_thread;
}
@@ -100,6 +105,37 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
+ KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
+ // Lock and look up in table.
+ KScopedDisableDispatch dd{m_kernel};
+ KScopedSpinLock lk(m_lock);
+
+ return this->GetObjectImpl(handle);
+ }
+
+ KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const {
+ // Handle pseudo-handles.
+ ASSERT(cur_thread != nullptr);
+ if (handle == Svc::PseudoHandle::CurrentProcess) {
+ auto* const cur_process =
+ static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess()));
+ ASSERT(cur_process != nullptr);
+ return cur_process;
+ }
+ if (handle == Svc::PseudoHandle::CurrentThread) {
+ return static_cast<KAutoObject*>(cur_thread);
+ }
+
+ return GetObjectForIpcWithoutPseudoHandle(handle);
+ }
+
+ KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const {
+ KScopedDisableDispatch dd{m_kernel};
+ KScopedSpinLock lk(m_lock);
+
+ return this->GetObjectByIndexImpl(out_handle, index);
+ }
+
Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);
@@ -112,7 +148,7 @@ public:
size_t num_opened;
{
// Lock the table.
- KScopedDisableDispatch dd(kernel);
+ KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
@@ -120,13 +156,13 @@ public:
// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
- if (cur_object == nullptr) {
+ if (cur_object == nullptr) [[unlikely]] {
break;
}
// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
- if (cur_t == nullptr) {
+ if (cur_t == nullptr) [[unlikely]] {
break;
}
@@ -137,7 +173,7 @@ public:
}
// If we converted every object, succeed.
- if (num_opened == num_handles) {
+ if (num_opened == num_handles) [[likely]] {
return true;
}
@@ -191,21 +227,21 @@ private:
ASSERT(reserved == 0);
// Validate our indexing information.
- if (raw_value == 0) {
+ if (raw_value == 0) [[unlikely]] {
return false;
}
- if (linear_id == 0) {
+ if (linear_id == 0) [[unlikely]] {
return false;
}
- if (index >= m_table_size) {
+ if (index >= m_table_size) [[unlikely]] {
return false;
}
// Check that there's an object, and our serial id is correct.
- if (m_objects[index] == nullptr) {
+ if (m_objects[index] == nullptr) [[unlikely]] {
return false;
}
- if (m_entry_infos[index].GetLinearId() != linear_id) {
+ if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
return false;
}
@@ -215,11 +251,11 @@ private:
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
- if (handle_pack.reserved != 0) {
+ if (handle_pack.reserved != 0) [[unlikely]] {
return nullptr;
}
- if (this->IsValidHandle(handle)) {
+ if (this->IsValidHandle(handle)) [[likely]] {
return m_objects[handle_pack.index];
} else {
return nullptr;
@@ -227,9 +263,8 @@ private:
}
KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {
-
// Index must be in bounds.
- if (index >= m_table_size) {
+ if (index >= m_table_size) [[unlikely]] {
return nullptr;
}
@@ -244,18 +279,15 @@ private:
private:
union HandlePack {
- HandlePack() = default;
- HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
+ constexpr HandlePack() = default;
+ constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
- u32 raw;
+ u32 raw{};
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};
- static constexpr u16 MinLinearId = 1;
- static constexpr u16 MaxLinearId = 0x7FFF;
-
static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
@@ -264,6 +296,10 @@ private:
return handle.raw;
}
+private:
+ static constexpr u16 MinLinearId = 1;
+ static constexpr u16 MaxLinearId = 0x7FFF;
+
union EntryInfo {
u16 linear_id;
s16 next_free_index;
@@ -271,21 +307,21 @@ private:
constexpr u16 GetLinearId() const {
return linear_id;
}
- constexpr s16 GetNextFreeIndex() const {
+ constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
};
private:
+ KernelCore& m_kernel;
std::array<EntryInfo, MaxTableSize> m_entry_infos{};
std::array<KAutoObject*, MaxTableSize> m_objects{};
- s32 m_free_head_index{-1};
+ mutable KSpinLock m_lock;
+ s32 m_free_head_index{};
u16 m_table_size{};
u16 m_max_count{};
- u16 m_next_linear_id{MinLinearId};
+ u16 m_next_linear_id{};
u16 m_count{};
- mutable KSpinLock m_lock;
- KernelCore& kernel;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 9444f6bd2..6f845d675 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -35,26 +35,32 @@ enum class KMemoryState : u32 {
FlagCanMapProcess = (1 << 23),
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
+ FlagLinearMapped = (1 << 26),
FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
- FlagReferenceCounted | FlagCanChangeAttribute,
+ FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped,
FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
- FlagCanAlignedDeviceMap | FlagReferenceCounted,
+ FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped,
- FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
+ FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap |
+ FlagLinearMapped,
Free = static_cast<u32>(Svc::MemoryState::Free),
- Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
+ Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
+ FlagCanAlignedDeviceMap,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
- Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
+ Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
+ FlagLinearMapped,
+
+ // Alias was removed after 1.0.0.
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
@@ -67,18 +73,18 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
- ThreadLocal =
- static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
+ ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
- Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
+ Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
- SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
+ SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
- FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+ FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc |
+ FlagCanUseNonDeviceIpc,
Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),
@@ -91,69 +97,69 @@ enum class KMemoryState : u32 {
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
- FlagReferenceCounted | FlagCanDebug,
- CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
+ FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
+ CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted |
+ FlagLinearMapped,
Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped,
+
+ Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
+ FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
+ FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
-static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
+static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
-static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
-static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
-static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
-static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
-static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
-static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
-static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
-static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
-static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
-static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D);
-static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E);
-static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
+static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
+static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
+static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
+static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
+
+static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
+static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
+static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
+static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
+static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
+static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
+static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
+static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
-static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
-static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812);
+static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
+static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
-static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214);
-static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
+static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
+static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
+static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
enum class KMemoryPermission : u8 {
None = 0,
All = static_cast<u8>(~None),
- Read = 1 << 0,
- Write = 1 << 1,
- Execute = 1 << 2,
-
- ReadAndWrite = Read | Write,
- ReadAndExecute = Read | Execute,
-
- UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
- Svc::MemoryPermission::Execute),
-
KernelShift = 3,
- KernelRead = Read << KernelShift,
- KernelWrite = Write << KernelShift,
- KernelExecute = Execute << KernelShift,
+ KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift,
+ KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift,
+ KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift,
NotMapped = (1 << (2 * KernelShift)),
KernelReadWrite = KernelRead | KernelWrite,
KernelReadExecute = KernelRead | KernelExecute,
- UserRead = Read | KernelRead,
- UserWrite = Write | KernelWrite,
- UserExecute = Execute,
+ UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead,
+ UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite,
+ UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute),
UserReadWrite = UserRead | UserWrite,
UserReadExecute = UserRead | UserExecute,
- IpcLockChangeMask = NotMapped | UserReadWrite
+ UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
+ Svc::MemoryPermission::Execute),
+
+ IpcLockChangeMask = NotMapped | UserReadWrite,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
@@ -468,6 +474,7 @@ public:
constexpr void UpdateDeviceDisableMergeStateForShareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ // New permission/right aren't used.
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
@@ -478,6 +485,7 @@ public:
constexpr void UpdateDeviceDisableMergeStateForShareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+ // New permission/left aren't used.
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
@@ -494,6 +502,8 @@ public:
constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
+ // New permission isn't used.
+
// We must either be shared or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
m_device_use_count == 0);
@@ -509,6 +519,7 @@ public:
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+ // New permission/right aren't used.
if (left) {
if (!m_device_disable_merge_left_count) {
@@ -528,6 +539,8 @@ public:
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+ // New permission/left aren't used.
+
if (right) {
const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
ASSERT(old_device_disable_merge_right_count > 0);
@@ -546,6 +559,8 @@ public:
constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
+ // New permission isn't used.
+
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
@@ -563,6 +578,7 @@ public:
constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
+ // New permission isn't used.
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
@@ -613,6 +629,8 @@ public:
constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
[[maybe_unused]] bool right) {
+ // New permission isn't used.
+
// We must be locked.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index 55dc296d0..72c3ee4b7 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_
}
}
-size_t KMemoryLayout::GetResourceRegionSizeForInit() {
- // Calculate resource region size based on whether we allow extra threads.
- const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
- size_t resource_region_size =
- KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);
-
- return resource_region_size;
+size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) {
+ return KernelResourceSize + KSystemControl::SecureAppletMemorySize +
+ (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 884fc623a..fd6e1d3e6 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
-constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
+constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
+constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
+constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
-constexpr std::size_t KernelResourceSize =
- KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
+constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
+ KernelSlabHeapSize + KernelPageBufferHeapSize;
constexpr bool IsKernelAddressKey(VAddr key) {
return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
@@ -168,6 +170,11 @@ public:
KMemoryRegionType_VirtualDramKernelTraceBuffer));
}
+ const KMemoryRegion& GetSecureAppletMemoryRegion() {
+ return Dereference(GetVirtualMemoryRegionTree().FindByType(
+ KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
+ }
+
const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
return Dereference(FindVirtualLinear(address));
}
@@ -229,7 +236,7 @@ public:
void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
VAddr linear_virtual_start);
- static size_t GetResourceRegionSizeForInit();
+ static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
auto GetKernelRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
@@ -279,6 +286,10 @@ public:
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelSlab);
}
+ auto GetKernelSecureAppletMemoryRegionPhysicalExtents() {
+ return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+ KMemoryRegionType_DramKernelSecureAppletMemory);
+ }
auto GetKernelPageTableHeapRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelPtHeap);
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 646711505..c4bf306e8 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
return KMemoryManager::Pool::SystemNonSecure;
} else {
- ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
- return {};
+ UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
}
}
} // namespace
-KMemoryManager::KMemoryManager(Core::System& system_)
- : system{system_}, pool_locks{
- KLightLock{system_.Kernel()},
- KLightLock{system_.Kernel()},
- KLightLock{system_.Kernel()},
- KLightLock{system_.Kernel()},
- } {}
+KMemoryManager::KMemoryManager(Core::System& system)
+ : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
+ m_pool_locks{
+ KLightLock{system.Kernel()},
+ KLightLock{system.Kernel()},
+ KLightLock{system.Kernel()},
+ KLightLock{system.Kernel()},
+ } {}
void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
// Clear the management region to zero.
const VAddr management_region_end = management_region + management_region_size;
+ // std::memset(GetVoidPointer(management_region), 0, management_region_size);
// Reset our manager count.
- num_managers = 0;
+ m_num_managers = 0;
// Traverse the virtual memory layout tree, initializing each manager as appropriate.
- while (num_managers != MaxManagerCount) {
+ while (m_num_managers != MaxManagerCount) {
// Locate the region that should initialize the current manager.
PAddr region_address = 0;
size_t region_size = 0;
Pool region_pool = Pool::Count;
- for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+ for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
// We only care about regions that we need to create managers for.
if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
continue;
}
// We want to initialize the managers in order.
- if (it.GetAttributes() != num_managers) {
+ if (it.GetAttributes() != m_num_managers) {
continue;
}
@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
}
// Initialize a new manager for the region.
- Impl* manager = std::addressof(managers[num_managers++]);
- ASSERT(num_managers <= managers.size());
+ Impl* manager = std::addressof(m_managers[m_num_managers++]);
+ ASSERT(m_num_managers <= m_managers.size());
const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
management_region_end, region_pool);
@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Insert the manager into the pool list.
const auto region_pool_index = static_cast<u32>(region_pool);
- if (pool_managers_tail[region_pool_index] == nullptr) {
- pool_managers_head[region_pool_index] = manager;
+ if (m_pool_managers_tail[region_pool_index] == nullptr) {
+ m_pool_managers_head[region_pool_index] = manager;
} else {
- pool_managers_tail[region_pool_index]->SetNext(manager);
- manager->SetPrev(pool_managers_tail[region_pool_index]);
+ m_pool_managers_tail[region_pool_index]->SetNext(manager);
+ manager->SetPrev(m_pool_managers_tail[region_pool_index]);
}
- pool_managers_tail[region_pool_index] = manager;
+ m_pool_managers_tail[region_pool_index] = manager;
}
// Free each region to its corresponding heap.
@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
const PAddr ini_last = ini_end - 1;
- for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+ for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
// Get the manager for the region.
- auto index = it.GetAttributes();
- auto& manager = managers[index];
+ auto& manager = m_managers[it.GetAttributes()];
const PAddr cur_start = it.GetAddress();
const PAddr cur_last = it.GetLastAddress();
@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
}
// Update the used size for all managers.
- for (size_t i = 0; i < num_managers; ++i) {
- managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
+ for (size_t i = 0; i < m_num_managers; ++i) {
+ m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
}
}
+Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
+ UNREACHABLE();
+}
+
+void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
+ UNREACHABLE();
+}
+
PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
// Early return if we're allocating no pages.
if (num_pages == 0) {
@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
// Lock the pool that we're allocating from.
const auto [pool, dir] = DecodeOption(option);
- KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
+ KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
PAddr allocated_block = 0;
for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
chosen_manager = this->GetNextManager(chosen_manager, dir)) {
- allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+ allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
if (allocated_block != 0) {
break;
}
@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
return 0;
}
- // If we allocated more than we need, free some.
- const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
- if (allocated_pages > num_pages) {
- chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+ // Maintain the optimized memory bitmap, if we should.
+ if (m_has_optimized_process[static_cast<size_t>(pool)]) {
+ UNIMPLEMENTED();
}
// Open the first reference to the pages.
@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
}
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
- Direction dir, bool random) {
+ Direction dir, bool unoptimized, bool random) {
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, ResultOutOfMemory);
// Ensure that we don't leave anything un-freed.
- auto group_guard = SCOPE_GUARD({
+ ON_RESULT_FAILURE {
for (const auto& it : out->Nodes()) {
- auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
- const size_t num_pages_to_free =
+ auto& manager = this->GetManager(it.GetAddress());
+ const size_t node_num_pages =
std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
- manager.Free(it.GetAddress(), num_pages_to_free);
+ manager.Free(it.GetAddress(), node_num_pages);
}
- });
+ out->Finalize();
+ };
// Keep allocating until we've allocated all our pages.
for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
break;
}
- // Safely add it to our group.
- {
- auto block_guard =
- SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
- R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
- block_guard.Cancel();
+ // Ensure we don't leak the block if we fail.
+ ON_RESULT_FAILURE_2 {
+ cur_manager->Free(allocated_block, pages_per_alloc);
+ };
+
+ // Add the block to our group.
+ R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+
+ // Maintain the optimized memory bitmap, if we should.
+ if (unoptimized) {
+ UNIMPLEMENTED();
}
num_pages -= pages_per_alloc;
@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
R_UNLESS(num_pages == 0, ResultOutOfMemory);
// We succeeded!
- group_guard.Cancel();
- return ResultSuccess;
+ R_SUCCEED();
}
Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
// Lock the pool that we're allocating from.
const auto [pool, dir] = DecodeOption(option);
- KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
// Allocate the page group.
- R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
+ m_has_optimized_process[static_cast<size_t>(pool)], true));
// Open the first reference to the pages.
for (const auto& block : out->Nodes()) {
@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
// Get the manager for the current address.
- auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+ auto& manager = this->GetManager(cur_address);
// Process part or all of the block.
const size_t cur_pages =
@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
}
}
- return ResultSuccess;
+ R_SUCCEED();
}
-Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
- u64 process_id, u8 fill_pattern) {
+Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
+ u64 process_id, u8 fill_pattern) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);
@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
const auto [pool, dir] = DecodeOption(option);
// Allocate the memory.
+ bool optimized;
{
// Lock the pool that we're allocating from.
- KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+ // Check if we have an optimized process.
+ const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
+ const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;
// Allocate the page group.
- R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
+ false));
- // Open the first reference to the pages.
- for (const auto& block : out->Nodes()) {
- PAddr cur_address = block.GetAddress();
- size_t remaining_pages = block.GetNumPages();
- while (remaining_pages > 0) {
- // Get the manager for the current address.
- auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
-
- // Process part or all of the block.
- const size_t cur_pages =
- std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
- manager.OpenFirst(cur_address, cur_pages);
-
- // Advance.
- cur_address += cur_pages * PageSize;
- remaining_pages -= cur_pages;
- }
- }
+ // Set whether we should optimize.
+ optimized = has_optimized && is_optimized;
}
- // Set all the allocated memory.
- for (const auto& block : out->Nodes()) {
- std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
- block.GetSize());
- }
+ // Perform optimized memory tracking, if we should.
+ if (optimized) {
+ // Iterate over the allocated blocks.
+ for (const auto& block : out->Nodes()) {
+ // Get the block extents.
+ const PAddr block_address = block.GetAddress();
+ const size_t block_pages = block.GetNumPages();
- return ResultSuccess;
-}
+ // If it has no pages, we don't need to do anything.
+ if (block_pages == 0) {
+ continue;
+ }
-void KMemoryManager::Open(PAddr address, size_t num_pages) {
- // Repeatedly open references until we've done so for all pages.
- while (num_pages) {
- auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
- const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+ // Fill all the pages that we need to fill.
+ bool any_new = false;
+ {
+ PAddr cur_address = block_address;
+ size_t remaining_pages = block_pages;
+ while (remaining_pages > 0) {
+ // Get the manager for the current address.
+ auto& manager = this->GetManager(cur_address);
+
+ // Process part or all of the block.
+ const size_t cur_pages =
+ std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+ any_new =
+ manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ remaining_pages -= cur_pages;
+ }
+ }
- {
- KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
- manager.Open(address, cur_pages);
+ // If there are new pages, update tracking for the allocation.
+ if (any_new) {
+ // Update tracking for the allocation.
+ PAddr cur_address = block_address;
+ size_t remaining_pages = block_pages;
+ while (remaining_pages > 0) {
+ // Get the manager for the current address.
+ auto& manager = this->GetManager(cur_address);
+
+ // Lock the pool for the manager.
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+
+ // Track some or all of the current pages.
+ const size_t cur_pages =
+ std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+ manager.TrackOptimizedAllocation(cur_address, cur_pages);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ remaining_pages -= cur_pages;
+ }
+ }
}
-
- num_pages -= cur_pages;
- address += cur_pages * PageSize;
- }
-}
-
-void KMemoryManager::Close(PAddr address, size_t num_pages) {
- // Repeatedly close references until we've done so for all pages.
- while (num_pages) {
- auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
- const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
-
- {
- KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
- manager.Close(address, cur_pages);
+ } else {
+ // Set all the allocated memory.
+ for (const auto& block : out->Nodes()) {
+ std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
+ block.GetSize());
}
-
- num_pages -= cur_pages;
- address += cur_pages * PageSize;
}
-}
-void KMemoryManager::Close(const KPageGroup& pg) {
- for (const auto& node : pg.Nodes()) {
- Close(node.GetAddress(), node.GetNumPages());
- }
-}
-void KMemoryManager::Open(const KPageGroup& pg) {
- for (const auto& node : pg.Nodes()) {
- Open(node.GetAddress(), node.GetNumPages());
- }
+ R_SUCCEED();
}
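// A minimal sketch (not part of this change; the free-standing helper and its access to
// GetManager are assumptions) of the per-manager iteration pattern used above and by
// Open/Close/OpenFirst: a physical range is processed in chunks that never cross a
// manager boundary.
template <typename F>
void ForEachManagerRange(KMemoryManager& mm, PAddr address, size_t num_pages, F&& f) {
    while (num_pages > 0) {
        // Manager covering the current address, and how many pages remain before its end.
        auto& manager = mm.GetManager(address);
        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
        f(manager, address, cur_pages);
        // Advance past the chunk just handed to the callback.
        address += cur_pages * PageSize;
        num_pages -= cur_pages;
    }
}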
size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
ASSERT(Common::IsAligned(total_management_size, PageSize));
// Setup region.
- pool = p;
- management_region = management;
- page_reference_counts.resize(
+ m_pool = p;
+ m_management_region = management;
+ m_page_reference_counts.resize(
Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
- ASSERT(Common::IsAligned(management_region, PageSize));
+ ASSERT(Common::IsAligned(m_management_region, PageSize));
// Initialize the manager's KPageHeap.
- heap.Initialize(address, size, management + manager_size, page_heap_size);
+ m_heap.Initialize(address, size, management + manager_size, page_heap_size);
return total_management_size;
}
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
+ UNREACHABLE();
+}
+
+void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
+ UNREACHABLE();
+}
+
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
+ u8 fill_pattern) {
+ UNREACHABLE();
+}
+
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
const size_t optimize_map_size =
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index dcb9b6348..401d4e644 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -21,11 +21,8 @@ namespace Kernel {
class KPageGroup;
-class KMemoryManager final {
+class KMemoryManager {
public:
- YUZU_NON_COPYABLE(KMemoryManager);
- YUZU_NON_MOVEABLE(KMemoryManager);
-
enum class Pool : u32 {
Application = 0,
Applet = 1,
@@ -45,16 +42,85 @@ public:
enum class Direction : u32 {
FromFront = 0,
FromBack = 1,
-
Shift = 0,
Mask = (0xF << Shift),
};
- explicit KMemoryManager(Core::System& system_);
+ static constexpr size_t MaxManagerCount = 10;
+
+ explicit KMemoryManager(Core::System& system);
void Initialize(VAddr management_region, size_t management_region_size);
- constexpr size_t GetSize(Pool pool) const {
+ Result InitializeOptimizedMemory(u64 process_id, Pool pool);
+ void FinalizeOptimizedMemory(u64 process_id, Pool pool);
+
+ PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+ Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+ Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+ u8 fill_pattern);
+
+ Pool GetPool(PAddr address) const {
+ return this->GetManager(address).GetPool();
+ }
+
+ void Open(PAddr address, size_t num_pages) {
+ // Repeatedly open references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+ {
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.Open(address, cur_pages);
+ }
+
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
+ }
+ }
+
+ void OpenFirst(PAddr address, size_t num_pages) {
+ // Repeatedly open references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+ {
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.OpenFirst(address, cur_pages);
+ }
+
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
+ }
+ }
+
+ void Close(PAddr address, size_t num_pages) {
+ // Repeatedly close references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+ {
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.Close(address, cur_pages);
+ }
+
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
+ }
+ }
+
+ size_t GetSize() {
+ size_t total = 0;
+ for (size_t i = 0; i < m_num_managers; i++) {
+ total += m_managers[i].GetSize();
+ }
+ return total;
+ }
+
+ size_t GetSize(Pool pool) {
constexpr Direction GetSizeDirection = Direction::FromFront;
size_t total = 0;
for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
@@ -64,18 +130,36 @@ public:
return total;
}
- PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
- Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
- Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
- u8 fill_pattern);
+ size_t GetFreeSize() {
+ size_t total = 0;
+ for (size_t i = 0; i < m_num_managers; i++) {
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
+ total += m_managers[i].GetFreeSize();
+ }
+ return total;
+ }
- static constexpr size_t MaxManagerCount = 10;
+ size_t GetFreeSize(Pool pool) {
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+ constexpr Direction GetSizeDirection = Direction::FromFront;
+ size_t total = 0;
+ for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+ manager = this->GetNextManager(manager, GetSizeDirection)) {
+ total += manager->GetFreeSize();
+ }
+ return total;
+ }
- void Close(PAddr address, size_t num_pages);
- void Close(const KPageGroup& pg);
+ void DumpFreeList(Pool pool) {
+ KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
- void Open(PAddr address, size_t num_pages);
- void Open(const KPageGroup& pg);
+ constexpr Direction DumpDirection = Direction::FromFront;
+ for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
+ manager = this->GetNextManager(manager, DumpDirection)) {
+ manager->DumpFreeList();
+ }
+ }
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -88,14 +172,13 @@ public:
}
static constexpr Pool GetPool(u32 option) {
- return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
+ return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
static_cast<u32>(Pool::Shift));
}
static constexpr Direction GetDirection(u32 option) {
- return static_cast<Direction>(
- (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
- static_cast<u32>(Direction::Shift));
+ return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
+ static_cast<u32>(Direction::Shift));
}
static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
@@ -103,74 +186,88 @@ public:
}
private:
- class Impl final {
+ class Impl {
public:
- YUZU_NON_COPYABLE(Impl);
- YUZU_NON_MOVEABLE(Impl);
+ static size_t CalculateManagementOverheadSize(size_t region_size);
+
+ static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
+ return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+ Common::BitSize<u64>()) *
+ sizeof(u64);
+ }
+ public:
Impl() = default;
- ~Impl() = default;
size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
Pool p);
- VAddr AllocateBlock(s32 index, bool random) {
- return heap.AllocateBlock(index, random);
+ PAddr AllocateBlock(s32 index, bool random) {
+ return m_heap.AllocateBlock(index, random);
}
-
- void Free(VAddr addr, size_t num_pages) {
- heap.Free(addr, num_pages);
+ PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+ return m_heap.AllocateAligned(index, num_pages, align_pages);
+ }
+ void Free(PAddr addr, size_t num_pages) {
+ m_heap.Free(addr, num_pages);
}
void SetInitialUsedHeapSize(size_t reserved_size) {
- heap.SetInitialUsedSize(reserved_size);
+ m_heap.SetInitialUsedSize(reserved_size);
}
- constexpr Pool GetPool() const {
- return pool;
+ void InitializeOptimizedMemory() {
+ UNIMPLEMENTED();
}
+ void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
+ void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+
+ bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+
+ constexpr Pool GetPool() const {
+ return m_pool;
+ }
constexpr size_t GetSize() const {
- return heap.GetSize();
+ return m_heap.GetSize();
+ }
+ constexpr PAddr GetEndAddress() const {
+ return m_heap.GetEndAddress();
}
- constexpr VAddr GetAddress() const {
- return heap.GetAddress();
+ size_t GetFreeSize() const {
+ return m_heap.GetFreeSize();
}
- constexpr VAddr GetEndAddress() const {
- return heap.GetEndAddress();
+ void DumpFreeList() const {
+ UNIMPLEMENTED();
}
constexpr size_t GetPageOffset(PAddr address) const {
- return heap.GetPageOffset(address);
+ return m_heap.GetPageOffset(address);
}
-
constexpr size_t GetPageOffsetToEnd(PAddr address) const {
- return heap.GetPageOffsetToEnd(address);
+ return m_heap.GetPageOffsetToEnd(address);
}
constexpr void SetNext(Impl* n) {
- next = n;
+ m_next = n;
}
-
constexpr void SetPrev(Impl* n) {
- prev = n;
+ m_prev = n;
}
-
constexpr Impl* GetNext() const {
- return next;
+ return m_next;
}
-
constexpr Impl* GetPrev() const {
- return prev;
+ return m_prev;
}
void OpenFirst(PAddr address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
- const RefCount ref_count = (++page_reference_counts[index]);
+ const RefCount ref_count = (++m_page_reference_counts[index]);
ASSERT(ref_count == 1);
index++;
@@ -181,7 +278,7 @@ private:
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
- const RefCount ref_count = (++page_reference_counts[index]);
+ const RefCount ref_count = (++m_page_reference_counts[index]);
ASSERT(ref_count > 1);
index++;
@@ -195,8 +292,8 @@ private:
size_t free_start = 0;
size_t free_count = 0;
while (index < end) {
- ASSERT(page_reference_counts[index] > 0);
- const RefCount ref_count = (--page_reference_counts[index]);
+ ASSERT(m_page_reference_counts[index] > 0);
+ const RefCount ref_count = (--m_page_reference_counts[index]);
// Keep track of how many zero refcounts we see in a row, to minimize calls to free.
if (ref_count == 0) {
@@ -208,7 +305,7 @@ private:
}
} else {
if (free_count > 0) {
- this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+ this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
free_count = 0;
}
}
@@ -217,44 +314,36 @@ private:
}
if (free_count > 0) {
- this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+ this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
}
}
- static size_t CalculateManagementOverheadSize(size_t region_size);
-
- static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
- return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
- Common::BitSize<u64>()) *
- sizeof(u64);
- }
-
private:
using RefCount = u16;
- KPageHeap heap;
- std::vector<RefCount> page_reference_counts;
- VAddr management_region{};
- Pool pool{};
- Impl* next{};
- Impl* prev{};
+ KPageHeap m_heap;
+ std::vector<RefCount> m_page_reference_counts;
+ VAddr m_management_region{};
+ Pool m_pool{};
+ Impl* m_next{};
+ Impl* m_prev{};
};
private:
- Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
- return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+ Impl& GetManager(PAddr address) {
+ return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
- const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
- return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+ const Impl& GetManager(PAddr address) const {
+ return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
- constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
- return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
- : pool_managers_head[static_cast<size_t>(pool)];
+ constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
+ return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
+ : m_pool_managers_head[static_cast<size_t>(pool)];
}
- constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+ constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
if (dir == Direction::FromBack) {
return cur->GetPrev();
} else {
@@ -263,15 +352,21 @@ private:
}
Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
- bool random);
+ bool unoptimized, bool random);
private:
- Core::System& system;
- std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
- std::array<Impl*, MaxManagerCount> pool_managers_head{};
- std::array<Impl*, MaxManagerCount> pool_managers_tail{};
- std::array<Impl, MaxManagerCount> managers;
- size_t num_managers{};
+ template <typename T>
+ using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;
+
+ Core::System& m_system;
+ const KMemoryLayout& m_memory_layout;
+ PoolArray<KLightLock> m_pool_locks;
+ std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
+ std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
+ std::array<Impl, MaxManagerCount> m_managers;
+ size_t m_num_managers{};
+ PoolArray<u64> m_optimized_process_ids{};
+ PoolArray<bool> m_has_optimized_process{};
};
} // namespace Kernel
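// A minimal sketch (an assumption; the encoder's body is not shown in this change) of how
// an allocation option packs a Pool and a Direction into a single u32, consistent with
// GetPool/GetDirection above. KPageTable::InitializeForProcess below builds its
// m_allocate_option with KMemoryManager::EncodeOption in this shape.
constexpr u32 EncodeOption(KMemoryManager::Pool pool, KMemoryManager::Direction dir) {
    return (static_cast<u32>(pool) << static_cast<u32>(KMemoryManager::Pool::Shift)) |
           (static_cast<u32>(dir) << static_cast<u32>(KMemoryManager::Direction::Shift));
}
// Example: allocate from the application pool, filling the region from the back.
// const u32 option = EncodeOption(KMemoryManager::Pool::Application,
//                                 KMemoryManager::Direction::FromBack);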
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index 7e2fcccdc..e5630c1ac 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -142,32 +142,38 @@ private:
} // namespace impl
-constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
-constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
-constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
+constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
+
+constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
+constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
static_assert(KMemoryRegionType_Dram.GetValue() == 0x2);
-constexpr auto KMemoryRegionType_DramKernelBase =
+// constexpr inline auto KMemoryRegionType_CoreLocalRegion =
+// KMemoryRegionType_None.DeriveInitial(2).Finalize();
+// static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4);
+
+constexpr inline auto KMemoryRegionType_DramKernelBase =
KMemoryRegionType_Dram.DeriveSparse(0, 3, 0)
.SetAttribute(KMemoryRegionAttr_NoUserMap)
.SetAttribute(KMemoryRegionAttr_CarveoutProtected);
-constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
-constexpr auto KMemoryRegionType_DramHeapBase =
+constexpr inline auto KMemoryRegionType_DramReservedBase =
+ KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
+constexpr inline auto KMemoryRegionType_DramHeapBase =
KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelBase.GetValue() ==
(0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));
static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped));
-constexpr auto KMemoryRegionType_DramKernelCode =
+constexpr inline auto KMemoryRegionType_DramKernelCode =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0);
-constexpr auto KMemoryRegionType_DramKernelSlab =
+constexpr inline auto KMemoryRegionType_DramKernelSlab =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1);
-constexpr auto KMemoryRegionType_DramKernelPtHeap =
+constexpr inline auto KMemoryRegionType_DramKernelPtHeap =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(
KMemoryRegionAttr_LinearMapped);
-constexpr auto KMemoryRegionType_DramKernelInitPt =
+constexpr inline auto KMemoryRegionType_DramKernelInitPt =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(
KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelCode.GetValue() ==
@@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
(0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
-constexpr auto KMemoryRegionType_DramReservedEarly =
+constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
+ KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
+ KMemoryRegionAttr_LinearMapped);
+static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
+ (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
+ KMemoryRegionAttr_LinearMapped));
+
+constexpr inline auto KMemoryRegionType_DramReservedEarly =
KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramReservedEarly.GetValue() ==
(0x16 | KMemoryRegionAttr_NoUserMap));
-constexpr auto KMemoryRegionType_KernelTraceBuffer =
+constexpr inline auto KMemoryRegionType_KernelTraceBuffer =
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0)
.SetAttribute(KMemoryRegionAttr_LinearMapped)
.SetAttribute(KMemoryRegionAttr_UserReadOnly);
-constexpr auto KMemoryRegionType_OnMemoryBootImage =
+constexpr inline auto KMemoryRegionType_OnMemoryBootImage =
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1);
-constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
+constexpr inline auto KMemoryRegionType_DTB =
+ KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() ==
(0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));
static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);
static_assert(KMemoryRegionType_DTB.GetValue() == 0x256);
-constexpr auto KMemoryRegionType_DramPoolPartition =
+constexpr inline auto KMemoryRegionType_DramPoolPartition =
KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
(0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
-constexpr auto KMemoryRegionType_DramPoolManagement =
+constexpr inline auto KMemoryRegionType_DramPoolManagement =
KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
KMemoryRegionAttr_CarveoutProtected);
-constexpr auto KMemoryRegionType_DramUserPool =
+constexpr inline auto KMemoryRegionType_DramUserPool =
KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
(0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
@@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
(0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
-constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
-constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
-constexpr auto KMemoryRegionType_DramSystemNonSecurePool =
+constexpr inline auto KMemoryRegionType_DramApplicationPool =
+ KMemoryRegionType_DramUserPool.Derive(4, 0);
+constexpr inline auto KMemoryRegionType_DramAppletPool =
+ KMemoryRegionType_DramUserPool.Derive(4, 1);
+constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
KMemoryRegionType_DramUserPool.Derive(4, 2);
-constexpr auto KMemoryRegionType_DramSystemPool =
+constexpr inline auto KMemoryRegionType_DramSystemPool =
KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
(0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
@@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
(0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
-constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
-constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap =
+constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
+ KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
-constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
+constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
// UNUSED: .DeriveSparse(2, 2, 0);
-constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
+constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
-constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
+constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+ KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
+
+constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
+constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr auto KMemoryRegionType_VirtualDramUserPool =
+constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
-// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying
-// to understand why Nintendo made this choice.
+// NOTE: For unknown reason, the pools are derived out-of-order here.
+// It's worth eventually trying to understand why Nintendo made this choice.
// UNUSED: .Derive(6, 0);
// UNUSED: .Derive(6, 1);
-constexpr auto KMemoryRegionType_VirtualDramAppletPool =
+constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr auto KMemoryRegionType_VirtualDramApplicationPool =
+constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr auto KMemoryRegionType_VirtualDramSystemPool =
+constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
-constexpr auto KMemoryRegionType_ArchDeviceBase =
+constexpr inline auto KMemoryRegionType_ArchDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
-constexpr auto KMemoryRegionType_BoardDeviceBase =
+constexpr inline auto KMemoryRegionType_BoardDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5);
static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
@@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
#error "Unimplemented"
#else
// Default to no architecture devices.
-constexpr auto NumArchitectureDeviceRegions = 0;
+constexpr inline auto NumArchitectureDeviceRegions = 0;
#endif
static_assert(NumArchitectureDeviceRegions >= 0);
@@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0);
#include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc"
#else
// Default to no board devices.
-constexpr auto NumBoardDeviceRegions = 0;
+constexpr inline auto NumBoardDeviceRegions = 0;
#endif
static_assert(NumBoardDeviceRegions >= 0);
-constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
-constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
-constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
-constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
+constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
+constexpr inline auto KMemoryRegionType_KernelStack =
+ KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
+constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
+constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19);
static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);
static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49);
static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89);
-constexpr auto KMemoryRegionType_KernelMiscDerivedBase =
+constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase =
KMemoryRegionType_KernelMisc.DeriveTransition();
static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);
// UNUSED: .Derive(7, 0);
-constexpr auto KMemoryRegionType_KernelMiscMainStack =
+constexpr inline auto KMemoryRegionType_KernelMiscMainStack =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
-constexpr auto KMemoryRegionType_KernelMiscMappedDevice =
+constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
-constexpr auto KMemoryRegionType_KernelMiscExceptionStack =
+constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
-constexpr auto KMemoryRegionType_KernelMiscUnknownDebug =
+constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
// UNUSED: .Derive(7, 5);
-constexpr auto KMemoryRegionType_KernelMiscIdleStack =
+constexpr inline auto KMemoryRegionType_KernelMiscIdleStack =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49);
static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49);
@@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549);
static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349);
-constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
+constexpr inline auto KMemoryRegionType_KernelTemp =
+ KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
@@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
+ } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
+ return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {
diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h
index c97b3dc0b..0ff987732 100644
--- a/src/core/hle/kernel/k_page_bitmap.h
+++ b/src/core/hle/kernel/k_page_bitmap.h
@@ -16,107 +16,126 @@
namespace Kernel {
class KPageBitmap {
-private:
+public:
class RandomBitGenerator {
- private:
- Common::TinyMT rng{};
- u32 entropy{};
- u32 bits_available{};
+ public:
+ RandomBitGenerator() {
+ m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
+ }
+
+ u64 SelectRandomBit(u64 bitmap) {
+ u64 selected = 0;
+
+ for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0;
+ cur_num_bits /= 2) {
+ const u64 high = (bitmap >> cur_num_bits);
+ const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits)));
+
+ // Choose high if we have high and (don't have low or select high randomly).
+ if (high && (low == 0 || this->GenerateRandomBit())) {
+ bitmap = high;
+ selected += cur_num_bits;
+ } else {
+ bitmap = low;
+ selected += 0;
+ }
+ }
+
+ return selected;
+ }
+
+ u64 GenerateRandom(u64 max) {
+ // Determine the number of bits we need.
+ const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max));
+
+ // Generate a random value of the desired bitwidth.
+ const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed));
+
+ // Adjust the value to be in range.
+ return rnd - ((rnd / max) * max);
+ }
private:
void RefreshEntropy() {
- entropy = rng.GenerateRandomU32();
- bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
+ m_entropy = m_rng.GenerateRandomU32();
+ m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>());
}
bool GenerateRandomBit() {
- if (bits_available == 0) {
+ if (m_bits_available == 0) {
this->RefreshEntropy();
}
- const bool rnd_bit = (entropy & 1) != 0;
- entropy >>= 1;
- --bits_available;
+ const bool rnd_bit = (m_entropy & 1) != 0;
+ m_entropy >>= 1;
+ --m_bits_available;
return rnd_bit;
}
- public:
- RandomBitGenerator() {
- rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
- }
+ u64 GenerateRandomBits(u32 num_bits) {
+ u64 result = 0;
- std::size_t SelectRandomBit(u64 bitmap) {
- u64 selected = 0;
+ // Iteratively add random bits to our result.
+ while (num_bits > 0) {
+ // Ensure we have random bits to take from.
+ if (m_bits_available == 0) {
+ this->RefreshEntropy();
+ }
- u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
- u64 cur_mask = (1ULL << cur_num_bits) - 1;
+ // Determine how many bits to take this round.
+ const auto cur_bits = std::min(num_bits, m_bits_available);
- while (cur_num_bits) {
- const u64 low = (bitmap >> 0) & cur_mask;
- const u64 high = (bitmap >> cur_num_bits) & cur_mask;
+ // Generate mask for our current bits.
+ const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
- bool choose_low;
- if (high == 0) {
- // If only low val is set, choose low.
- choose_low = true;
- } else if (low == 0) {
- // If only high val is set, choose high.
- choose_low = false;
- } else {
- // If both are set, choose random.
- choose_low = this->GenerateRandomBit();
- }
+ // Add bits to output from our entropy.
+ result <<= cur_bits;
+ result |= (m_entropy & mask);
- // If we chose low, proceed with low.
- if (choose_low) {
- bitmap = low;
- selected += 0;
- } else {
- bitmap = high;
- selected += cur_num_bits;
- }
+ // Remove bits from our entropy.
+ m_entropy >>= cur_bits;
+ m_bits_available -= cur_bits;
- // Proceed.
- cur_num_bits /= 2;
- cur_mask >>= cur_num_bits;
+ // Advance.
+ num_bits -= cur_bits;
}
- return selected;
+ return result;
}
+
+ private:
+ Common::TinyMT m_rng;
+ u32 m_entropy{};
+ u32 m_bits_available{};
};
public:
- static constexpr std::size_t MaxDepth = 4;
-
-private:
- std::array<u64*, MaxDepth> bit_storages{};
- RandomBitGenerator rng{};
- std::size_t num_bits{};
- std::size_t used_depths{};
+ static constexpr size_t MaxDepth = 4;
public:
KPageBitmap() = default;
- constexpr std::size_t GetNumBits() const {
- return num_bits;
+ constexpr size_t GetNumBits() const {
+ return m_num_bits;
}
constexpr s32 GetHighestDepthIndex() const {
- return static_cast<s32>(used_depths) - 1;
+ return static_cast<s32>(m_used_depths) - 1;
}
- u64* Initialize(u64* storage, std::size_t size) {
+ u64* Initialize(u64* storage, size_t size) {
// Initially, everything is un-set.
- num_bits = 0;
+ m_num_bits = 0;
// Calculate the needed bitmap depth.
- used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
- ASSERT(used_depths <= MaxDepth);
+ m_used_depths = static_cast<size_t>(GetRequiredDepth(size));
+ ASSERT(m_used_depths <= MaxDepth);
// Set the bitmap pointers.
for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
- bit_storages[depth] = storage;
+ m_bit_storages[depth] = storage;
size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
storage += size;
+ m_end_storages[depth] = storage;
}
return storage;
@@ -128,19 +147,19 @@ public:
if (random) {
do {
- const u64 v = bit_storages[depth][offset];
+ const u64 v = m_bit_storages[depth][offset];
if (v == 0) {
// If depth is bigger than zero, then a previous level indicated a block was
// free.
ASSERT(depth == 0);
return -1;
}
- offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
+ offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v);
++depth;
- } while (depth < static_cast<s32>(used_depths));
+ } while (depth < static_cast<s32>(m_used_depths));
} else {
do {
- const u64 v = bit_storages[depth][offset];
+ const u64 v = m_bit_storages[depth][offset];
if (v == 0) {
// If depth is bigger than zero, then a previous level indicated a block was
// free.
@@ -149,28 +168,69 @@ public:
}
offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
++depth;
- } while (depth < static_cast<s32>(used_depths));
+ } while (depth < static_cast<s32>(m_used_depths));
}
return static_cast<s64>(offset);
}
- void SetBit(std::size_t offset) {
+ s64 FindFreeRange(size_t count) {
+ // Check that it is possible to find a range.
+ const u64* const storage_start = m_bit_storages[m_used_depths - 1];
+ const u64* const storage_end = m_end_storages[m_used_depths - 1];
+
+ // If we don't have a storage to iterate (or want more blocks than fit in a single storage),
+ // we can't find a free range.
+ if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) {
+ return -1;
+ }
+
+ // Walk the storages to select a random free range.
+ const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1);
+ const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1);
+
+ const u64 free_mask = (static_cast<u64>(1) << count) - 1;
+
+ size_t num_valid_options = 0;
+ s64 chosen_offset = -1;
+ for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) {
+ u64 storage = storage_start[storage_index];
+ for (size_t option = 0; option < options_per_storage; ++option) {
+ if ((storage & free_mask) == free_mask) {
+ // We've found a new valid option.
+ ++num_valid_options;
+
+ // Select the Kth valid option with probability 1/K. This leads to an overall
+ // uniform distribution.
+ if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) {
+ // This is our first option, so select it.
+ chosen_offset = storage_index * Common::BitSize<u64>() + option * count;
+ }
+ }
+ storage >>= count;
+ }
+ }
+
+ // Return the random offset we chose.
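// Hypothetical helper, not part of this change.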
+ return chosen_offset;
+ }
+
+ void SetBit(size_t offset) {
this->SetBit(this->GetHighestDepthIndex(), offset);
- num_bits++;
+ m_num_bits++;
}
- void ClearBit(std::size_t offset) {
+ void ClearBit(size_t offset) {
this->ClearBit(this->GetHighestDepthIndex(), offset);
- num_bits--;
+ m_num_bits--;
}
- bool ClearRange(std::size_t offset, std::size_t count) {
+ bool ClearRange(size_t offset, size_t count) {
s32 depth = this->GetHighestDepthIndex();
- u64* bits = bit_storages[depth];
- std::size_t bit_ind = offset / Common::BitSize<u64>();
- if (count < Common::BitSize<u64>()) {
- const std::size_t shift = offset % Common::BitSize<u64>();
+ u64* bits = m_bit_storages[depth];
+ size_t bit_ind = offset / Common::BitSize<u64>();
+ if (count < Common::BitSize<u64>()) [[likely]] {
+ const size_t shift = offset % Common::BitSize<u64>();
ASSERT(shift + count <= Common::BitSize<u64>());
// Check that all the bits are set.
const u64 mask = ((u64(1) << count) - 1) << shift;
@@ -189,8 +249,8 @@ public:
ASSERT(offset % Common::BitSize<u64>() == 0);
ASSERT(count % Common::BitSize<u64>() == 0);
// Check that all the bits are set.
- std::size_t remaining = count;
- std::size_t i = 0;
+ size_t remaining = count;
+ size_t i = 0;
do {
if (bits[bit_ind + i++] != ~u64(0)) {
return false;
@@ -209,18 +269,18 @@ public:
} while (remaining > 0);
}
- num_bits -= count;
+ m_num_bits -= count;
return true;
}
private:
- void SetBit(s32 depth, std::size_t offset) {
+ void SetBit(s32 depth, size_t offset) {
while (depth >= 0) {
- std::size_t ind = offset / Common::BitSize<u64>();
- std::size_t which = offset % Common::BitSize<u64>();
+ size_t ind = offset / Common::BitSize<u64>();
+ size_t which = offset % Common::BitSize<u64>();
const u64 mask = u64(1) << which;
- u64* bit = std::addressof(bit_storages[depth][ind]);
+ u64* bit = std::addressof(m_bit_storages[depth][ind]);
u64 v = *bit;
ASSERT((v & mask) == 0);
*bit = v | mask;
@@ -232,13 +292,13 @@ private:
}
}
- void ClearBit(s32 depth, std::size_t offset) {
+ void ClearBit(s32 depth, size_t offset) {
while (depth >= 0) {
- std::size_t ind = offset / Common::BitSize<u64>();
- std::size_t which = offset % Common::BitSize<u64>();
+ size_t ind = offset / Common::BitSize<u64>();
+ size_t which = offset % Common::BitSize<u64>();
const u64 mask = u64(1) << which;
- u64* bit = std::addressof(bit_storages[depth][ind]);
+ u64* bit = std::addressof(m_bit_storages[depth][ind]);
u64 v = *bit;
ASSERT((v & mask) != 0);
v &= ~mask;
@@ -252,7 +312,7 @@ private:
}
private:
- static constexpr s32 GetRequiredDepth(std::size_t region_size) {
+ static constexpr s32 GetRequiredDepth(size_t region_size) {
s32 depth = 0;
while (true) {
region_size /= Common::BitSize<u64>();
@@ -264,8 +324,8 @@ private:
}
public:
- static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
- std::size_t overhead_bits = 0;
+ static constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
+ size_t overhead_bits = 0;
for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
region_size =
Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
@@ -273,6 +333,13 @@ public:
}
return overhead_bits * sizeof(u64);
}
+
+private:
+ std::array<u64*, MaxDepth> m_bit_storages{};
+ std::array<u64*, MaxDepth> m_end_storages{};
+ RandomBitGenerator m_rng;
+ size_t m_num_bits{};
+ size_t m_used_depths{};
};
} // namespace Kernel
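// The "select the Kth valid option with probability 1/K" step in FindFreeRange above is
// single-element reservoir sampling: after N candidates, each one has been kept with
// probability 1/N. A standalone sketch of the same idea (illustration only, not part of
// this change):
#include <cstddef>
#include <random>
#include <vector>

int PickUniform(const std::vector<int>& candidates, std::mt19937_64& rng) {
    int chosen = -1; // sentinel when candidates is empty
    std::size_t seen = 0;
    for (const int value : candidates) {
        ++seen;
        // Replace the current pick with probability 1/seen.
        if (std::uniform_int_distribution<std::size_t>(0, seen - 1)(rng) == 0) {
            chosen = value;
        }
    }
    return chosen;
}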
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
index aef06e213..cfedaae61 100644
--- a/src/core/hle/kernel/k_page_buffer.h
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -11,6 +11,16 @@
namespace Kernel {
+class KernelCore;
+
+class KPageBufferSlabHeap : protected impl::KSlabHeapImpl {
+public:
+ static constexpr size_t BufferSize = PageSize;
+
+public:
+ void Initialize(Core::System& system);
+};
+
class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
public:
explicit KPageBuffer(KernelCore&) {}
@@ -21,8 +31,6 @@ public:
private:
[[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
};
-
-static_assert(sizeof(KPageBuffer) == PageSize);
-static_assert(alignof(KPageBuffer) == PageSize);
+static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index 968753992..316f172f2 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -5,6 +5,7 @@
#include <list>
+#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory_types.h"
@@ -12,6 +13,89 @@
namespace Kernel {
+class KPageGroup;
+
+class KBlockInfo {
+private:
+ friend class KPageGroup;
+
+public:
+ constexpr KBlockInfo() = default;
+
+ constexpr void Initialize(PAddr addr, size_t np) {
+ ASSERT(Common::IsAligned(addr, PageSize));
+ ASSERT(static_cast<u32>(np) == np);
+
+ m_page_index = static_cast<u32>(addr) / PageSize;
+ m_num_pages = static_cast<u32>(np);
+ }
+
+ constexpr PAddr GetAddress() const {
+ return m_page_index * PageSize;
+ }
+ constexpr size_t GetNumPages() const {
+ return m_num_pages;
+ }
+ constexpr size_t GetSize() const {
+ return this->GetNumPages() * PageSize;
+ }
+ constexpr PAddr GetEndAddress() const {
+ return (m_page_index + m_num_pages) * PageSize;
+ }
+ constexpr PAddr GetLastAddress() const {
+ return this->GetEndAddress() - 1;
+ }
+
+ constexpr KBlockInfo* GetNext() const {
+ return m_next;
+ }
+
+ constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const {
+ return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
+ }
+
+ constexpr bool operator==(const KBlockInfo& rhs) const {
+ return this->IsEquivalentTo(rhs);
+ }
+
+ constexpr bool operator!=(const KBlockInfo& rhs) const {
+ return !(*this == rhs);
+ }
+
+ constexpr bool IsStrictlyBefore(PAddr addr) const {
+ const PAddr end = this->GetEndAddress();
+
+ if (m_page_index != 0 && end == 0) {
+ return false;
+ }
+
+ return end < addr;
+ }
+
+ constexpr bool operator<(PAddr addr) const {
+ return this->IsStrictlyBefore(addr);
+ }
+
+ constexpr bool TryConcatenate(PAddr addr, size_t np) {
+ if (addr != 0 && addr == this->GetEndAddress()) {
+ m_num_pages += static_cast<u32>(np);
+ return true;
+ }
+ return false;
+ }
+
+private:
+ constexpr void SetNext(KBlockInfo* next) {
+ m_next = next;
+ }
+
+private:
+ KBlockInfo* m_next{};
+ u32 m_page_index{};
+ u32 m_num_pages{};
+};
+static_assert(sizeof(KBlockInfo) <= 0x10);
+
class KPageGroup final {
public:
class Node final {
@@ -92,6 +176,8 @@ public:
return nodes.empty();
}
+ void Finalize() {}
+
private:
std::list<Node> nodes;
};
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 5ede60168..7b02c7d8b 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
return num_free;
}
-PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
- if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+ if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
@@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
return 0;
}
+PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+ // Get the size and required alignment.
+ const size_t needed_size = num_pages * PageSize;
+ const size_t align_size = align_pages * PageSize;
+
+ // Determine meta-alignment of our desired alignment size.
+ const size_t align_shift = std::countr_zero(align_size);
+
+ // Decide on a block to allocate from.
+ constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
+ {
+ // By default, we'll want to look at all blocks larger than our current one.
+ s32 max_blocks = static_cast<s32>(m_num_blocks);
+
+ // Determine the maximum block we should try to allocate from.
+ size_t possible_alignments = 0;
+ for (s32 i = index; i < max_blocks; ++i) {
+ // Add the possible alignments from blocks at the current size.
+ possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
+ m_blocks[i].GetNumFreeBlocks();
+
+ // If there are enough possible alignments, we don't need to look at larger blocks.
+ if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
+ max_blocks = i + 1;
+ break;
+ }
+ }
+
+ // If we have any possible alignments which require a larger block, we need to pick one.
+ if (possible_alignments > 0 && index + 1 < max_blocks) {
+ // Select a random alignment from the possibilities.
+ const size_t rnd = m_rng.GenerateRandom(possible_alignments);
+
+ // Determine which block corresponds to the random alignment we chose.
+ possible_alignments = 0;
+ for (s32 i = index; i < max_blocks; ++i) {
+ // Add the possible alignments from blocks at the current size.
+ possible_alignments +=
+ (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
+ m_blocks[i].GetNumFreeBlocks();
+
+ // If the current block gets us to our random choice, use the current block.
+ if (rnd < possible_alignments) {
+ index = i;
+ break;
+ }
+ }
+ }
+ }
+
+ // Pop a block from the index we selected.
+ if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
+ // Determine how much size we have left over.
+ if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
+ leftover_size > 0) {
+ // Determine how many valid alignments we can have.
+ const size_t possible_alignments = 1 + (leftover_size >> align_shift);
+
+ // Select a random valid alignment.
+ const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
+
+ // Free memory before the random offset.
+ if (random_offset != 0) {
+ this->Free(addr, random_offset / PageSize);
+ }
+
+ // Advance our block by the random offset.
+ addr += random_offset;
+
+ // Free memory after our allocated block.
+ if (random_offset != leftover_size) {
+ this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
+ }
+ }
+
+ // Return the block we allocated.
+ return addr;
+ }
+
+ return 0;
+}
+
void KPageHeap::FreeBlock(PAddr block, s32 index) {
do {
block = m_blocks[index++].PushBlock(block);
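// A minimal worked example (illustration, not from this change) of how AllocateByRandom
// above counts candidate placements: a free block of size S can hold a request of size N
// at 1 + ((S - N) >> align_shift) aligned offsets.
constexpr size_t CountAlignedPlacements(size_t block_size, size_t needed_size,
                                        size_t align_shift) {
    return 1 + ((block_size - needed_size) >> align_shift);
}
// 64 KiB needed inside a 256 KiB free block, aligned to 16 KiB (align_shift = 14): 13 placements.
static_assert(CountAlignedPlacements(256 * 1024, 64 * 1024, 14) == 13);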
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 0917a8bed..9021edcf7 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -14,13 +14,9 @@
namespace Kernel {
-class KPageHeap final {
+class KPageHeap {
public:
- YUZU_NON_COPYABLE(KPageHeap);
- YUZU_NON_MOVEABLE(KPageHeap);
-
KPageHeap() = default;
- ~KPageHeap() = default;
constexpr PAddr GetAddress() const {
return m_heap_address;
@@ -57,7 +53,20 @@ public:
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- PAddr AllocateBlock(s32 index, bool random);
+ PAddr AllocateBlock(s32 index, bool random) {
+ if (random) {
+ const size_t block_pages = m_blocks[index].GetNumPages();
+ return this->AllocateByRandom(index, block_pages, block_pages);
+ } else {
+ return this->AllocateByLinearSearch(index);
+ }
+ }
+
+ PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+ // TODO: linear search support?
+ return this->AllocateByRandom(index, num_pages, align_pages);
+ }
+
void Free(PAddr addr, size_t num_pages);
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -68,7 +77,7 @@ public:
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
const size_t target_pages = std::max(num_pages, align_pages);
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
- if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
@@ -77,7 +86,7 @@ public:
static constexpr s32 GetBlockIndex(size_t num_pages) {
for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
- if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
@@ -85,7 +94,7 @@ public:
}
static constexpr size_t GetBlockSize(size_t index) {
- return size_t(1) << MemoryBlockPageShifts[index];
+ return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
}
static constexpr size_t GetBlockNumPages(size_t index) {
@@ -93,13 +102,9 @@ public:
}
private:
- class Block final {
+ class Block {
public:
- YUZU_NON_COPYABLE(Block);
- YUZU_NON_MOVEABLE(Block);
-
Block() = default;
- ~Block() = default;
constexpr size_t GetShift() const {
return m_block_shift;
@@ -201,6 +206,9 @@ private:
};
private:
+ PAddr AllocateByLinearSearch(s32 index);
+ PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
size_t num_block_shifts);
@@ -209,7 +217,8 @@ private:
size_t m_heap_size{};
size_t m_initial_used_size{};
size_t m_num_blocks{};
- std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+ std::array<Block, NumMemoryBlockPageShifts> m_blocks;
+ KPageBitmap::RandomBitGenerator m_rng;
std::vector<u64> m_management_data;
};
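// A minimal sketch (illustration only; the shift table here is hypothetical and smaller
// than the real MemoryBlockPageShifts) of the selection rule GetAlignedBlockIndex above
// implements: choose the smallest block class whose size covers max(num_pages, align_pages)
// pages, so the chosen class can always guarantee the requested alignment.
constexpr s32 SmallestFittingClass(size_t num_pages, size_t align_pages) {
    constexpr size_t ExampleShifts[] = {12, 16, 21}; // 4 KiB, 64 KiB, 2 MiB classes
    const size_t target_pages = std::max(num_pages, align_pages);
    for (size_t i = 0; i < 3; i++) {
        if (target_pages <= (static_cast<size_t>(1) << ExampleShifts[i]) / PageSize) {
            return static_cast<s32>(i);
        }
    }
    return -1;
}
static_assert(SmallestFittingClass(1, 1) == 0); // one page fits the 4 KiB class
static_assert(SmallestFittingClass(3, 4) == 1); // the alignment pushes it to the 64 KiB class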
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 307e491cb..fab55a057 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -15,6 +15,7 @@
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_control.h"
+#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
@@ -23,6 +24,61 @@ namespace Kernel {
namespace {
+class KScopedLightLockPair {
+ YUZU_NON_COPYABLE(KScopedLightLockPair);
+ YUZU_NON_MOVEABLE(KScopedLightLockPair);
+
+private:
+ KLightLock* m_lower;
+ KLightLock* m_upper;
+
+public:
+ KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
+ // Ensure our locks are in a consistent order.
+ if (std::addressof(lhs) <= std::addressof(rhs)) {
+ m_lower = std::addressof(lhs);
+ m_upper = std::addressof(rhs);
+ } else {
+ m_lower = std::addressof(rhs);
+ m_upper = std::addressof(lhs);
+ }
+
+ // Acquire both locks.
+ m_lower->Lock();
+ if (m_lower != m_upper) {
+ m_upper->Lock();
+ }
+ }
+
+ ~KScopedLightLockPair() {
+ // Unlock the upper lock.
+ if (m_upper != nullptr && m_upper != m_lower) {
+ m_upper->Unlock();
+ }
+
+ // Unlock the lower lock.
+ if (m_lower != nullptr) {
+ m_lower->Unlock();
+ }
+ }
+
+public:
+ // Utility.
+ void TryUnlockHalf(KLightLock& lock) {
+ // Only allow unlocking if the lock is half the pair.
+ if (m_lower != m_upper) {
+ // We want to be sure the lock is one we own.
+ if (m_lower == std::addressof(lock)) {
+ lock.Unlock();
+ m_lower = nullptr;
+ } else if (m_upper == std::addressof(lock)) {
+ lock.Unlock();
+ m_upper = nullptr;
+ }
+ }
+ }
+};
+
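// A minimal sketch (illustration only; the two functions are hypothetical) of why the
// address-ordered acquisition in KScopedLightLockPair above cannot deadlock: both
// constructions take the lower-addressed lock first, so no thread can hold one lock of
// the pair while waiting for the other.
void LockFromThreadOne(KLightLock& a, KLightLock& b) {
    KScopedLightLockPair lk(a, b); // lower-addressed of the two is locked first
}
void LockFromThreadTwo(KLightLock& a, KLightLock& b) {
    KScopedLightLockPair lk(b, a); // arguments swapped, but the same lock is still taken first
}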
using namespace Common::Literals;
constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
@@ -49,9 +105,10 @@ KPageTable::KPageTable(Core::System& system_)
KPageTable::~KPageTable() = default;
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, size_t code_size,
- KMemoryBlockSlabManager* mem_block_slab_manager,
- KMemoryManager::Pool pool) {
+ bool enable_das_merge, bool from_back,
+ KMemoryManager::Pool pool, VAddr code_addr,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
@@ -112,11 +169,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
// Set other basic fields
m_enable_aslr = enable_aslr;
- m_enable_device_address_space_merge = false;
+ m_enable_device_address_space_merge = enable_das_merge;
m_address_space_start = start;
m_address_space_end = end;
m_is_kernel = false;
- m_memory_block_slab_manager = mem_block_slab_manager;
+ m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
+ m_resource_limit = resource_limit;
// Determine the region we can place our undetermineds in
VAddr alloc_start{};
@@ -215,10 +274,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
}
}
- // Set heap members
+ // Set heap and fill members.
m_current_heap_end = m_heap_region_start;
m_max_heap_size = 0;
- m_max_physical_memory_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+ m_heap_fill_value = 0;
+ m_ipc_fill_value = 0;
+ m_stack_fill_value = 0;
+
+ // Set allocation option.
+ m_allocate_option =
+ KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
+ : KMemoryManager::Direction::FromFront);
// Ensure that the regions are inside our address space
auto IsInAddressSpace = [&](VAddr addr) {
@@ -267,6 +338,16 @@ void KPageTable::Finalize() {
m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
});
+ // Release any insecure mapped memory.
+ if (m_mapped_insecure_memory) {
+ UNIMPLEMENTED();
+ }
+
+ // Release any ipc server memory.
+ if (m_mapped_ipc_server_memory) {
+ UNIMPLEMENTED();
+ }
+
// Close the backing page table, as the destructor is not called for guest objects.
m_page_table_impl.reset();
}
@@ -650,7 +731,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
VAddr src_addr) {
- KScopedLightLock lk(m_general_lock);
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
const size_t num_pages{size / PageSize};
@@ -686,9 +768,753 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
R_SUCCEED();
}
+Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ VAddr address, size_t size, KMemoryPermission test_perm,
+ KMemoryState dst_state) {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
+ test_perm == KMemoryPermission::UserRead);
+
+ // Check that the address is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get the source permission.
+ const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead;
+
+ // Get aligned extents.
+ const VAddr aligned_src_start = Common::AlignDown((address), PageSize);
+ const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize);
+ const VAddr mapping_src_start = Common::AlignUp((address), PageSize);
+ const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize);
+
+ const auto aligned_src_last = (aligned_src_end)-1;
+ const auto mapping_src_last = (mapping_src_end)-1;
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
+ src_perm);
+ }
+ };
+
+ size_t blocks_needed = 0;
+
+ // Iterate, mapping as needed.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate the current block.
+ R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
+ test_attr_mask, KMemoryAttribute::None));
+
+ if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
+ info.GetAddress() < (mapping_src_end)) {
+ const auto cur_start =
+ info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start);
+ const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
+ : (mapping_src_end);
+ const size_t cur_size = cur_end - cur_start;
+
+ if (info.GetAddress() < (mapping_src_start)) {
+ ++blocks_needed;
+ }
+ if (mapping_src_last < info.GetLastAddress()) {
+ ++blocks_needed;
+ }
+
+ // Set the permissions on the block, if we need to.
+ if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
+ R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
+ OperationType::ChangePermissions));
+ }
+
+ // Note that we mapped this part.
+ mapped_size += cur_size;
+ }
+
+ // If the block is at the end, we're done.
+ if (aligned_src_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+
+ if (out_blocks_needed != nullptr) {
+ ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ *out_blocks_needed = blocks_needed;
+ }
+
+ R_SUCCEED();
+}
+
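SetupForIpcClient's four extents split an unaligned IPC buffer into a page-aligned hull (aligned_src_start/end) and the whole pages it fully covers (mapping_src_start/end); the pages inside the hull but outside the mapping range are the partial pages that the server setup below has to back with freshly allocated, fill-initialized memory, and only those partial pages are charged against the process's PhysicalMemory limit. A standalone sketch of the same arithmetic, assuming 4 KiB pages and using local stand-ins for Common::AlignUp/AlignDown (not part of the patch):

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;
    constexpr std::uint64_t AlignDown(std::uint64_t v, std::size_t a) { return v & ~(std::uint64_t{a} - 1); }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::size_t a) { return AlignDown(v + a - 1, a); }

    // Buffer of 0x3000 bytes starting 0x800 bytes into a page.
    constexpr std::uint64_t address = 0x1000'0800;
    constexpr std::uint64_t buffer_size = 0x3000;
    constexpr auto aligned_start = AlignDown(address, PageSize);               // hull start
    constexpr auto aligned_end   = AlignUp(address + buffer_size, PageSize);   // hull end
    constexpr auto mapping_start = AlignUp(address, PageSize);                 // first whole page
    constexpr auto mapping_end   = AlignDown(address + buffer_size, PageSize); // past last whole page
    static_assert(aligned_end - aligned_start == 4 * PageSize);  // the hull covers 4 pages
    static_assert(mapping_end - mapping_start == 2 * PageSize);  // 2 pages can be shared directly
    static_assert((aligned_end - aligned_start) - (mapping_end - mapping_start) == 2 * PageSize); // 2 partial pages need fresh backing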
+Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
+ KMemoryPermission test_perm, KMemoryState dst_state,
+ KPageTable& src_page_table, bool send) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(src_page_table.IsLockedByCurrentThread());
+
+ // Check that we can theoretically map.
+ const VAddr region_start = m_alias_region_start;
+ const size_t region_size = m_alias_region_end - m_alias_region_start;
+ R_UNLESS(size < region_size, ResultOutOfAddressSpace);
+
+ // Get aligned source extents.
+ const VAddr src_start = src_addr;
+ const VAddr src_end = src_addr + size;
+ const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize);
+ const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize);
+ const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize);
+ const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize);
+ const size_t aligned_src_size = aligned_src_end - aligned_src_start;
+ const size_t mapping_src_size =
+ (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
+
+ // Select a random address to map at.
+ VAddr dst_addr =
+ this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
+ PageSize, 0, this->GetNumGuardPages());
+
+ R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
+
+ // Check that we can perform the operation we're about to perform.
+ ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reserve space for any partial pages we allocate.
+ const size_t unmapped_size = aligned_src_size - mapping_src_size;
+ KScopedResourceReservation memory_reservation(m_resource_limit,
+ LimitableResource::PhysicalMemory, unmapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Ensure that we manage page references correctly.
+ PAddr start_partial_page = 0;
+ PAddr end_partial_page = 0;
+ VAddr cur_mapped_addr = dst_addr;
+
+ // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
+ // be freed on scope exit.
+ SCOPE_EXIT({
+ if (start_partial_page != 0) {
+ m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
+ }
+ if (end_partial_page != 0) {
+ m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
+ }
+ });
+
+ ON_RESULT_FAILURE {
+ if (cur_mapped_addr != dst_addr) {
+ // HACK: Manually close the pages.
+ HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize);
+
+ ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
+ KMemoryPermission::None, OperationType::Unmap)
+ .IsSuccess());
+ }
+ };
+
+ // Allocate the start page as needed.
+ if (aligned_src_start < mapping_src_start) {
+ start_partial_page =
+ m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Allocate the end page as needed.
+ if (mapping_src_end < aligned_src_end &&
+ (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
+ end_partial_page =
+ m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Get the implementation.
+ auto& src_impl = src_page_table.PageTableImpl();
+
+ // Get the fill value for partial pages.
+ const auto fill_val = m_ipc_fill_value;
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry next_entry;
+ bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ PAddr cur_block_addr = next_entry.phys_addr;
+ size_t cur_block_size =
+ next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
+ size_t tot_block_size = cur_block_size;
+
+ // Map the start page, if we have one.
+ if (start_partial_page != 0) {
+ // Ensure the page holds correct data.
+ const VAddr start_partial_virt =
+ GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
+ if (send) {
+ const size_t partial_offset = src_start - aligned_src_start;
+ size_t copy_size, clear_size;
+ if (src_end < mapping_src_start) {
+ copy_size = size;
+ clear_size = mapping_src_start - src_end;
+ } else {
+ copy_size = mapping_src_start - src_start;
+ clear_size = 0;
+ }
+
+ std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val,
+ partial_offset);
+ std::memcpy(
+ m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset),
+ m_system.Memory().GetPointer<void>(
+ GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) +
+ partial_offset),
+ copy_size);
+ if (clear_size > 0) {
+ std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset +
+ copy_size),
+ fill_val, clear_size);
+ }
+ } else {
+ std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize);
+ }
+
+ // Map the page.
+ R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
+
+ // HACK: Manually open the pages.
+ HACK_OpenPages(start_partial_page, 1);
+
+ // Update tracking extents.
+ cur_mapped_addr += PageSize;
+ cur_block_addr += PageSize;
+ cur_block_size -= PageSize;
+
+ // If the block's size was one page, we may need to continue traversal.
+ if (cur_block_size == 0 && aligned_src_size > PageSize) {
+ traverse_valid = src_impl.ContinueTraversal(next_entry, context);
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ tot_block_size += next_entry.block_size;
+ }
+ }
+
+ // Map the remaining pages.
+ while (aligned_src_start + tot_block_size < mapping_src_end) {
+ // Continue the traversal.
+ traverse_valid = src_impl.ContinueTraversal(next_entry, context);
+ ASSERT(traverse_valid);
+
+ // Process the block.
+ if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
+ // Map the block we've been processing so far.
+ R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
+ cur_block_addr));
+
+ // HACK: Manually open the pages.
+ HACK_OpenPages(cur_block_addr, cur_block_size / PageSize);
+
+ // Update tracking extents.
+ cur_mapped_addr += cur_block_size;
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ } else {
+ cur_block_size += next_entry.block_size;
+ }
+ tot_block_size += next_entry.block_size;
+ }
+
+ // Handle the last direct-mapped page.
+ if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size;
+ mapped_block_end < mapping_src_end) {
+ const size_t last_block_size = mapping_src_end - mapped_block_end;
+
+ // Map the last block.
+ R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
+ cur_block_addr));
+
+ // HACK: Manually open the pages.
+ HACK_OpenPages(cur_block_addr, last_block_size / PageSize);
+
+ // Update tracking extents.
+ cur_mapped_addr += last_block_size;
+ cur_block_addr += last_block_size;
+ if (mapped_block_end + cur_block_size < aligned_src_end &&
+ cur_block_size == last_block_size) {
+ traverse_valid = src_impl.ContinueTraversal(next_entry, context);
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ }
+ }
+
+ // Map the end page, if we have one.
+ if (end_partial_page != 0) {
+ // Ensure the page holds correct data.
+ const VAddr end_partial_virt =
+ GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
+ if (send) {
+ const size_t copy_size = src_end - mapping_src_end;
+ std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt),
+ m_system.Memory().GetPointer<void>(GetHeapVirtualAddress(
+ m_system.Kernel().MemoryLayout(), cur_block_addr)),
+ copy_size);
+ std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val,
+ PageSize - copy_size);
+ } else {
+ std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize);
+ }
+
+ // Map the page.
+ R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
+
+ // HACK: Manually open the pages.
+ HACK_OpenPages(end_partial_page, 1);
+ }
+
+ // Update memory blocks to reflect our changes
+ m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
+ dst_state, test_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Set the output address.
+ *out_addr = dst_addr + (src_start - aligned_src_start);
+
+ // We succeeded.
+ memory_reservation.Commit();
+ R_SUCCEED();
+}
+
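When the buffer starts mid-page, the start partial page above is assembled from three byte ranges: the IPC fill value before the client's data, the client's bytes themselves, and the fill value after them (with a special case when the whole buffer ends before the first whole page, which the sketch below leaves out). For receive buffers (send == false) the whole page is simply filled. A simplified sketch of the send-side layout, with a hypothetical helper and plain pointers in place of the emulated-memory accessors:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr std::size_t PageSize = 0x1000;

    // page      - kernel-visible copy of the first (partial) page of the message
    // src       - the client's bytes that live inside this page
    // offset    - where those bytes start within the page
    // fill      - the table's IPC fill value (m_ipc_fill_value in the patch)
    // Assumes offset + copy_size <= PageSize.
    void BuildStartPartialPage(std::uint8_t* page, const std::uint8_t* src,
                               std::size_t offset, std::size_t copy_size, std::uint8_t fill) {
        std::memset(page, fill, offset);                      // bytes before the buffer
        std::memcpy(page + offset, src, copy_size);           // the client's data
        std::memset(page + offset + copy_size, fill,
                    PageSize - offset - copy_size);           // bytes after the buffer
    }

Scrubbing the unused bytes with the fill value means the server never observes stale contents from the client's partial pages.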
+Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
+ KPageTable& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send) {
+ // For convenience, alias this.
+ KPageTable& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(std::addressof(src_page_table));
+
+ // Perform client setup.
+ size_t num_allocator_blocks;
+ R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
+ std::addressof(num_allocator_blocks), src_addr, size,
+ test_perm, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ src_page_table.m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Get the mapped extents.
+ const VAddr src_map_start = Common::AlignUp((src_addr), PageSize);
+ const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize);
+ const size_t src_map_size = src_map_end - src_map_start;
+
+ // Ensure that we clean up appropriately if we fail after this.
+ const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead;
+ ON_RESULT_FAILURE {
+ if (src_map_end > src_map_start) {
+ src_page_table.CleanupForIpcClientOnServerSetupFailure(
+ updater.GetPageList(), src_map_start, src_map_size, src_perm);
+ }
+ };
+
+ // Perform server setup.
+ R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
+ src_page_table, send));
+
+ // If anything was mapped, ipc-lock the pages.
+ if (src_map_start < src_map_end) {
+ // Get the source permission.
+ src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
+ (src_map_end - src_map_start) / PageSize,
+ &KMemoryBlock::LockForIpc, src_perm);
+ }
+
+ R_SUCCEED();
+}
+
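SetupForIpc holds both the client's and the server's table locks across the whole client-setup/server-setup sequence, taking them through a single pair helper so the acquisition order stays consistent no matter which table is the destination. The standard-library analogue of that pattern is std::scoped_lock, which locks multiple mutexes without risking deadlock; a minimal sketch (KScopedLightLockPair's actual implementation is not part of this diff):

    #include <mutex>

    std::mutex src_table_lock;
    std::mutex dst_table_lock;

    void SetupForIpcSketch() {
        // Hold both table locks for the duration of the update,
        // acquired in a deadlock-free order.
        std::scoped_lock lk{src_table_lock, dst_table_lock};
        // ... client setup, then server setup, then ipc-lock the source blocks ...
    }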
+Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, dst_state, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Get aligned extents.
+ const VAddr aligned_start = Common::AlignDown((address), PageSize);
+ const VAddr aligned_end = Common::AlignUp((address) + size, PageSize);
+ const size_t aligned_size = aligned_end - aligned_start;
+ const size_t aligned_num_pages = aligned_size / PageSize;
+
+ // HACK: Manually close the pages.
+ HACK_ClosePages(aligned_start, aligned_num_pages);
+
+ // Unmap the pages.
+ R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
+ KMemoryState::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Release from the resource limit as relevant.
+ const VAddr mapping_start = Common::AlignUp((address), PageSize);
+ const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
+ const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
+ m_resource_limit->Release(LimitableResource::PhysicalMemory, aligned_size - mapping_size);
+
+ R_SUCCEED();
+}
+
+Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get aligned source extents.
+ const VAddr mapping_start = Common::AlignUp((address), PageSize);
+ const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
+ const VAddr mapping_last = mapping_end - 1;
+ const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
+
+ // If nothing was mapped, we're actually done immediately.
+ R_SUCCEED_IF(mapping_size == 0);
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Lock the table.
+ // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
+ // convention elsewhere in KPageTable.
+ KScopedLightLock lk(m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ // Determine where the mapping ends.
+ const auto mapped_end = (mapping_start) + mapped_size;
+ const auto mapped_last = mapped_end - 1;
+
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Get the current block info.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+
+ // Create tracking variables.
+ VAddr cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first =
+ cur_info.GetIpcDisableMergeCount() == 1 &&
+ (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
+ KMemoryBlockDisableMergeAttribute::None;
+
+ while (((cur_address) + cur_size - 1) < mapped_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a
+ // single call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
+ OperationType::ChangePermissions)
+ .IsSuccess());
+ }
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
+ if ((first || cur_needs_set_perm) && !cur_perm_eq) {
+ ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
+ OperationType::ChangePermissions)
+ .IsSuccess());
+ }
+ }
+ };
+
+ // Iterate, reprotecting as needed.
+ {
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Validate the current block.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+ ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
+ KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked,
+ KMemoryAttribute::IpcLocked)
+ .IsSuccess());
+
+ // Create tracking variables.
+ VAddr cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first =
+ cur_info.GetIpcDisableMergeCount() == 1 &&
+ (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
+ KMemoryBlockDisableMergeAttribute::None;
+
+ while ((cur_address + cur_size - 1) < mapping_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Validate the next block.
+ ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
+ KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked,
+ KMemoryAttribute::IpcLocked)
+ .IsSuccess());
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a single
+ // call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ R_TRY(Operate(cur_address, cur_size / PageSize,
+ cur_needs_set_perm ? cur_info.GetOriginalPermission()
+ : cur_info.GetPermission(),
+ OperationType::ChangePermissions));
+ }
+
+ // Mark that we mapped the block.
+ mapped_size += cur_size;
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
+ const auto lock_count =
+ cur_info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
+ R_TRY(Operate(cur_address, cur_size / PageSize,
+ cur_needs_set_perm ? cur_info.GetOriginalPermission()
+ : cur_info.GetPermission(),
+ OperationType::ChangePermissions));
+ }
+ }
+
+ // Create an update allocator.
+ // NOTE: Guaranteed zero blocks needed here.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, 0);
+ R_TRY(allocator_result);
+
+ // Unlock the pages.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
+ mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
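Both the rollback handler and the main loop of CleanupForIpcClient coalesce neighbouring blocks whose protections can be restored together, so a run of adjacent blocks costs one ChangePermissions operation instead of one per block. A reduced sketch of that coalescing, collapsing the patch's permission comparisons into a single needs_change flag and assuming the blocks are adjacent and address-ordered, as they are in the block manager:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    struct Block {
        std::uint64_t address;
        std::size_t size;
        bool needs_change; // stands in for "current permission differs from the original"
    };

    // Invoke reprotect once per maximal run of adjacent blocks that need changing.
    void CoalesceReprotect(const std::vector<Block>& blocks,
                           const std::function<void(std::uint64_t, std::size_t)>& reprotect) {
        std::size_t i = 0;
        while (i < blocks.size()) {
            if (!blocks[i].needs_change) {
                ++i;
                continue;
            }
            const std::uint64_t start = blocks[i].address;
            std::size_t total = 0;
            while (i < blocks.size() && blocks[i].needs_change) {
                total += blocks[i].size;
                ++i;
            }
            reprotect(start, total); // one ChangePermissions-style call covers the whole run
        }
    }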
+void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
+ VAddr address, size_t size,
+ KMemoryPermission prot_perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+
+ // Get the mapped extents.
+ const VAddr src_map_start = address;
+ const VAddr src_map_end = address + size;
+ const VAddr src_map_last = src_map_end - 1;
+
+ // This function is only invoked when there's something to do.
+ ASSERT(src_map_end > src_map_start);
+
+ // Iterate over blocks, fixing permissions.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const auto cur_start =
+ info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start;
+ const auto cur_end =
+ src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
+
+ // If we can, fix the protections on the block.
+ if ((info.GetIpcLockCount() == 0 &&
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
+ (info.GetIpcLockCount() != 0 &&
+ (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
+ // Check if we actually need to fix the protections on the block.
+ if (cur_end == src_map_end || info.GetAddress() <= src_map_start ||
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
+ ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
+ OperationType::ChangePermissions)
+ .IsSuccess());
+ }
+ }
+
+ // If we're past the end of the region, we're done.
+ if (src_map_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+}
+
+void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
+ m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
+}
+
+void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
+ for (size_t index = 0; index < num_pages; ++index) {
+ const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
+ m_system.Kernel().MemoryManager().Close(paddr, 1);
+ }
+}
+
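The HACK_OpenPages/HACK_ClosePages helpers exist because guest-mapped pages are not yet automatically reference counted (see the note in the header); every map manually opens one reference per physical page and every unmap closes one, so the memory manager only reclaims a page once its last mapping is gone. A tiny model of that per-page counting, not how KMemoryManager actually stores its counts:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    class PageRefCounts {
    public:
        void Open(std::uint64_t phys_addr, int count) {
            m_counts[phys_addr] += count;
        }

        // Returns true when the last reference is dropped and the page could be freed.
        bool Close(std::uint64_t phys_addr, int count) {
            const auto it = m_counts.find(phys_addr);
            assert(it != m_counts.end() && it->second >= count);
            it->second -= count;
            if (it->second == 0) {
                m_counts.erase(it);
                return true;
            }
            return false;
        }

    private:
        std::unordered_map<std::uint64_t, int> m_counts;
    };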
Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
// Calculate the last address for convenience.
const VAddr last_address = address + size - 1;
@@ -742,15 +1568,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
{
// Reserve the memory from the process resource limit.
KScopedResourceReservation memory_reservation(
- m_system.Kernel().CurrentProcess()->GetResourceLimit(),
- LimitableResource::PhysicalMemory, size - mapped_size);
+ m_resource_limit, LimitableResource::PhysicalMemory, size - mapped_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the new memory.
KPageGroup pg;
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
- &pg, (size - mapped_size) / PageSize,
- KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
+ R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
+ &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
+
+ // If we fail in the next bit (or retry), we need to cleanup the pages.
+ // auto pg_guard = SCOPE_GUARD {
+ // pg.OpenFirst();
+ // pg.Close();
+ //};
// Map the memory.
{
@@ -810,15 +1640,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
// Create an update allocator.
ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager,
num_allocator_blocks);
R_TRY(allocator_result);
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Prepare to iterate over the memory.
+ auto pg_it = pg.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
// Reset the current tracking address, and make sure we clean up on failure.
+ // pg_guard.Cancel();
cur_address = address;
- auto unmap_guard = detail::ScopeExit([&] {
+ ON_RESULT_FAILURE {
if (cur_address > address) {
const VAddr last_unmap_address = cur_address - 1;
@@ -841,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
last_unmap_address + 1 - cur_address) /
PageSize;
+ // HACK: Manually close the pages.
+ HACK_ClosePages(cur_address, cur_pages);
+
// Unmap.
ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
OperationType::Unmap)
@@ -857,12 +1699,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
++it;
}
}
- });
- // Iterate over the memory.
- auto pg_it = pg.Nodes().begin();
- PAddr pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
+ // Release any remaining unmapped memory.
+ m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
+ m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
+ for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
+ m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
+ pg_it->GetNumPages());
+ m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
+ pg_it->GetNumPages());
+ }
+ };
auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
@@ -897,6 +1744,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
OperationType::Map, pg_phys_addr));
+ // HACK: Manually open the pages.
+ HACK_OpenPages(pg_phys_addr, cur_pages);
+
// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
@@ -928,9 +1778,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
- // Cancel our guard.
- unmap_guard.Cancel();
-
R_SUCCEED();
}
}
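MapPhysicalMemory walks the free ranges inside the requested region and backs each one with pages drawn from the page group allocated above, consuming the group's physical extents one node at a time, so a single free range may be satisfied by several Operate(..., Map) calls. A compact sketch of that two-cursor consumption, with plain extent lists standing in for KPageGroup and the memory block manager:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    constexpr std::size_t PageSize = 0x1000;

    struct Extent {
        std::uint64_t start;
        std::size_t pages;
    };

    // Back each free virtual extent with pages taken from the allocated physical extents,
    // issuing one map call per contiguous chunk. Assumes the physical extents hold at
    // least as many pages in total as the free ranges need.
    void MapFromGroup(const std::vector<Extent>& free_ranges, const std::vector<Extent>& physical,
                      const std::function<void(std::uint64_t, std::uint64_t, std::size_t)>& map) {
        std::size_t pi = 0;
        std::uint64_t phys = physical.empty() ? 0 : physical[0].start;
        std::size_t phys_left = physical.empty() ? 0 : physical[0].pages;

        for (const Extent& range : free_ranges) {
            std::uint64_t virt = range.start;
            std::size_t need = range.pages;
            while (need > 0) {
                if (phys_left == 0) { // advance to the next physical node
                    ++pi;
                    phys = physical[pi].start;
                    phys_left = physical[pi].pages;
                }
                const std::size_t take = std::min(need, phys_left);
                map(virt, phys, take); // one Operate(..., Map) per chunk in the patch
                virt += take * PageSize;
                phys += take * PageSize;
                need -= take;
                phys_left -= take;
            }
        }
    }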
@@ -939,7 +1786,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
// Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
// Lock the table.
KScopedLightLock lk(m_general_lock);
@@ -948,8 +1795,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
const VAddr last_address = address + size - 1;
// Define iteration variables.
- VAddr cur_address = 0;
- size_t mapped_size = 0;
+ VAddr map_start_address = 0;
+ VAddr map_last_address = 0;
+
+ VAddr cur_address;
+ size_t mapped_size;
size_t num_allocator_blocks = 0;
// Check if the memory is mapped.
@@ -975,27 +1825,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
if (is_normal) {
R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+ if (map_start_address == 0) {
+ map_start_address = cur_address;
+ }
+ map_last_address =
+ (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
+
if (info.GetAddress() < address) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
++num_allocator_blocks;
}
+
+ mapped_size += (map_last_address + 1 - cur_address);
}
// Check if we're done.
if (last_address <= info.GetLastAddress()) {
- if (is_normal) {
- mapped_size += (last_address + 1 - cur_address);
- }
break;
}
- // Track the memory if it's mapped.
- if (is_normal) {
- mapped_size += VAddr(info.GetEndAddress()) - cur_address;
- }
-
// Advance.
cur_address = info.GetEndAddress();
++it;
@@ -1005,125 +1855,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
R_SUCCEED_IF(mapped_size == 0);
}
- // Make a page group for the unmap region.
- KPageGroup pg;
- {
- auto& impl = this->PageTableImpl();
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
- bool cur_valid = false;
- Common::PageTable::TraversalEntry next_entry;
- bool next_valid = false;
- size_t tot_size = 0;
-
- cur_address = address;
- next_valid = impl.BeginTraversal(next_entry, context, cur_address);
- next_entry.block_size =
- (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
-
- // Iterate, building the group.
- while (true) {
- if ((!next_valid && !cur_valid) ||
- (next_valid && cur_valid &&
- next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
- cur_entry.block_size += next_entry.block_size;
- } else {
- if (cur_valid) {
- // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
- R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
- }
-
- // Update tracking variables.
- tot_size += cur_entry.block_size;
- cur_entry = next_entry;
- cur_valid = next_valid;
- }
-
- if (cur_entry.block_size + tot_size >= size) {
- break;
- }
-
- next_valid = impl.ContinueTraversal(next_entry, context);
- }
-
- // Add the last block.
- if (cur_valid) {
- // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
- R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
- }
- }
- ASSERT(pg.GetNumPages() == mapped_size / PageSize);
-
// Create an update allocator.
ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
- // Reset the current tracking address, and make sure we clean up on failure.
- cur_address = address;
- auto remap_guard = detail::ScopeExit([&] {
- if (cur_address > address) {
- const VAddr last_map_address = cur_address - 1;
- cur_address = address;
-
- // Iterate over the memory we unmapped.
- auto it = m_memory_block_manager.FindIterator(cur_address);
- auto pg_it = pg.Nodes().begin();
- PAddr pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- while (true) {
- // Get the memory info for the pages we unmapped, convert to property.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory is normal, we unmapped it and need to re-map it.
- if (info.GetState() == KMemoryState::Normal) {
- // Determine the range to map.
- size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
- last_map_address + 1 - cur_address) /
- PageSize;
-
- // While we have pages to map, map them.
- while (map_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.Nodes().end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Map whatever we can.
- const size_t cur_pages = std::min(pg_pages, map_pages);
- ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
- OperationType::Map, pg_phys_addr) == ResultSuccess);
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
- // Advance.
- cur_address += cur_pages * PageSize;
- map_pages -= cur_pages;
+ // Separate the mapping.
+ R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
+ KMemoryPermission::None, OperationType::Separate));
- pg_phys_addr += cur_pages * PageSize;
- pg_pages -= cur_pages;
- }
- }
-
- // Check if we're done.
- if (last_map_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
- }
- });
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
// Iterate over the memory, unmapping as we go.
auto it = m_memory_block_manager.FindIterator(cur_address);
@@ -1141,8 +1888,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
last_address + 1 - cur_address) /
PageSize;
+ // HACK: Manually close the pages.
+ HACK_ClosePages(cur_address, cur_pages);
+
// Unmap.
- R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
+ ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
+ .IsSuccess());
}
// Check if we're done.
@@ -1157,8 +1908,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
// Release the memory resource.
m_mapped_physical_memory_size -= mapped_size;
- auto process{m_system.Kernel().CurrentProcess()};
- process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
+ m_resource_limit->Release(LimitableResource::PhysicalMemory, mapped_size);
// Update memory blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
@@ -1166,14 +1916,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);
- // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
- // counting for mapped pages. Until then, we must manually close the reference to the page
- // group.
- m_system.Kernel().MemoryManager().Close(pg);
-
// We succeeded.
- remap_guard.Cancel();
-
R_SUCCEED();
}
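The rewritten scan in UnmapPhysicalMemory accumulates mapped_size while walking the blocks, adding for each Normal block the size of its overlap with the requested range [address, last_address] rather than fixing the total up afterwards. The per-block contribution is plain interval-overlap arithmetic; a one-function sketch:

    #include <algorithm>
    #include <cstdint>

    // Size of the overlap between a memory block and the requested [address, last_address] range.
    constexpr std::uint64_t OverlapSize(std::uint64_t block_start, std::uint64_t block_last,
                                        std::uint64_t address, std::uint64_t last_address) {
        const std::uint64_t start = std::max(block_start, address);
        const std::uint64_t last = std::min(block_last, last_address);
        return last >= start ? (last + 1 - start) : 0;
    }

    static_assert(OverlapSize(0x0000, 0x3FFF, 0x1000, 0x2FFF) == 0x2000); // block spans the range
    static_assert(OverlapSize(0x2000, 0x2FFF, 0x1000, 0x4FFF) == 0x1000); // block inside the range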
@@ -1749,8 +2492,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
OperationType::Unmap));
// Release the memory from the resource limit.
- m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
- LimitableResource::PhysicalMemory, num_pages * PageSize);
+ m_resource_limit->Release(LimitableResource::PhysicalMemory, num_pages * PageSize);
// Apply the memory block update.
m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
@@ -1780,8 +2522,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
// Reserve memory for the heap extension.
KScopedResourceReservation memory_reservation(
- m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
- allocation_size);
+ m_resource_limit, LimitableResource::PhysicalMemory, allocation_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the heap extension.
@@ -1869,7 +2610,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageGroup page_group;
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
&page_group, needed_num_pages,
KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
@@ -1883,8 +2624,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
return addr;
}
-Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
- bool is_aligned) {
+Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
+ KMemoryPermission perm, bool is_aligned,
+ bool check_heap) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1894,15 +2636,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem
// Check the memory state.
const auto test_state =
- (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
+ KMemoryState old_state;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
+ std::addressof(num_allocator_blocks), address, size, test_state,
test_state, perm, perm,
KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
@@ -1911,10 +2656,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
&KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+ // Set whether the locked memory was io.
+ *out_is_io = old_state == KMemoryState::Io;
+
R_SUCCEED();
}
-Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
+Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) {
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1923,16 +2671,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
KScopedLightLock lk(m_general_lock);
// Check the memory state.
+ const auto test_state = KMemoryState::FlagCanDeviceMap |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size,
- KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
- KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
+ std::addressof(num_allocator_blocks), address, size, test_state, test_state,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
// Create an update allocator.
- Result allocator_result{ResultSuccess};
+ Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
@@ -1976,13 +2724,28 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
R_SUCCEED();
}
+Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
+ KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, nullptr));
+}
+
Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
R_RETURN(this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None,
- static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
- KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
KMemoryAttribute::Locked));
}
@@ -2066,6 +2829,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
break;
}
+ case OperationType::Separate: {
+ // HACK: Unimplemented.
+ break;
+ }
case OperationType::ChangePermissions:
case OperationType::ChangePermissionsAndRefresh:
break;
@@ -2075,6 +2842,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
R_SUCCEED();
}
+void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
+ while (page_list->Peek()) {
+ [[maybe_unused]] auto page = page_list->Pop();
+
+ // TODO(bunnei): Free pages once they are allocated in guest memory
+ // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+ // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+ // this->GetPageTableManager().Free(page);
+ }
+}
+
VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
@@ -2101,6 +2879,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
+ case KMemoryState::Insecure:
return m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::CodeData:
@@ -2136,6 +2915,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
+ case KMemoryState::Insecure:
return m_alias_code_region_end - m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::CodeData:
@@ -2177,6 +2957,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
+ case KMemoryState::Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
case KMemoryState::Normal:
ASSERT(is_in_heap);
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index c6aeacd96..950850291 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
+#include "core/memory.h"
namespace Core {
class System;
@@ -23,7 +24,10 @@ class System;
namespace Kernel {
+class KBlockInfoManager;
class KMemoryBlockManager;
+class KResourceLimit;
+class KSystemResource;
class KPageTable final {
public:
@@ -36,9 +40,9 @@ public:
~KPageTable();
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, size_t code_size,
- KMemoryBlockSlabManager* mem_block_slab_manager,
- KMemoryManager::Pool pool);
+ bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
+ VAddr code_addr, size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit);
void Finalize();
@@ -74,12 +78,20 @@ public:
KMemoryState state, KMemoryPermission perm,
PAddr map_addr = 0);
- Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
- bool is_aligned);
- Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
+ Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
+ Result UnlockForIpcUserBuffer(VAddr address, size_t size);
+
+ Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
+ KMemoryPermission test_perm, KMemoryState dst_state, bool send);
+ Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
+ Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
+
Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -97,13 +109,54 @@ public:
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+protected:
+ struct PageLinkedList {
+ private:
+ struct Node {
+ Node* m_next;
+ std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+ };
+
+ public:
+ constexpr PageLinkedList() = default;
+
+ void Push(Node* n) {
+ ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+ n->m_next = m_root;
+ m_root = n;
+ }
+
+ void Push(Core::Memory::Memory& memory, VAddr addr) {
+ this->Push(memory.GetPointer<Node>(addr));
+ }
+
+ Node* Peek() const {
+ return m_root;
+ }
+
+ Node* Pop() {
+ Node* const r = m_root;
+
+ m_root = r->m_next;
+ r->m_next = nullptr;
+
+ return r;
+ }
+
+ private:
+ Node* m_root{};
+ };
+ static_assert(std::is_trivially_destructible<PageLinkedList>::value);
+
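PageLinkedList threads freed page-table pages together by writing the next pointer into the free page itself, so tracking them during an update needs no side allocation; FinalizeUpdate later pops each page off the list. A self-contained miniature of the same intrusive free-list pattern, independent of the kernel types above:

    #include <array>
    #include <cstdint>

    struct FreePage {
        FreePage* next;                                             // stored in the page itself
        std::array<std::uint8_t, 0x1000 - sizeof(FreePage*)> rest;  // remainder of the page
    };
    static_assert(sizeof(FreePage) == 0x1000);

    struct FreeList {
        FreePage* root = nullptr;

        void Push(FreePage* p) {
            p->next = root;
            root = p;
        }
        FreePage* Pop() {
            FreePage* const p = root;
            root = p->next;
            p->next = nullptr;
            return p;
        }
        bool Empty() const {
            return root == nullptr;
        }
    };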
private:
enum class OperationType : u32 {
- Map,
- MapGroup,
- Unmap,
- ChangePermissions,
- ChangePermissionsAndRefresh,
+ Map = 0,
+ MapFirst = 1,
+ MapGroup = 2,
+ Unmap = 3,
+ ChangePermissions = 4,
+ ChangePermissionsAndRefresh = 5,
+ Separate = 6,
};
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -123,6 +176,7 @@ private:
OperationType operation);
Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
PAddr map_addr = 0);
+ void FinalizeUpdate(PageLinkedList* page_list);
VAddr GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
@@ -199,6 +253,18 @@ private:
return *out != 0;
}
+ Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
+ size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
+ Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
+ KMemoryPermission test_perm, KMemoryState dst_state,
+ KPageTable& src_page_table, bool send);
+ void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
+ size_t size, KMemoryPermission prot_perm);
+
+ // HACK: These will be removed once we automatically manage page reference counts.
+ void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
+ void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
+
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
@@ -316,6 +382,31 @@ public:
addr + size - 1 <= m_address_space_end - 1;
}
+public:
+ static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ return layout.GetLinearVirtualAddress(addr);
+ }
+
+ static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ return layout.GetLinearPhysicalAddress(addr);
+ }
+
+ static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ return GetLinearMappedVirtualAddress(layout, addr);
+ }
+
+ static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ return GetLinearMappedPhysicalAddress(layout, addr);
+ }
+
+ static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
+ return GetLinearMappedVirtualAddress(layout, addr);
+ }
+
+ static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
+ return GetLinearMappedPhysicalAddress(layout, addr);
+ }
+
private:
constexpr bool IsKernel() const {
return m_is_kernel;
@@ -331,6 +422,24 @@ private:
}
private:
+ class KScopedPageTableUpdater {
+ private:
+ KPageTable* m_pt{};
+ PageLinkedList m_ll;
+
+ public:
+ explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
+ explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
+ ~KScopedPageTableUpdater() {
+ m_pt->FinalizeUpdate(this->GetPageList());
+ }
+
+ PageLinkedList* GetPageList() {
+ return &m_ll;
+ }
+ };
+
+private:
VAddr m_address_space_start{};
VAddr m_address_space_end{};
VAddr m_heap_region_start{};
@@ -347,20 +456,27 @@ private:
VAddr m_alias_code_region_start{};
VAddr m_alias_code_region_end{};
- size_t m_mapped_physical_memory_size{};
size_t m_max_heap_size{};
- size_t m_max_physical_memory_size{};
+ size_t m_mapped_physical_memory_size{};
+ size_t m_mapped_unsafe_physical_memory{};
+ size_t m_mapped_insecure_memory{};
+ size_t m_mapped_ipc_server_memory{};
size_t m_address_space_width{};
KMemoryBlockManager m_memory_block_manager;
+ u32 m_allocate_option{};
bool m_is_kernel{};
bool m_enable_aslr{};
bool m_enable_device_address_space_merge{};
KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+ KBlockInfoManager* m_block_info_manager{};
+ KResourceLimit* m_resource_limit{};
u32 m_heap_fill_value{};
+ u32 m_ipc_fill_value{};
+ u32 m_stack_fill_value{};
const KMemoryRegion* m_cached_physical_heap_region{};
KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h
new file mode 100644
index 000000000..91a45cde3
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_manager.h
@@ -0,0 +1,55 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_page_table_slab_heap.h"
+
+namespace Kernel {
+
+class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> {
+public:
+ using RefCount = KPageTableSlabHeap::RefCount;
+ static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize;
+
+public:
+ KPageTableManager() = default;
+
+ void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) {
+ m_pt_heap = pt_heap;
+
+ static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>);
+ BaseHeap::Initialize(page_allocator, pt_heap);
+ }
+
+ VAddr Allocate() {
+ return VAddr(BaseHeap::Allocate());
+ }
+
+ RefCount GetRefCount(VAddr addr) const {
+ return m_pt_heap->GetRefCount(addr);
+ }
+
+ void Open(VAddr addr, int count) {
+ return m_pt_heap->Open(addr, count);
+ }
+
+ bool Close(VAddr addr, int count) {
+ return m_pt_heap->Close(addr, count);
+ }
+
+ bool IsInPageTableHeap(VAddr addr) const {
+ return m_pt_heap->IsInRange(addr);
+ }
+
+private:
+ using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>;
+
+ KPageTableSlabHeap* m_pt_heap{};
+};
+
+} // namespace Kernel
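KPageTableManager layers the slab heap's reference counting on top of the dynamic resource manager's allocation: Open and Close adjust a per-page count, and Close reports when the count reaches zero so the caller knows the page may be reclaimed (the commented-out Free() call in KPageTable::FinalizeUpdate is the intended consumer). A hypothetical usage sketch against the interface above:

    #include "core/hle/kernel/k_page_table_manager.h"

    namespace Kernel {

    // Grab a page for page-table data and take the first reference on it.
    // The slab heap initializes every count to zero, so the page starts unreferenced.
    VAddr AcquireTablePage(KPageTableManager& manager) {
        const VAddr page = manager.Allocate();
        manager.Open(page, 1);
        return page;
    }

    // Returns true when the last reference is gone and the page may be handed back
    // to the allocator.
    bool ReleaseTablePage(KPageTableManager& manager, VAddr page) {
        return manager.Close(page, 1);
    }

    } // namespace Kernel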
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h
new file mode 100644
index 000000000..a9543cbd0
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_slab_heap.h
@@ -0,0 +1,93 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+#include <vector>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_dynamic_slab_heap.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+namespace impl {
+
+class PageTablePage {
+public:
+ // Do not initialize anything.
+ PageTablePage() = default;
+
+private:
+ std::array<u8, PageSize> m_buffer{};
+};
+static_assert(sizeof(PageTablePage) == PageSize);
+
+} // namespace impl
+
+class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> {
+public:
+ using RefCount = u16;
+ static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
+ static_assert(PageTableSize == PageSize);
+
+public:
+ KPageTableSlabHeap() = default;
+
+ static constexpr size_t CalculateReferenceCountSize(size_t size) {
+ return (size / PageSize) * sizeof(RefCount);
+ }
+
+ void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) {
+ BaseHeap::Initialize(page_allocator, object_count);
+ this->Initialize(rc);
+ }
+
+ RefCount GetRefCount(VAddr addr) {
+ ASSERT(this->IsInRange(addr));
+ return *this->GetRefCountPointer(addr);
+ }
+
+ void Open(VAddr addr, int count) {
+ ASSERT(this->IsInRange(addr));
+
+ *this->GetRefCountPointer(addr) += static_cast<RefCount>(count);
+
+ ASSERT(this->GetRefCount(addr) > 0);
+ }
+
+ bool Close(VAddr addr, int count) {
+ ASSERT(this->IsInRange(addr));
+ ASSERT(this->GetRefCount(addr) >= count);
+
+ *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count);
+ return this->GetRefCount(addr) == 0;
+ }
+
+ bool IsInPageTableHeap(VAddr addr) const {
+ return this->IsInRange(addr);
+ }
+
+private:
+ void Initialize([[maybe_unused]] RefCount* rc) {
+ // TODO(bunnei): Use rc once we support kernel virtual memory allocations.
+ const auto count = this->GetSize() / PageSize;
+ m_ref_counts.resize(count);
+
+ for (size_t i = 0; i < count; i++) {
+ m_ref_counts[i] = 0;
+ }
+ }
+
+ RefCount* GetRefCountPointer(VAddr addr) {
+ return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
+ }
+
+private:
+ using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
+
+ std::vector<RefCount> m_ref_counts;
+};
+
+} // namespace Kernel
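CalculateReferenceCountSize sizes the side table of RefCount (u16) counters: one counter per page of slab-heap backing. A worked check of that arithmetic, assuming 4 KiB pages:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000;
    using RefCount = std::uint16_t;

    constexpr std::size_t CalculateReferenceCountSize(std::size_t size) {
        return (size / PageSize) * sizeof(RefCount);
    }

    static_assert(CalculateReferenceCountSize(1 * 1024 * 1024) == 512);  // 256 pages * 2 bytes
    static_assert(CalculateReferenceCountSize(4 * 1024 * 1024) == 2048); // 1024 pages * 2 bytes

For now the heap keeps these counters in a std::vector rather than the caller-provided rc buffer, per the TODO in Initialize, until kernel virtual memory allocations are supported.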
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 8c3495e5a..4ddeea73b 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
// Initialize process address space
if (const Result result{page_table.InitializeForProcess(
- metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
- &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
+ metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
+ 0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)};
result.IsError()) {
R_RETURN(result);
}
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
new file mode 100644
index 000000000..4cc377a6c
--- /dev/null
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -0,0 +1,26 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/hle/kernel/k_system_resource.h"
+
+namespace Kernel {
+
+Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size,
+ [[maybe_unused]] KResourceLimit* resource_limit,
+ [[maybe_unused]] KMemoryManager::Pool pool) {
+ // Unimplemented
+ UNREACHABLE();
+}
+
+void KSecureSystemResource::Finalize() {
+ // Unimplemented
+ UNREACHABLE();
+}
+
+size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(
+ [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) {
+ // Unimplemented
+ UNREACHABLE();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h
new file mode 100644
index 000000000..9a991f725
--- /dev/null
+++ b/src/core/hle/kernel/k_system_resource.h
@@ -0,0 +1,137 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_resource_limit.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+// NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses
+// virtual IsSecureResource().
+
+class KSystemResource : public KAutoObject {
+ KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);
+
+public:
+ explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {}
+
+protected:
+ void SetSecureResource() {
+ m_is_secure_resource = true;
+ }
+
+public:
+ virtual void Destroy() override {
+ UNREACHABLE_MSG("KSystemResource::Destroy() was called");
+ }
+
+ bool IsSecureResource() const {
+ return m_is_secure_resource;
+ }
+
+ void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) {
+ ASSERT(m_p_memory_block_slab_manager == nullptr);
+ ASSERT(m_p_block_info_manager == nullptr);
+ ASSERT(m_p_page_table_manager == nullptr);
+
+ m_p_memory_block_slab_manager = std::addressof(mb);
+ m_p_block_info_manager = std::addressof(bi);
+ m_p_page_table_manager = std::addressof(pt);
+ }
+
+ const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
+ return *m_p_memory_block_slab_manager;
+ }
+ const KBlockInfoManager& GetBlockInfoManager() const {
+ return *m_p_block_info_manager;
+ }
+ const KPageTableManager& GetPageTableManager() const {
+ return *m_p_page_table_manager;
+ }
+
+ KMemoryBlockSlabManager& GetMemoryBlockSlabManager() {
+ return *m_p_memory_block_slab_manager;
+ }
+ KBlockInfoManager& GetBlockInfoManager() {
+ return *m_p_block_info_manager;
+ }
+ KPageTableManager& GetPageTableManager() {
+ return *m_p_page_table_manager;
+ }
+
+ KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() {
+ return m_p_memory_block_slab_manager;
+ }
+ KBlockInfoManager* GetBlockInfoManagerPointer() {
+ return m_p_block_info_manager;
+ }
+ KPageTableManager* GetPageTableManagerPointer() {
+ return m_p_page_table_manager;
+ }
+
+private:
+ KMemoryBlockSlabManager* m_p_memory_block_slab_manager{};
+ KBlockInfoManager* m_p_block_info_manager{};
+ KPageTableManager* m_p_page_table_manager{};
+ bool m_is_secure_resource{false};
+};
+
+class KSecureSystemResource final
+ : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
+public:
+ explicit KSecureSystemResource(KernelCore& kernel_)
+ : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) {
+ // Mark ourselves as being a secure resource.
+ this->SetSecureResource();
+ }
+
+ Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool);
+ void Finalize();
+
+ bool IsInitialized() const {
+ return m_is_initialized;
+ }
+ static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+
+ size_t CalculateRequiredSecureMemorySize() const {
+ return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool);
+ }
+
+ size_t GetSize() const {
+ return m_resource_size;
+ }
+ size_t GetUsedSize() const {
+ return m_dynamic_page_manager.GetUsed() * PageSize;
+ }
+
+ const KDynamicPageManager& GetDynamicPageManager() const {
+ return m_dynamic_page_manager;
+ }
+
+public:
+ static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool);
+
+private:
+ bool m_is_initialized{};
+ KMemoryManager::Pool m_resource_pool{};
+ KDynamicPageManager m_dynamic_page_manager;
+ KMemoryBlockSlabManager m_memory_block_slab_manager;
+ KBlockInfoManager m_block_info_manager;
+ KPageTableManager m_page_table_manager;
+ KMemoryBlockSlabHeap m_memory_block_heap;
+ KBlockInfoSlabHeap m_block_info_heap;
+ KPageTableSlabHeap m_page_table_heap;
+ KResourceLimit* m_resource_limit{};
+ VAddr m_resource_address{};
+ size_t m_resource_size{};
+};
+
+} // namespace Kernel
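A short wiring sketch for the managers above (hypothetical; the manager instances are placeholders, not from the commit). SetManagers() is called exactly once per resource and asserts that nothing was bound before; consumers then fetch whichever manager they need through the accessors.

    // Illustrative only.
    void BindManagers(Kernel::KSystemResource& resource,
                      Kernel::KMemoryBlockSlabManager& memory_blocks,
                      Kernel::KBlockInfoManager& block_infos,
                      Kernel::KPageTableManager& page_tables) {
        resource.SetManagers(memory_blocks, block_infos, page_tables);
        // A page table created against this resource would then pull, e.g.:
        Kernel::KPageTableManager& pt = resource.GetPageTableManager();
        (void)pt;
    }
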
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 09c36ee09..47b760a9c 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -28,10 +28,12 @@
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
@@ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
namespace Kernel {
struct KernelCore::Impl {
+ static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
+ static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
+ static constexpr size_t BlockInfoSlabHeapSize = 4000;
+ static constexpr size_t ReservedDynamicPageCount = 64;
+
explicit Impl(Core::System& system_, KernelCore& kernel_)
: time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"},
service_thread_barrier{2}, system{system_} {}
@@ -71,7 +78,6 @@ struct KernelCore::Impl {
// Initialize kernel memory and resources.
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
- Init::InitializeKPageBufferSlabHeap(system);
InitializeShutdownThreads();
InitializePhysicalCores();
InitializePreemption(kernel);
@@ -81,7 +87,8 @@ struct KernelCore::Impl {
const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
ASSERT(pt_heap_region.GetEndAddress() != 0);
- InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
+ InitializeResourceManagers(kernel, pt_heap_region.GetAddress(),
+ pt_heap_region.GetSize());
}
RegisterHostThread();
@@ -253,16 +260,82 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
- void InitializeResourceManagers(VAddr address, size_t size) {
- dynamic_page_manager = std::make_unique<KDynamicPageManager>();
- memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
+ void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) {
+ // Ensure that the buffer is suitable for our use.
+ ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+
+ // Ensure that we have space for our reference counts.
+ const size_t rc_size =
+ Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize);
+ ASSERT(rc_size < size);
+ size -= rc_size;
+
+ // Initialize the resource managers' shared page manager.
+ resource_manager_page_manager = std::make_unique<KDynamicPageManager>();
+ resource_manager_page_manager->Initialize(
+ address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize));
+
+ // Initialize the KPageBuffer slab heap.
+ page_buffer_slab_heap.Initialize(system);
+
+ // Initialize the fixed-size slab heaps.
+ app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
+ sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
+ block_info_heap = std::make_unique<KBlockInfoSlabHeap>();
+ app_memory_block_heap->Initialize(resource_manager_page_manager.get(),
+ ApplicationMemoryBlockSlabHeapSize);
+ sys_memory_block_heap->Initialize(resource_manager_page_manager.get(),
+ SystemMemoryBlockSlabHeapSize);
+ block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize);
+
+ // Reserve all but a fixed number of remaining pages for the page table heap.
+ const size_t num_pt_pages = resource_manager_page_manager->GetCount() -
+ resource_manager_page_manager->GetUsed() -
+ ReservedDynamicPageCount;
+ page_table_heap = std::make_unique<KPageTableSlabHeap>();
+
+ // TODO(bunnei): Pass in address once we support kernel virtual memory allocations.
+ page_table_heap->Initialize(
+ resource_manager_page_manager.get(), num_pt_pages,
+ /*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr);
+
+ // Setup the slab managers.
+ KDynamicPageManager* const app_dynamic_page_manager = nullptr;
+ KDynamicPageManager* const sys_dynamic_page_manager =
+ /*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true
+ ? resource_manager_page_manager.get()
+ : nullptr;
app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
-
- dynamic_page_manager->Initialize(address, size);
- static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
- memory_block_heap->Initialize(dynamic_page_manager.get(),
- ApplicationMemoryBlockSlabHeapSize);
- app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
+ sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
+ app_block_info_manager = std::make_unique<KBlockInfoManager>();
+ sys_block_info_manager = std::make_unique<KBlockInfoManager>();
+ app_page_table_manager = std::make_unique<KPageTableManager>();
+ sys_page_table_manager = std::make_unique<KPageTableManager>();
+
+ app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get());
+ sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get());
+
+ app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get());
+ sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get());
+
+ app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get());
+ sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get());
+
+ // Check that we have the correct number of dynamic pages available.
+ ASSERT(resource_manager_page_manager->GetCount() -
+ resource_manager_page_manager->GetUsed() ==
+ ReservedDynamicPageCount);
+
+ // Create the application and system resources.
+ app_system_resource = std::make_unique<KSystemResource>(kernel);
+ sys_system_resource = std::make_unique<KSystemResource>(kernel);
+
+ // Set the managers for the system resources.
+ app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
+ *app_page_table_manager);
+ sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager,
+ *sys_page_table_manager);
}
void InitializeShutdownThreads() {
@@ -446,6 +519,9 @@ struct KernelCore::Impl {
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
+ // Determine if we'll use extra thread resources.
+ const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
+
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
@@ -456,7 +532,8 @@ struct KernelCore::Impl {
stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
- const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();
+ const size_t resource_region_size =
+ memory_layout->GetResourceRegionSizeForInit(use_extra_resources);
// Determine the size of the slab region.
const size_t slab_region_size =
@@ -751,6 +828,8 @@ struct KernelCore::Impl {
Init::KSlabResourceCounts slab_resource_counts{};
KResourceLimit* system_resource_limit{};
+ KPageBufferSlabHeap page_buffer_slab_heap;
+
std::shared_ptr<Core::Timing::EventType> preemption_event;
// This is the kernel's handle table or supervisor handle table which
@@ -776,10 +855,20 @@ struct KernelCore::Impl {
// Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager;
- // Dynamic slab managers
- std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
- std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
+ // Resource managers
+ std::unique_ptr<KDynamicPageManager> resource_manager_page_manager;
+ std::unique_ptr<KPageTableSlabHeap> page_table_heap;
+ std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap;
+ std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap;
+ std::unique_ptr<KBlockInfoSlabHeap> block_info_heap;
+ std::unique_ptr<KPageTableManager> app_page_table_manager;
+ std::unique_ptr<KPageTableManager> sys_page_table_manager;
std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
+ std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager;
+ std::unique_ptr<KBlockInfoManager> app_block_info_manager;
+ std::unique_ptr<KBlockInfoManager> sys_block_info_manager;
+ std::unique_ptr<KSystemResource> app_system_resource;
+ std::unique_ptr<KSystemResource> sys_system_resource;
// Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{};
@@ -1057,12 +1146,12 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
-KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
- return *impl->app_memory_block_manager;
+KSystemResource& KernelCore::GetSystemSystemResource() {
+ return *impl->sys_system_resource;
}
-const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
- return *impl->app_memory_block_manager;
+const KSystemResource& KernelCore::GetSystemSystemResource() const {
+ return *impl->sys_system_resource;
}
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4ae6b3923..caca60586 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -34,13 +34,16 @@ class KClientPort;
class GlobalSchedulerContext;
class KAutoObjectWithListContainer;
class KClientSession;
+class KDebug;
+class KDynamicPageManager;
class KEvent;
+class KEventInfo;
class KHandleTable;
class KLinkedListNode;
-class KMemoryBlockSlabManager;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
+class KPageBufferSlabHeap;
class KPort;
class KProcess;
class KResourceLimit;
@@ -51,6 +54,7 @@ class KSession;
class KSessionRequest;
class KSharedMemory;
class KSharedMemoryInfo;
+class KSecureSystemResource;
class KThread;
class KThreadLocalPage;
class KTransferMemory;
@@ -244,11 +248,11 @@ public:
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
- /// Gets the application memory block manager for the kernel.
- KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
+ /// Gets the system resource manager.
+ KSystemResource& GetSystemSystemResource();
- /// Gets the application memory block manager for the kernel.
- const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
+ /// Gets the system resource manager.
+ const KSystemResource& GetSystemSystemResource() const;
/// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem();
@@ -364,6 +368,12 @@ public:
return slab_heap_container->thread_local_page;
} else if constexpr (std::is_same_v<T, KSessionRequest>) {
return slab_heap_container->session_request;
+ } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
+ return slab_heap_container->secure_system_resource;
+ } else if constexpr (std::is_same_v<T, KEventInfo>) {
+ return slab_heap_container->event_info;
+ } else if constexpr (std::is_same_v<T, KDebug>) {
+ return slab_heap_container->debug;
}
}
@@ -427,6 +437,9 @@ private:
KSlabHeap<KPageBuffer> page_buffer;
KSlabHeap<KThreadLocalPage> thread_local_page;
KSlabHeap<KSessionRequest> session_request;
+ KSlabHeap<KSecureSystemResource> secure_system_resource;
+ KSlabHeap<KEventInfo> event_info;
+ KSlabHeap<KDebug> debug;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 06b51e919..0228ce188 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -53,6 +53,84 @@ public:
};
template <typename Derived, typename Base>
+class KAutoObjectWithSlabHeap : public Base {
+ static_assert(std::is_base_of<KAutoObject, Base>::value);
+
+private:
+ static Derived* Allocate(KernelCore& kernel) {
+ return kernel.SlabHeap<Derived>().Allocate(kernel);
+ }
+
+ static void Free(KernelCore& kernel, Derived* obj) {
+ kernel.SlabHeap<Derived>().Free(obj);
+ }
+
+public:
+ explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
+ virtual ~KAutoObjectWithSlabHeap() = default;
+
+ virtual void Destroy() override {
+ const bool is_initialized = this->IsInitialized();
+ uintptr_t arg = 0;
+ if (is_initialized) {
+ arg = this->GetPostDestroyArgument();
+ this->Finalize();
+ }
+ Free(kernel, static_cast<Derived*>(this));
+ if (is_initialized) {
+ Derived::PostDestroy(arg);
+ }
+ }
+
+ virtual bool IsInitialized() const {
+ return true;
+ }
+ virtual uintptr_t GetPostDestroyArgument() const {
+ return 0;
+ }
+
+ size_t GetSlabIndex() const {
+ return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
+ }
+
+public:
+ static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
+ kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
+ }
+
+ static Derived* Create(KernelCore& kernel) {
+ Derived* obj = Allocate(kernel);
+ if (obj != nullptr) {
+ KAutoObject::Create(obj);
+ }
+ return obj;
+ }
+
+ static size_t GetObjectSize(KernelCore& kernel) {
+ return kernel.SlabHeap<Derived>().GetObjectSize();
+ }
+
+ static size_t GetSlabHeapSize(KernelCore& kernel) {
+ return kernel.SlabHeap<Derived>().GetSlabHeapSize();
+ }
+
+ static size_t GetPeakIndex(KernelCore& kernel) {
+ return kernel.SlabHeap<Derived>().GetPeakIndex();
+ }
+
+ static uintptr_t GetSlabHeapAddress(KernelCore& kernel) {
+ return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
+ }
+
+ static size_t GetNumRemaining(KernelCore& kernel) {
+ return kernel.SlabHeap<Derived>().GetNumRemaining();
+ }
+
+protected:
+ KernelCore& kernel;
+};
+
+template <typename Derived, typename Base>
class KAutoObjectWithSlabHeapAndContainer : public Base {
static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
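For context, the lifecycle this helper establishes, as a hypothetical usage sketch (not from the commit; it assumes the slab heap for the type has been initialized and that the usual KAutoObject Open/Close reference counting applies): Create() allocates the object from its per-type slab heap, and once the last reference is closed, Destroy() finalizes the object if it was initialized, returns its storage to the slab heap, and then calls Derived::PostDestroy().

    // Illustrative only; KSecureSystemResource stands in for any type that
    // derives from KAutoObjectWithSlabHeap.
    void ExampleLifetime(Kernel::KernelCore& kernel) {
        auto* resource = Kernel::KSecureSystemResource::Create(kernel);
        if (resource != nullptr) {
            // ... the object would be initialized and used here ...
            resource->Close(); // closing the last reference runs Destroy():
                               // Finalize(), free to the slab heap, PostDestroy().
        }
    }
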
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4c819f4b6..ecac97a52 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -2247,7 +2247,7 @@ static u64 GetSystemTick(Core::System& system) {
auto& core_timing = system.CoreTiming();
// Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
- const u64 result{system.CoreTiming().GetClockTicks()};
+ const u64 result{core_timing.GetClockTicks()};
if (!system.Kernel().IsMulticore()) {
core_timing.AddTicks(400U);
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index f27cade33..b7ca53085 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
+constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};
constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index abb9847fe..9b0305552 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -22,8 +22,8 @@ enum class MemoryState : u32 {
Ipc = 0x0A,
Stack = 0x0B,
ThreadLocal = 0x0C,
- Transferred = 0x0D,
- SharedTransferred = 0x0E,
+ Transfered = 0x0D,
+ SharedTransfered = 0x0E,
SharedCode = 0x0F,
Inaccessible = 0x10,
NonSecureIpc = 0x11,
@@ -32,6 +32,7 @@ enum class MemoryState : u32 {
GeneratedCode = 0x14,
CodeOut = 0x15,
Coverage = 0x16,
+ Insecure = 0x17,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
@@ -83,6 +84,13 @@ enum class YieldType : s64 {
ToAnyThread = -2,
};
+enum class ThreadExitReason : u32 {
+ ExitThread = 0,
+ TerminateThread = 1,
+ ExitProcess = 2,
+ TerminateProcess = 3,
+};
+
enum class ThreadActivity : u32 {
Runnable = 0,
Paused = 1,
@@ -108,6 +116,34 @@ enum class ProcessState : u32 {
DebugBreak = 7,
};
+enum class ProcessExitReason : u32 {
+ ExitProcess = 0,
+ TerminateProcess = 1,
+ Exception = 2,
+};
+
constexpr inline size_t ThreadLocalRegionSize = 0x200;
+// Debug types.
+enum class DebugEvent : u32 {
+ CreateProcess = 0,
+ CreateThread = 1,
+ ExitProcess = 2,
+ ExitThread = 3,
+ Exception = 4,
+};
+
+enum class DebugException : u32 {
+ UndefinedInstruction = 0,
+ InstructionAbort = 1,
+ DataAbort = 2,
+ AlignmentFault = 3,
+ DebuggerAttached = 4,
+ BreakPoint = 5,
+ UserBreak = 6,
+ DebuggerBreak = 7,
+ UndefinedSystemCall = 8,
+ MemorySystemError = 9,
+};
+
} // namespace Kernel::Svc
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index ef4b2d417..56c990728 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc
} // namespace ResultImpl
#define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \
- [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \
+ [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) = \
std::same_as<decltype(__TmpCurrentResultReference), Result&>; \
- [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \
- [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \
- Result& __TmpCurrentResultReference = \
- HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE
+ [[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference; \
+ [[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess; \
+ Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE) \
+ ? CONCAT2(PrevRef_, COUNTER_VALUE) \
+ : CONCAT2(__tmp_result_, COUNTER_VALUE)
#define ON_RESULT_RETURN_IMPL(...) \
static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \
- auto RESULT_GUARD_STATE_##__COUNTER__ = \
+ auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) = \
ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \
__TmpCurrentResultReference) + \
[&]()
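Background for the CONCAT2 rework above (an illustrative, self-contained note; the two-level helper shown here mirrors the concatenation macro from yuzu's common headers rather than quoting it): when a macro argument such as __COUNTER__ appears directly next to ##, it is pasted without being expanded, so every use yields the same identifier and redeclarations collide; routing the paste through a second macro expands the argument first, so each declaration gets a unique name.

    // Illustrative only.
    #define DIRECT_PASTE(x) guard_##x
    #define PASTE_IMPL(a, b) a##b
    #define TWO_LEVEL_PASTE(a, b) PASTE_IMPL(a, b)
    #define EXPANDED_PASTE(x) TWO_LEVEL_PASTE(guard_, x)

    // DIRECT_PASTE(__COUNTER__)   -> guard___COUNTER__ on every use, because ##
    //                                does not expand its operands.
    // EXPANDED_PASTE(__COUNTER__) -> guard_0, guard_1, ... since __COUNTER__ is
    //                                expanded before the paste.
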
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 44388655d..fa29db758 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
return result;
}
+ bool is_out_io{};
ASSERT(system.CurrentProcess()
->PageTable()
- .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
- Kernel::KMemoryPermission::None, true)
+ .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
+ handle_description->size,
+ Kernel::KMemoryPermission::None, true, false)
.IsSuccess());
std::memcpy(output.data(), &params, sizeof(params));
return result;