Diffstat (limited to 'src')
-rw-r--r--  src/common/host_memory.cpp  4
-rw-r--r--  src/common/logging/backend.cpp  18
-rw-r--r--  src/common/page_table.cpp  58
-rw-r--r--  src/common/page_table.h  24
-rw-r--r--  src/common/settings.cpp  2
-rw-r--r--  src/common/settings.h  4
-rw-r--r--  src/core/CMakeLists.txt  2
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp  12
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp  13
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.cpp  4
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.h  2
-rw-r--r--  src/core/arm/exclusive_monitor.h  2
-rw-r--r--  src/core/core.cpp  11
-rw-r--r--  src/core/device_memory.cpp  5
-rw-r--r--  src/core/device_memory.h  4
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h  13
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp  36
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h  1
-rw-r--r--  src/core/hle/kernel/initial_process.h  23
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp  4
-rw-r--r--  src/core/hle/kernel/k_event.cpp  14
-rw-r--r--  src/core/hle/kernel/k_event.h  2
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h  4
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp  469
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h  167
-rw-r--r--  src/core/hle/kernel/k_memory_region_type.h  10
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp  126
-rw-r--r--  src/core/hle/kernel/k_page_heap.h  221
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp  598
-rw-r--r--  src/core/hle/kernel/k_page_table.h  9
-rw-r--r--  src/core/hle/kernel/k_process.cpp  11
-rw-r--r--  src/core/hle/kernel/k_process.h  2
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp  19
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h  3
-rw-r--r--  src/core/hle/kernel/kernel.cpp  120
-rw-r--r--  src/core/hle/kernel/kernel.h  4
-rw-r--r--  src/core/hle/kernel/svc.cpp  2
-rw-r--r--  src/core/hle/service/am/am.cpp  6
-rw-r--r--  src/core/hle/service/am/am.h  36
-rw-r--r--  src/core/hle/service/apm/apm_controller.cpp  10
-rw-r--r--  src/core/hle/service/apm/apm_controller.h  15
-rw-r--r--  src/core/hle/service/kernel_helpers.cpp  17
-rw-r--r--  src/core/memory.cpp  3
-rw-r--r--  src/input_common/drivers/sdl_driver.cpp  17
-rw-r--r--  src/input_common/drivers/udp_client.cpp  14
-rw-r--r--  src/input_common/drivers/udp_client.h  2
-rw-r--r--  src/input_common/input_engine.cpp  8
-rw-r--r--  src/input_common/input_engine.h  18
-rw-r--r--  src/input_common/input_poller.cpp  2
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.cpp  3
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.cpp  46
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.h  7
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h  2
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp  6
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp  37
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.h  5
-rw-r--r--  src/yuzu/configuration/config.cpp  6
-rw-r--r--  src/yuzu/configuration/configure_cpu.cpp  9
-rw-r--r--  src/yuzu/configuration/configure_cpu.h  1
-rw-r--r--  src/yuzu/configuration/configure_cpu.ui  12
-rw-r--r--  src/yuzu/configuration/configure_cpu_debug.cpp  8
-rw-r--r--  src/yuzu/configuration/configure_cpu_debug.ui  29
-rw-r--r--  src/yuzu/configuration/configure_general.cpp  9
-rw-r--r--  src/yuzu/configuration/configure_general.h  1
-rw-r--r--  src/yuzu/configuration/configure_general.ui  7
-rw-r--r--  src/yuzu/main.cpp  10
-rw-r--r--  src/yuzu_cmd/config.cpp  4
-rw-r--r--  src/yuzu_cmd/default_ini.h  13
70 files changed, 1730 insertions(+), 660 deletions(-)
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 28949fe5e..c465cfc14 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -327,8 +327,8 @@ private:
bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
if (it != placeholders.end() && it->lower() == virtual_offset + length) {
- const bool is_root = it == placeholders.begin() && virtual_offset == 0;
- return is_root || std::prev(it)->upper() == virtual_offset;
+ return it == placeholders.begin() ? virtual_offset == 0
+ : std::prev(it)->upper() == virtual_offset;
}
return false;
}
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index c51c05b28..f1c9ed6c4 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -218,19 +218,17 @@ private:
Impl(const std::filesystem::path& file_backend_filename, const Filter& filter_)
: filter{filter_}, file_backend{file_backend_filename} {}
- ~Impl() {
- StopBackendThread();
- }
+ ~Impl() = default;
void StartBackendThread() {
- backend_thread = std::thread([this] {
+ backend_thread = std::jthread([this](std::stop_token stop_token) {
Common::SetCurrentThreadName("yuzu:Log");
Entry entry;
const auto write_logs = [this, &entry]() {
ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
};
- while (!stop.stop_requested()) {
- entry = message_queue.PopWait(stop.get_token());
+ while (!stop_token.stop_requested()) {
+ entry = message_queue.PopWait(stop_token);
if (entry.filename != nullptr) {
write_logs();
}
@@ -244,11 +242,6 @@ private:
});
}
- void StopBackendThread() {
- stop.request_stop();
- backend_thread.join();
- }
-
Entry CreateEntry(Class log_class, Level log_level, const char* filename, unsigned int line_nr,
const char* function, std::string&& message) const {
using std::chrono::duration_cast;
@@ -283,8 +276,7 @@ private:
ColorConsoleBackend color_console_backend{};
FileBackend file_backend;
- std::stop_source stop;
- std::thread backend_thread;
+ std::jthread backend_thread;
MPSCQueue<Entry, true> message_queue{};
std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
};
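The logging change above swaps the manually managed std::thread and std::stop_source for std::jthread, whose destructor requests stop and then joins, which is why StopBackendThread() and the custom destructor can be removed. A minimal standalone sketch of that pattern (illustrative only, not yuzu code):

    #include <chrono>
    #include <stop_token>
    #include <thread>

    int main() {
        // std::jthread hands a std::stop_token to the callable; its destructor
        // calls request_stop() and then join() automatically.
        std::jthread worker([](std::stop_token stop_token) {
            while (!stop_token.stop_requested()) {
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
        });
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        // No explicit stop/join needed: the destructor does both here.
    }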
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 9fffd816f..4817b09f9 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -10,11 +10,65 @@ PageTable::PageTable() = default;
PageTable::~PageTable() noexcept = default;
-void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
- const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
+bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
+ u64 address) const {
+ // Setup invalid defaults.
+ out_entry.phys_addr = 0;
+ out_entry.block_size = page_size;
+ out_context.next_page = 0;
+
+ // Validate that we can read the actual entry.
+ const auto page = address / page_size;
+ if (page >= backing_addr.size()) {
+ return false;
+ }
+
+ // Validate that the entry is mapped.
+ const auto phys_addr = backing_addr[page];
+ if (phys_addr == 0) {
+ return false;
+ }
+
+ // Populate the results.
+ out_entry.phys_addr = phys_addr + address;
+ out_context.next_page = page + 1;
+ out_context.next_offset = address + page_size;
+
+ return true;
+}
+
+bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+ // Setup invalid defaults.
+ out_entry.phys_addr = 0;
+ out_entry.block_size = page_size;
+
+ // Validate that we can read the actual entry.
+ const auto page = context.next_page;
+ if (page >= backing_addr.size()) {
+ return false;
+ }
+
+ // Validate that the entry is mapped.
+ const auto phys_addr = backing_addr[page];
+ if (phys_addr == 0) {
+ return false;
+ }
+
+ // Populate the results.
+ out_entry.phys_addr = phys_addr + context.next_offset;
+ context.next_page = page + 1;
+ context.next_offset += page_size;
+
+ return true;
+}
+
+void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
+ const std::size_t num_page_table_entries{1ULL
+ << (address_space_width_in_bits - page_size_in_bits)};
pointers.resize(num_page_table_entries);
backing_addr.resize(num_page_table_entries);
current_address_space_width_in_bits = address_space_width_in_bits;
+ page_size = 1ULL << page_size_in_bits;
}
} // namespace Common
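For reference, the traversal pair added above is meant to be driven as a begin/continue loop over successively mapped pages. A hypothetical caller (invented for illustration; the real users live in the kernel page table code) could look like:

    #include "common/page_table.h"

    // Walks [address, address + size) and returns false at the first unmapped page.
    bool IsRangeMapped(const Common::PageTable& table, u64 address, u64 size) {
        Common::PageTable::TraversalEntry entry{};
        Common::PageTable::TraversalContext context{};
        if (!table.BeginTraversal(entry, context, address)) {
            return false;
        }
        // entry.phys_addr now holds the physical address backing 'address'.
        for (u64 walked = entry.block_size; walked < size; walked += entry.block_size) {
            if (!table.ContinueTraversal(entry, context)) {
                return false;
            }
        }
        return true;
    }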
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 8267e8b4d..82d91e9f3 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -27,6 +27,16 @@ enum class PageType : u8 {
* mimics the way a real CPU page table works.
*/
struct PageTable {
+ struct TraversalEntry {
+ u64 phys_addr{};
+ std::size_t block_size{};
+ };
+
+ struct TraversalContext {
+ u64 next_page{};
+ u64 next_offset{};
+ };
+
/// Number of bits reserved for attribute tagging.
/// This can be at most the guaranteed alignment of the pointers in the page table.
static constexpr int ATTRIBUTE_BITS = 2;
@@ -89,6 +99,10 @@ struct PageTable {
PageTable(PageTable&&) noexcept = default;
PageTable& operator=(PageTable&&) noexcept = default;
+ bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
+ u64 address) const;
+ bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+
/**
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
@@ -96,9 +110,9 @@ struct PageTable {
* @param address_space_width_in_bits The address size width in bits.
* @param page_size_in_bits The page size in bits.
*/
- void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
+ void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
- size_t GetAddressSpaceBits() const {
+ std::size_t GetAddressSpaceBits() const {
return current_address_space_width_in_bits;
}
@@ -110,9 +124,11 @@ struct PageTable {
VirtualBuffer<u64> backing_addr;
- size_t current_address_space_width_in_bits;
+ std::size_t current_address_space_width_in_bits{};
+
+ u8* fastmem_arena{};
- u8* fastmem_arena;
+ std::size_t page_size{};
};
} // namespace Common
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 6964a8273..877e0faa4 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -167,6 +167,7 @@ void RestoreGlobalState(bool is_powered_on) {
// Core
values.use_multi_core.SetGlobal(true);
+ values.use_extended_memory_layout.SetGlobal(true);
// CPU
values.cpu_accuracy.SetGlobal(true);
@@ -175,6 +176,7 @@ void RestoreGlobalState(bool is_powered_on) {
values.cpuopt_unsafe_ignore_standard_fpcr.SetGlobal(true);
values.cpuopt_unsafe_inaccurate_nan.SetGlobal(true);
values.cpuopt_unsafe_fastmem_check.SetGlobal(true);
+ values.cpuopt_unsafe_ignore_global_monitor.SetGlobal(true);
// Renderer
values.renderer_backend.SetGlobal(true);
diff --git a/src/common/settings.h b/src/common/settings.h
index 9bee6e10f..a37d83fb3 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -466,6 +466,7 @@ struct Values {
// Core
Setting<bool> use_multi_core{true, "use_multi_core"};
+ Setting<bool> use_extended_memory_layout{false, "use_extended_memory_layout"};
// Cpu
RangedSetting<CPUAccuracy> cpu_accuracy{CPUAccuracy::Auto, CPUAccuracy::Auto,
@@ -483,12 +484,15 @@ struct Values {
BasicSetting<bool> cpuopt_misc_ir{true, "cpuopt_misc_ir"};
BasicSetting<bool> cpuopt_reduce_misalign_checks{true, "cpuopt_reduce_misalign_checks"};
BasicSetting<bool> cpuopt_fastmem{true, "cpuopt_fastmem"};
+ BasicSetting<bool> cpuopt_fastmem_exclusives{true, "cpuopt_fastmem_exclusives"};
+ BasicSetting<bool> cpuopt_recompile_exclusives{true, "cpuopt_recompile_exclusives"};
Setting<bool> cpuopt_unsafe_unfuse_fma{true, "cpuopt_unsafe_unfuse_fma"};
Setting<bool> cpuopt_unsafe_reduce_fp_error{true, "cpuopt_unsafe_reduce_fp_error"};
Setting<bool> cpuopt_unsafe_ignore_standard_fpcr{true, "cpuopt_unsafe_ignore_standard_fpcr"};
Setting<bool> cpuopt_unsafe_inaccurate_nan{true, "cpuopt_unsafe_inaccurate_nan"};
Setting<bool> cpuopt_unsafe_fastmem_check{true, "cpuopt_unsafe_fastmem_check"};
+ Setting<bool> cpuopt_unsafe_ignore_global_monitor{true, "cpuopt_unsafe_ignore_global_monitor"};
// Renderer
RangedSetting<RendererBackend> renderer_backend{
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0c10cd019..5db6a1b3a 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -152,6 +152,7 @@ add_library(core STATIC
hle/api_version.h
hle/ipc.h
hle/ipc_helpers.h
+ hle/kernel/board/nintendo/nx/k_memory_layout.h
hle/kernel/board/nintendo/nx/k_system_control.cpp
hle/kernel/board/nintendo/nx/k_system_control.h
hle/kernel/board/nintendo/nx/secure_monitor.h
@@ -164,6 +165,7 @@ add_library(core STATIC
hle/kernel/hle_ipc.h
hle/kernel/init/init_slab_setup.cpp
hle/kernel/init/init_slab_setup.h
+ hle/kernel/initial_process.h
hle/kernel/k_address_arbiter.cpp
hle/kernel/k_address_arbiter.h
hle/kernel/k_address_space_info.cpp
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index b0d89c539..286976623 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -137,6 +137,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+ config.fastmem_exclusive_access = true;
+ config.recompile_on_exclusive_fastmem_failure = true;
// Multi-process state
config.processor_id = core_index;
@@ -178,6 +180,12 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
if (!Settings::values.cpuopt_fastmem) {
config.fastmem_pointer = nullptr;
}
+ if (!Settings::values.cpuopt_fastmem_exclusives) {
+ config.fastmem_exclusive_access = false;
+ }
+ if (!Settings::values.cpuopt_recompile_exclusives) {
+ config.recompile_on_exclusive_fastmem_failure = false;
+ }
}
// Unsafe optimizations
@@ -195,6 +203,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
}
+ if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+ }
}
// Curated optimizations
@@ -203,6 +214,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreStandardFPCRValue;
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
}
return std::make_unique<Dynarmic::A32::Jit>(config);
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 56836bd05..d96226c41 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -185,6 +185,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
config.fastmem_pointer = page_table->fastmem_arena;
config.fastmem_address_space_bits = address_space_bits;
config.silently_mirror_fastmem = false;
+
+ config.fastmem_exclusive_access = true;
+ config.recompile_on_exclusive_fastmem_failure = true;
}
// Multi-process state
@@ -237,6 +240,12 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
if (!Settings::values.cpuopt_fastmem) {
config.fastmem_pointer = nullptr;
}
+ if (!Settings::values.cpuopt_fastmem_exclusives) {
+ config.fastmem_exclusive_access = false;
+ }
+ if (!Settings::values.cpuopt_recompile_exclusives) {
+ config.recompile_on_exclusive_fastmem_failure = false;
+ }
}
// Unsafe optimizations
@@ -254,6 +263,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
if (Settings::values.cpuopt_unsafe_fastmem_check) {
config.fastmem_address_space_bits = 64;
}
+ if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+ }
}
// Curated optimizations
@@ -262,6 +274,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
config.fastmem_address_space_bits = 64;
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
}
return std::make_shared<Dynarmic::A64::Jit>(config);
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
index 397d054a8..ea6b224e0 100644
--- a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp
@@ -37,8 +37,8 @@ u128 DynarmicExclusiveMonitor::ExclusiveRead128(std::size_t core_index, VAddr ad
});
}
-void DynarmicExclusiveMonitor::ClearExclusive() {
- monitor.Clear();
+void DynarmicExclusiveMonitor::ClearExclusive(std::size_t core_index) {
+ monitor.ClearProcessor(core_index);
}
bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.h b/src/core/arm/dynarmic/arm_exclusive_monitor.h
index 265c4ecef..5a15b43ef 100644
--- a/src/core/arm/dynarmic/arm_exclusive_monitor.h
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.h
@@ -29,7 +29,7 @@ public:
u32 ExclusiveRead32(std::size_t core_index, VAddr addr) override;
u64 ExclusiveRead64(std::size_t core_index, VAddr addr) override;
u128 ExclusiveRead128(std::size_t core_index, VAddr addr) override;
- void ClearExclusive() override;
+ void ClearExclusive(std::size_t core_index) override;
bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h
index 62f6e6023..9914ca3da 100644
--- a/src/core/arm/exclusive_monitor.h
+++ b/src/core/arm/exclusive_monitor.h
@@ -23,7 +23,7 @@ public:
virtual u32 ExclusiveRead32(std::size_t core_index, VAddr addr) = 0;
virtual u64 ExclusiveRead64(std::size_t core_index, VAddr addr) = 0;
virtual u128 ExclusiveRead128(std::size_t core_index, VAddr addr) = 0;
- virtual void ClearExclusive() = 0;
+ virtual void ClearExclusive(std::size_t core_index) = 0;
virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
virtual bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) = 0;
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 3f9a7f44b..b0cfee3ee 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -28,7 +28,9 @@
#include "core/file_sys/vfs_real.h"
#include "core/hardware_interrupt_manager.h"
#include "core/hid/hid_core.h"
+#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
@@ -252,9 +254,16 @@ struct System::Impl {
}
telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
+
+ // Create a resource limit for the process.
+ const auto physical_memory_size =
+ kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
+ auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size);
+
+ // Create the process.
auto main_process = Kernel::KProcess::Create(system.Kernel());
ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
- Kernel::KProcess::ProcessType::Userland)
+ Kernel::KProcess::ProcessType::Userland, resource_limit)
.IsSuccess());
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp
index f19c0515f..e6bc63086 100644
--- a/src/core/device_memory.cpp
+++ b/src/core/device_memory.cpp
@@ -3,10 +3,13 @@
// Refer to the license.txt file included.
#include "core/device_memory.h"
+#include "hle/kernel/board/nintendo/nx/k_system_control.h"
namespace Core {
-DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size, 1ULL << 39} {}
+DeviceMemory::DeviceMemory()
+ : buffer{Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize(),
+ 1ULL << 39} {}
DeviceMemory::~DeviceMemory() = default;
} // namespace Core
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index c4d17705f..daeb551ea 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -12,12 +12,8 @@ namespace Core {
namespace DramMemoryMap {
enum : u64 {
Base = 0x80000000ULL,
- Size = 0x100000000ULL,
- End = Base + Size,
KernelReserveBase = Base + 0x60000,
SlabHeapBase = KernelReserveBase + 0x85000,
- SlapHeapSize = 0xa21000,
- SlabHeapEnd = SlabHeapBase + SlapHeapSize,
};
}; // namespace DramMemoryMap
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
new file mode 100644
index 000000000..01e225088
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
@@ -0,0 +1,13 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+constexpr inline PAddr MainMemoryAddress = 0x80000000;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 6f335c251..8027bec00 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -5,6 +5,7 @@
#include <random>
#include "common/literals.h"
+#include "common/settings.h"
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
@@ -28,33 +29,20 @@ namespace {
using namespace Common::Literals;
-u32 GetMemoryModeForInit() {
- return 0x01;
-}
-
u32 GetMemorySizeForInit() {
- return 0;
+ return Settings::values.use_extended_memory_layout ? Smc::MemorySize_6GB : Smc::MemorySize_4GB;
}
Smc::MemoryArrangement GetMemoryArrangeForInit() {
- switch (GetMemoryModeForInit() & 0x3F) {
- case 0x01:
- default:
- return Smc::MemoryArrangement_4GB;
- case 0x02:
- return Smc::MemoryArrangement_4GBForAppletDev;
- case 0x03:
- return Smc::MemoryArrangement_4GBForSystemDev;
- case 0x11:
- return Smc::MemoryArrangement_6GB;
- case 0x12:
- return Smc::MemoryArrangement_6GBForAppletDev;
- case 0x21:
- return Smc::MemoryArrangement_8GB;
- }
+ return Settings::values.use_extended_memory_layout ? Smc::MemoryArrangement_6GB
+ : Smc::MemoryArrangement_4GB;
}
} // namespace
+size_t KSystemControl::Init::GetRealMemorySize() {
+ return GetIntendedMemorySize();
+}
+
// Initialization.
size_t KSystemControl::Init::GetIntendedMemorySize() {
switch (GetMemorySizeForInit()) {
@@ -69,7 +57,13 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
}
PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
- return base_address;
+ const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
+ const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
+ if (intended_dram_size * 2 < real_dram_size) {
+ return base_address;
+ } else {
+ return base_address + ((real_dram_size - intended_dram_size) / 2);
+ }
}
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
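The new GetKernelPhysicalBaseAddress centres the intended DRAM window inside the real DRAM whenever the intended size is at least half of the real size; since the emulated GetRealMemorySize simply returns GetIntendedMemorySize, the shift is currently zero. A self-contained check of that arithmetic (values are illustrative, not taken from the patch):

    #include <cstdint>

    constexpr uint64_t AdjustBase(uint64_t base, uint64_t real, uint64_t intended) {
        // Mirrors the branch above: only shift when intended covers at least half of real DRAM.
        return (intended * 2 < real) ? base : base + (real - intended) / 2;
    }
    // 8 GiB of real DRAM with a 6 GiB intended layout shifts the base up by 1 GiB.
    static_assert(AdjustBase(0x80000000, 8ULL << 30, 6ULL << 30) == 0x80000000 + (1ULL << 30));
    // real == intended (the emulated case) leaves the base untouched.
    static_assert(AdjustBase(0x80000000, 6ULL << 30, 6ULL << 30) == 0x80000000);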
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index 52f230ced..df2a17f2a 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -13,6 +13,7 @@ public:
class Init {
public:
// Initialization.
+ static std::size_t GetRealMemorySize();
static std::size_t GetIntendedMemorySize();
static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
static bool ShouldIncreaseThreadResourceLimit();
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
new file mode 100644
index 000000000..25b27909c
--- /dev/null
+++ b/src/core/hle/kernel/initial_process.h
@@ -0,0 +1,23 @@
+// Copyright 2022 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "common/literals.h"
+#include "core/hle/kernel/board/nintendo/nx/k_memory_layout.h"
+#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
+
+namespace Kernel {
+
+using namespace Common::Literals;
+
+constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;
+
+static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
+ return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
+ MainMemoryAddress);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 783c69858..1d1f5e5f8 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -49,7 +49,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
}
} else {
// Otherwise, clear our exclusive hold and finish
- monitor.ClearExclusive();
+ monitor.ClearExclusive(current_core);
}
// We're done.
@@ -78,7 +78,7 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
}
} else {
// Otherwise, clear our exclusive hold and finish.
- monitor.ClearExclusive();
+ monitor.ClearExclusive(current_core);
}
// We're done.
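Both arbiter call sites above follow the usual load-exclusive/store-exclusive shape, and with the interface change they now drop only the current core's reservation on the bail-out path. A schematic version of that flow (the function name and update rule are invented for illustration):

    #include <cstddef>
    #include "core/arm/exclusive_monitor.h"

    // Single-shot exclusive update: only writes if the current value is non-zero.
    bool DecrementIfPositive(Core::ExclusiveMonitor& monitor, std::size_t current_core,
                             VAddr address) {
        // Load-exclusive: read the value and tag 'address' for this core.
        const u32 current = monitor.ExclusiveRead32(current_core, address);
        if (current == 0) {
            // Not writing after all: release only this core's reservation.
            monitor.ClearExclusive(current_core);
            return false;
        }
        // Store-exclusive: fails if another core wrote the address in between.
        return monitor.ExclusiveWrite32(current_core, address, current - 1);
    }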
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index 0720efece..2e0e8de80 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -14,7 +14,7 @@ KEvent::KEvent(KernelCore& kernel_)
KEvent::~KEvent() = default;
-void KEvent::Initialize(std::string&& name_) {
+void KEvent::Initialize(std::string&& name_, KProcess* owner_) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both readable and
@@ -30,10 +30,8 @@ void KEvent::Initialize(std::string&& name_) {
writable_event.Initialize(this, name_ + ":Writable");
// Set our owner process.
- owner = kernel.CurrentProcess();
- if (owner) {
- owner->Open();
- }
+ owner = owner_;
+ owner->Open();
// Mark initialized.
name = std::move(name_);
@@ -47,10 +45,8 @@ void KEvent::Finalize() {
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
- if (owner) {
- owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
- owner->Close();
- }
+ owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
+ owner->Close();
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 3d3ec99e2..de9732ddf 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -22,7 +22,7 @@ public:
explicit KEvent(KernelCore& kernel_);
~KEvent() override;
- void Initialize(std::string&& name);
+ void Initialize(std::string&& name, KProcess* owner_);
void Finalize() override;
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 57ff538cc..bcddb0d62 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -173,6 +173,10 @@ public:
return Dereference(FindVirtualLinear(address));
}
+ const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const {
+ return Dereference(FindPhysicalLinear(address));
+ }
+
const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const {
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer);
}
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 1b44541b1..a2f18f643 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -10,189 +10,412 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/device_memory.h"
+#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
+#include "core/memory.h"
namespace Kernel {
-KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {}
+namespace {
+
+constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
+ if ((type | KMemoryRegionType_DramApplicationPool) == type) {
+ return KMemoryManager::Pool::Application;
+ } else if ((type | KMemoryRegionType_DramAppletPool) == type) {
+ return KMemoryManager::Pool::Applet;
+ } else if ((type | KMemoryRegionType_DramSystemPool) == type) {
+ return KMemoryManager::Pool::System;
+ } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
+ return KMemoryManager::Pool::SystemNonSecure;
+ } else {
+ UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
+ return {};
+ }
+}
-std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
- const auto size{end_address - start_address};
+} // namespace
+
+KMemoryManager::KMemoryManager(Core::System& system_)
+ : system{system_}, pool_locks{
+ KLightLock{system_.Kernel()},
+ KLightLock{system_.Kernel()},
+ KLightLock{system_.Kernel()},
+ KLightLock{system_.Kernel()},
+ } {}
+
+void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
+
+ // Clear the management region to zero.
+ const VAddr management_region_end = management_region + management_region_size;
+
+ // Reset our manager count.
+ num_managers = 0;
+
+ // Traverse the virtual memory layout tree, initializing each manager as appropriate.
+ while (num_managers != MaxManagerCount) {
+ // Locate the region that should initialize the current manager.
+ PAddr region_address = 0;
+ size_t region_size = 0;
+ Pool region_pool = Pool::Count;
+ for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+ // We only care about regions that we need to create managers for.
+ if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
+ continue;
+ }
- // Calculate metadata sizes
- const auto ref_count_size{(size / PageSize) * sizeof(u16)};
- const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
- const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
- const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)};
- const auto total_metadata_size{manager_size + page_heap_size};
- ASSERT(manager_size <= total_metadata_size);
- ASSERT(Common::IsAligned(total_metadata_size, PageSize));
+ // We want to initialize the managers in order.
+ if (it.GetAttributes() != num_managers) {
+ continue;
+ }
- // Setup region
- pool = new_pool;
+ const PAddr cur_start = it.GetAddress();
+ const PAddr cur_end = it.GetEndAddress();
+
+ // Validate the region.
+ ASSERT(cur_end != 0);
+ ASSERT(cur_start != 0);
+ ASSERT(it.GetSize() > 0);
+
+ // Update the region's extents.
+ if (region_address == 0) {
+ region_address = cur_start;
+ region_size = it.GetSize();
+ region_pool = GetPoolFromMemoryRegionType(it.GetType());
+ } else {
+ ASSERT(cur_start == region_address + region_size);
+
+ // Update the size.
+ region_size = cur_end - region_address;
+ ASSERT(GetPoolFromMemoryRegionType(it.GetType()) == region_pool);
+ }
+ }
+
+ // If we didn't find a region, we're done.
+ if (region_size == 0) {
+ break;
+ }
- // Initialize the manager's KPageHeap
- heap.Initialize(start_address, size, page_heap_size);
+ // Initialize a new manager for the region.
+ Impl* manager = std::addressof(managers[num_managers++]);
+ ASSERT(num_managers <= managers.size());
+
+ const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
+ management_region_end, region_pool);
+ management_region += cur_size;
+ ASSERT(management_region <= management_region_end);
+
+ // Insert the manager into the pool list.
+ const auto region_pool_index = static_cast<u32>(region_pool);
+ if (pool_managers_tail[region_pool_index] == nullptr) {
+ pool_managers_head[region_pool_index] = manager;
+ } else {
+ pool_managers_tail[region_pool_index]->SetNext(manager);
+ manager->SetPrev(pool_managers_tail[region_pool_index]);
+ }
+ pool_managers_tail[region_pool_index] = manager;
+ }
- // Free the memory to the heap
- heap.Free(start_address, size / PageSize);
+ // Free each region to its corresponding heap.
+ size_t reserved_sizes[MaxManagerCount] = {};
+ const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
+ const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
+ const PAddr ini_last = ini_end - 1;
+ for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+ if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
+ // Get the manager for the region.
+ auto index = it.GetAttributes();
+ auto& manager = managers[index];
+
+ const PAddr cur_start = it.GetAddress();
+ const PAddr cur_last = it.GetLastAddress();
+ const PAddr cur_end = it.GetEndAddress();
+
+ if (cur_start <= ini_start && ini_last <= cur_last) {
+ // Free memory before the ini to the heap.
+ if (cur_start != ini_start) {
+ manager.Free(cur_start, (ini_start - cur_start) / PageSize);
+ }
- // Update the heap's used size
- heap.UpdateUsedSize();
+ // Open/reserve the ini memory.
+ manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
+ reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
- return total_metadata_size;
-}
+ // Free memory after the ini to the heap.
+ if (ini_last != cur_last) {
+ ASSERT(cur_end != 0);
+ manager.Free(ini_end, cur_end - ini_end);
+ }
+ } else {
+ // Ensure there's no partial overlap with the ini image.
+ if (cur_start <= ini_last) {
+ ASSERT(cur_last < ini_start);
+ } else {
+ // Otherwise, check the region for general validity.
+ ASSERT(cur_end != 0);
+ }
-void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
- ASSERT(pool < Pool::Count);
- managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
+ // Free the memory to the heap.
+ manager.Free(cur_start, it.GetSize() / PageSize);
+ }
+ }
+ }
+
+ // Update the used size for all managers.
+ for (size_t i = 0; i < num_managers; ++i) {
+ managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
+ }
}
-VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages,
- u32 option) {
- // Early return if we're allocating no pages
+PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
+ // Early return if we're allocating no pages.
if (num_pages == 0) {
- return {};
+ return 0;
}
- // Lock the pool that we're allocating from
+ // Lock the pool that we're allocating from.
const auto [pool, dir] = DecodeOption(option);
- const auto pool_index{static_cast<std::size_t>(pool)};
- std::lock_guard lock{pool_locks[pool_index]};
-
- // Choose a heap based on our page size request
- const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
-
- // Loop, trying to iterate from each block
- // TODO (bunnei): Support multiple managers
- Impl& chosen_manager{managers[pool_index]};
- VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)};
+ KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
+
+ // Choose a heap based on our page size request.
+ const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
+
+ // Loop, trying to iterate from each block.
+ Impl* chosen_manager = nullptr;
+ PAddr allocated_block = 0;
+ for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
+ chosen_manager = this->GetNextManager(chosen_manager, dir)) {
+ allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+ if (allocated_block != 0) {
+ break;
+ }
+ }
- // If we failed to allocate, quit now
- if (!allocated_block) {
- return {};
+ // If we failed to allocate, quit now.
+ if (allocated_block == 0) {
+ return 0;
}
- // If we allocated more than we need, free some
- const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)};
+ // If we allocated more than we need, free some.
+ const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
if (allocated_pages > num_pages) {
- chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+ chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
}
+ // Open the first reference to the pages.
+ chosen_manager->OpenFirst(allocated_block, num_pages);
+
return allocated_block;
}
-ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir, u32 heap_fill_value) {
- ASSERT(page_list.GetNumPages() == 0);
+ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
+ Direction dir, bool random) {
+ // Choose a heap based on our page size request.
+ const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
+ R_UNLESS(0 <= heap_index, ResultOutOfMemory);
+
+ // Ensure that we don't leave anything un-freed.
+ auto group_guard = SCOPE_GUARD({
+ for (const auto& it : out->Nodes()) {
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
+ const size_t num_pages_to_free =
+ std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
+ manager.Free(it.GetAddress(), num_pages_to_free);
+ }
+ });
- // Early return if we're allocating no pages
- if (num_pages == 0) {
- return ResultSuccess;
- }
+ // Keep allocating until we've allocated all our pages.
+ for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+ const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
+ for (Impl* cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr;
+ cur_manager = this->GetNextManager(cur_manager, dir)) {
+ while (num_pages >= pages_per_alloc) {
+ // Allocate a block.
+ PAddr allocated_block = cur_manager->AllocateBlock(index, random);
+ if (allocated_block == 0) {
+ break;
+ }
- // Lock the pool that we're allocating from
- const auto pool_index{static_cast<std::size_t>(pool)};
- std::lock_guard lock{pool_locks[pool_index]};
+ // Safely add it to our group.
+ {
+ auto block_guard =
+ SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
+ R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+ block_guard.Cancel();
+ }
- // Choose a heap based on our page size request
- const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)};
- if (heap_index < 0) {
- return ResultOutOfMemory;
+ num_pages -= pages_per_alloc;
+ }
+ }
}
- // TODO (bunnei): Support multiple managers
- Impl& chosen_manager{managers[pool_index]};
+ // Only succeed if we allocated as many pages as we wanted.
+ R_UNLESS(num_pages == 0, ResultOutOfMemory);
- // Ensure that we don't leave anything un-freed
- auto group_guard = detail::ScopeExit([&] {
- for (const auto& it : page_list.Nodes()) {
- const auto min_num_pages{std::min<size_t>(
- it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
- chosen_manager.Free(it.GetAddress(), min_num_pages);
- }
- });
+ // We succeeded!
+ group_guard.Cancel();
+ return ResultSuccess;
+}
- // Keep allocating until we've allocated all our pages
- for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
- const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)};
+ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) {
+ ASSERT(out != nullptr);
+ ASSERT(out->GetNumPages() == 0);
- while (num_pages >= pages_per_alloc) {
- // Allocate a block
- VAddr allocated_block{chosen_manager.AllocateBlock(index, false)};
- if (!allocated_block) {
- break;
- }
+ // Early return if we're allocating no pages.
+ R_SUCCEED_IF(num_pages == 0);
- // Safely add it to our group
- {
- auto block_guard = detail::ScopeExit(
- [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });
+ // Lock the pool that we're allocating from.
+ const auto [pool, dir] = DecodeOption(option);
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+
+ // Allocate the page group.
+ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+
+ // Open the first reference to the pages.
+ for (const auto& block : out->Nodes()) {
+ PAddr cur_address = block.GetAddress();
+ size_t remaining_pages = block.GetNumPages();
+ while (remaining_pages > 0) {
+ // Get the manager for the current address.
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+
+ // Process part or all of the block.
+ const size_t cur_pages =
+ std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+ manager.OpenFirst(cur_address, cur_pages);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ remaining_pages -= cur_pages;
+ }
+ }
- if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
- result.IsError()) {
- return result;
- }
+ return ResultSuccess;
+}
- block_guard.Cancel();
- }
+ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages,
+ u32 option, u64 process_id, u8 fill_pattern) {
+ ASSERT(out != nullptr);
+ ASSERT(out->GetNumPages() == 0);
- num_pages -= pages_per_alloc;
- }
- }
+ // Decode the option.
+ const auto [pool, dir] = DecodeOption(option);
- // Clear allocated memory.
- for (const auto& it : page_list.Nodes()) {
- std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
- it.GetSize());
+ // Allocate the memory.
+ {
+ // Lock the pool that we're allocating from.
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+
+ // Allocate the page group.
+ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+
+ // Open the first reference to the pages.
+ for (const auto& block : out->Nodes()) {
+ PAddr cur_address = block.GetAddress();
+ size_t remaining_pages = block.GetNumPages();
+ while (remaining_pages > 0) {
+ // Get the manager for the current address.
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+
+ // Process part or all of the block.
+ const size_t cur_pages =
+ std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+ manager.OpenFirst(cur_address, cur_pages);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ remaining_pages -= cur_pages;
+ }
+ }
}
- // Only succeed if we allocated as many pages as we wanted
- if (num_pages) {
- return ResultOutOfMemory;
+ // Set all the allocated memory.
+ for (const auto& block : out->Nodes()) {
+ std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+ block.GetSize());
}
- // We succeeded!
- group_guard.Cancel();
-
return ResultSuccess;
}
-ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir, u32 heap_fill_value) {
- // Early return if we're freeing no pages
- if (!num_pages) {
- return ResultSuccess;
+void KMemoryManager::Open(PAddr address, size_t num_pages) {
+ // Repeatedly open references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+ {
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.Open(address, cur_pages);
+ }
+
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
}
+}
- // Lock the pool that we're freeing from
- const auto pool_index{static_cast<std::size_t>(pool)};
- std::lock_guard lock{pool_locks[pool_index]};
+void KMemoryManager::Close(PAddr address, size_t num_pages) {
+ // Repeatedly close references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
- // TODO (bunnei): Support multiple managers
- Impl& chosen_manager{managers[pool_index]};
+ {
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.Close(address, cur_pages);
+ }
- // Free all of the pages
- for (const auto& it : page_list.Nodes()) {
- const auto min_num_pages{std::min<size_t>(
- it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
- chosen_manager.Free(it.GetAddress(), min_num_pages);
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
}
+}
- return ResultSuccess;
+void KMemoryManager::Close(const KPageLinkedList& pg) {
+ for (const auto& node : pg.Nodes()) {
+ Close(node.GetAddress(), node.GetNumPages());
+ }
+}
+void KMemoryManager::Open(const KPageLinkedList& pg) {
+ for (const auto& node : pg.Nodes()) {
+ Open(node.GetAddress(), node.GetNumPages());
+ }
+}
+
+size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
+ VAddr management_end, Pool p) {
+ // Calculate management sizes.
+ const size_t ref_count_size = (size / PageSize) * sizeof(u16);
+ const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
+ const size_t manager_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
+ const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size);
+ const size_t total_management_size = manager_size + page_heap_size;
+ ASSERT(manager_size <= total_management_size);
+ ASSERT(management + total_management_size <= management_end);
+ ASSERT(Common::IsAligned(total_management_size, PageSize));
+
+ // Setup region.
+ pool = p;
+ management_region = management;
+ page_reference_counts.resize(
+ Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
+ ASSERT(Common::IsAligned(management_region, PageSize));
+
+ // Initialize the manager's KPageHeap.
+ heap.Initialize(address, size, management + manager_size, page_heap_size);
+
+ return total_management_size;
}
-std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) {
- const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
- const std::size_t optimize_map_size =
+size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
+ const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
+ const size_t optimize_map_size =
(Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
- const std::size_t manager_meta_size =
- Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
- const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
+ const size_t manager_meta_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
+ const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
return manager_meta_size + page_heap_size;
}
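One detail worth noting in the rewritten manager (its body appears in the header hunk below) is that Close decrements per-page reference counts and coalesces consecutive pages that reach zero into a single Free call. A stripped-down, standalone version of that batching idea (invented names, a plain vector standing in for the manager's state):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Decrements refcounts for [first_page, first_page + num_pages) and reports each
    // maximal run of pages that dropped to zero with one callback.
    template <typename FreeRun>
    void CloseRange(std::vector<uint16_t>& ref_counts, std::size_t first_page,
                    std::size_t num_pages, FreeRun&& free_run) {
        std::size_t free_start = 0;
        std::size_t free_count = 0;
        for (std::size_t index = first_page; index < first_page + num_pages; ++index) {
            if (--ref_counts[index] == 0) {
                if (free_count == 0) {
                    free_start = index;
                }
                ++free_count;
            } else if (free_count > 0) {
                free_run(free_start, free_count);
                free_count = 0;
            }
        }
        if (free_count > 0) {
            free_run(free_start, free_count);
        }
    }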
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 17c7690f1..18775b262 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -5,11 +5,12 @@
#pragma once
#include <array>
-#include <mutex>
#include <tuple>
#include "common/common_funcs.h"
#include "common/common_types.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/result.h"
@@ -52,22 +53,33 @@ public:
explicit KMemoryManager(Core::System& system_);
- constexpr std::size_t GetSize(Pool pool) const {
- return managers[static_cast<std::size_t>(pool)].GetSize();
+ void Initialize(VAddr management_region, size_t management_region_size);
+
+ constexpr size_t GetSize(Pool pool) const {
+ constexpr Direction GetSizeDirection = Direction::FromFront;
+ size_t total = 0;
+ for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+ manager = this->GetNextManager(manager, GetSizeDirection)) {
+ total += manager->GetSize();
+ }
+ return total;
}
- void InitializeManager(Pool pool, u64 start_address, u64 end_address);
+ PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+ ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option);
+ ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option,
+ u64 process_id, u8 fill_pattern);
+
+ static constexpr size_t MaxManagerCount = 10;
- VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
- ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
- u32 heap_fill_value = 0);
- ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
- u32 heap_fill_value = 0);
+ void Close(PAddr address, size_t num_pages);
+ void Close(const KPageLinkedList& pg);
- static constexpr std::size_t MaxManagerCount = 10;
+ void Open(PAddr address, size_t num_pages);
+ void Open(const KPageLinkedList& pg);
public:
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
+ static size_t CalculateManagementOverheadSize(size_t region_size) {
return Impl::CalculateManagementOverheadSize(region_size);
}
@@ -100,17 +112,26 @@ private:
Impl() = default;
~Impl() = default;
- std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
+ size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
+ Pool p);
VAddr AllocateBlock(s32 index, bool random) {
return heap.AllocateBlock(index, random);
}
- void Free(VAddr addr, std::size_t num_pages) {
+ void Free(VAddr addr, size_t num_pages) {
heap.Free(addr, num_pages);
}
- constexpr std::size_t GetSize() const {
+ void SetInitialUsedHeapSize(size_t reserved_size) {
+ heap.SetInitialUsedSize(reserved_size);
+ }
+
+ constexpr Pool GetPool() const {
+ return pool;
+ }
+
+ constexpr size_t GetSize() const {
return heap.GetSize();
}
@@ -122,10 +143,88 @@ private:
return heap.GetEndAddress();
}
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+ constexpr size_t GetPageOffset(PAddr address) const {
+ return heap.GetPageOffset(address);
+ }
+
+ constexpr size_t GetPageOffsetToEnd(PAddr address) const {
+ return heap.GetPageOffsetToEnd(address);
+ }
+
+ constexpr void SetNext(Impl* n) {
+ next = n;
+ }
+
+ constexpr void SetPrev(Impl* n) {
+ prev = n;
+ }
+
+ constexpr Impl* GetNext() const {
+ return next;
+ }
+
+ constexpr Impl* GetPrev() const {
+ return prev;
+ }
+
+ void OpenFirst(PAddr address, size_t num_pages) {
+ size_t index = this->GetPageOffset(address);
+ const size_t end = index + num_pages;
+ while (index < end) {
+ const RefCount ref_count = (++page_reference_counts[index]);
+ ASSERT(ref_count == 1);
- static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
- std::size_t region_size) {
+ index++;
+ }
+ }
+
+ void Open(PAddr address, size_t num_pages) {
+ size_t index = this->GetPageOffset(address);
+ const size_t end = index + num_pages;
+ while (index < end) {
+ const RefCount ref_count = (++page_reference_counts[index]);
+ ASSERT(ref_count > 1);
+
+ index++;
+ }
+ }
+
+ void Close(PAddr address, size_t num_pages) {
+ size_t index = this->GetPageOffset(address);
+ const size_t end = index + num_pages;
+
+ size_t free_start = 0;
+ size_t free_count = 0;
+ while (index < end) {
+ ASSERT(page_reference_counts[index] > 0);
+ const RefCount ref_count = (--page_reference_counts[index]);
+
+ // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
+ if (ref_count == 0) {
+ if (free_count > 0) {
+ free_count++;
+ } else {
+ free_start = index;
+ free_count = 1;
+ }
+ } else {
+ if (free_count > 0) {
+ this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+ free_count = 0;
+ }
+ }
+
+ index++;
+ }
+
+ if (free_count > 0) {
+ this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+ }
+ }
+
+ static size_t CalculateManagementOverheadSize(size_t region_size);
+
+ static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
@@ -135,13 +234,45 @@ private:
using RefCount = u16;
KPageHeap heap;
+ std::vector<RefCount> page_reference_counts;
+ VAddr management_region{};
Pool pool{};
+ Impl* next{};
+ Impl* prev{};
};
private:
+ Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
+ return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+ }
+
+ const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
+ return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+ }
+
+ constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
+ return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
+ : pool_managers_head[static_cast<size_t>(pool)];
+ }
+
+ constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+ if (dir == Direction::FromBack) {
+ return cur->GetPrev();
+ } else {
+ return cur->GetNext();
+ }
+ }
+
+ ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
+ Direction dir, bool random);
+
+private:
Core::System& system;
- std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
+ std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
+ std::array<Impl*, MaxManagerCount> pool_managers_head{};
+ std::array<Impl*, MaxManagerCount> pool_managers_tail{};
std::array<Impl, MaxManagerCount> managers;
+ size_t num_managers{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index a05e66677..0baeddf51 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -14,7 +14,8 @@
namespace Kernel {
enum KMemoryRegionType : u32 {
- KMemoryRegionAttr_CarveoutProtected = 0x04000000,
+ KMemoryRegionAttr_CarveoutProtected = 0x02000000,
+ KMemoryRegionAttr_Uncached = 0x04000000,
KMemoryRegionAttr_DidKernelMap = 0x08000000,
KMemoryRegionAttr_ShouldKernelMap = 0x10000000,
KMemoryRegionAttr_UserReadOnly = 0x20000000,
@@ -239,6 +240,11 @@ static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
+// UNUSED: .DeriveSparse(2, 2, 0);
+constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
+ KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
+
constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
@@ -330,6 +336,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
+ } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
+ return KMemoryRegionType_VirtualDramUnknownDebug;
} else {
return KMemoryRegionType_Dram;
}
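The new pool and mapping checks above rely on the (type | flag) == type idiom, which is true exactly when every bit of the flag is already set in the type value. A tiny self-contained illustration:

    #include <cstdint>

    // (value | mask) == value holds precisely when all bits of 'mask' are set in 'value'.
    constexpr bool HasAllBits(uint32_t value, uint32_t mask) {
        return (value | mask) == value;
    }
    static_assert(HasAllBits(0b1110u, 0b0110u));
    static_assert(!HasAllBits(0b1010u, 0b0110u));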
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 29d996d62..97a5890a0 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -7,35 +7,51 @@
namespace Kernel {
-void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
- // Check our assumptions
- ASSERT(Common::IsAligned((address), PageSize));
+void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
+ size_t management_size, const size_t* block_shifts,
+ size_t num_block_shifts) {
+ // Check our assumptions.
+ ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
+ const VAddr management_end = management_address + management_size;
- // Set our members
- heap_address = address;
- heap_size = size;
-
- // Setup bitmaps
- metadata.resize(metadata_size / sizeof(u64));
- u64* cur_bitmap_storage{metadata.data()};
- for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
- const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
- const std::size_t next_block_shift{
- (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
- cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
- next_block_shift, cur_bitmap_storage);
+ // Set our members.
+ m_heap_address = address;
+ m_heap_size = size;
+ m_num_blocks = num_block_shifts;
+
+ // Setup bitmaps.
+ m_management_data.resize(management_size / sizeof(u64));
+ u64* cur_bitmap_storage{m_management_data.data()};
+ for (size_t i = 0; i < num_block_shifts; i++) {
+ const size_t cur_block_shift = block_shifts[i];
+ const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
+ cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift,
+ next_block_shift, cur_bitmap_storage);
}
+
+ // Ensure we didn't overextend our bounds.
+ ASSERT(VAddr(cur_bitmap_storage) <= management_end);
+}
+
+size_t KPageHeap::GetNumFreePages() const {
+ size_t num_free = 0;
+
+ for (size_t i = 0; i < m_num_blocks; i++) {
+ num_free += m_blocks[i].GetNumFreePages();
+ }
+
+ return num_free;
}
-VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
- const std::size_t needed_size{blocks[index].GetSize()};
+PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+ const size_t needed_size = m_blocks[index].GetSize();
- for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
- if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
- if (const std::size_t allocated_size{blocks[i].GetSize()};
- allocated_size > needed_size) {
- Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
+ for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
+ if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+ if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
+ this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
return addr;
}
@@ -44,34 +60,34 @@ VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
return 0;
}
-void KPageHeap::FreeBlock(VAddr block, s32 index) {
+void KPageHeap::FreeBlock(PAddr block, s32 index) {
do {
- block = blocks[index++].PushBlock(block);
+ block = m_blocks[index++].PushBlock(block);
} while (block != 0);
}
-void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
- // Freeing no pages is a no-op
+void KPageHeap::Free(PAddr addr, size_t num_pages) {
+ // Freeing no pages is a no-op.
if (num_pages == 0) {
return;
}
- // Find the largest block size that we can free, and free as many as possible
- s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
- const VAddr start{addr};
- const VAddr end{(num_pages * PageSize) + addr};
- VAddr before_start{start};
- VAddr before_end{start};
- VAddr after_start{end};
- VAddr after_end{end};
+ // Find the largest block size that we can free, and free as many as possible.
+ s32 big_index = static_cast<s32>(m_num_blocks) - 1;
+ const PAddr start = addr;
+ const PAddr end = addr + num_pages * PageSize;
+ PAddr before_start = start;
+ PAddr before_end = start;
+ PAddr after_start = end;
+ PAddr after_end = end;
while (big_index >= 0) {
- const std::size_t block_size{blocks[big_index].GetSize()};
- const VAddr big_start{Common::AlignUp((start), block_size)};
- const VAddr big_end{Common::AlignDown((end), block_size)};
+ const size_t block_size = m_blocks[big_index].GetSize();
+ const PAddr big_start = Common::AlignUp(start, block_size);
+ const PAddr big_end = Common::AlignDown(end, block_size);
if (big_start < big_end) {
- // Free as many big blocks as we can
- for (auto block{big_start}; block < big_end; block += block_size) {
- FreeBlock(block, big_index);
+ // Free as many big blocks as we can.
+ for (auto block = big_start; block < big_end; block += block_size) {
+ this->FreeBlock(block, big_index);
}
before_end = big_start;
after_start = big_end;
@@ -81,31 +97,31 @@ void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
}
ASSERT(big_index >= 0);
- // Free space before the big blocks
- for (s32 i{big_index - 1}; i >= 0; i--) {
- const std::size_t block_size{blocks[i].GetSize()};
+ // Free space before the big blocks.
+ for (s32 i = big_index - 1; i >= 0; i--) {
+ const size_t block_size = m_blocks[i].GetSize();
while (before_start + block_size <= before_end) {
before_end -= block_size;
- FreeBlock(before_end, i);
+ this->FreeBlock(before_end, i);
}
}
- // Free space after the big blocks
- for (s32 i{big_index - 1}; i >= 0; i--) {
- const std::size_t block_size{blocks[i].GetSize()};
+ // Free space after the big blocks.
+ for (s32 i = big_index - 1; i >= 0; i--) {
+ const size_t block_size = m_blocks[i].GetSize();
while (after_start + block_size <= after_end) {
- FreeBlock(after_start, i);
+ this->FreeBlock(after_start, i);
after_start += block_size;
}
}
}
-std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
- std::size_t overhead_size = 0;
- for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
- const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
- const std::size_t next_block_shift{
- (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
+size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+ size_t num_block_shifts) {
+ size_t overhead_size = 0;
+ for (size_t i = 0; i < num_block_shifts; i++) {
+ const size_t cur_block_shift = block_shifts[i];
+ const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
region_size, cur_block_shift, next_block_shift);
}
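
As an illustration of the splitting strategy Free() applies above (illustrative block sizes, not the heap's full shift table): the range is aligned inward to the largest block size that still yields at least one block, those big blocks are freed first, and the leftover head and tail are covered with smaller blocks by the two trailing loops.

#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint64_t kBlockSizes[] = {0x1000, 0x10000, 0x200000}; // 4 KiB, 64 KiB, 2 MiB
    const std::uint64_t start = 0x1234000, end = 0x1634000;              // range to free

    const int big = 2; // index of the largest block size that fits at least once here
    const std::uint64_t big_start =
        (start + kBlockSizes[big] - 1) / kBlockSizes[big] * kBlockSizes[big]; // align up
    const std::uint64_t big_end = end / kBlockSizes[big] * kBlockSizes[big];  // align down

    for (std::uint64_t b = big_start; b < big_end; b += kBlockSizes[big]) {
        std::printf("free 2 MiB block at %#llx\n", static_cast<unsigned long long>(b));
    }

    // The [start, big_start) and [big_end, end) remainders would then be freed with the
    // smaller block sizes, exactly as the two loops after the big-block pass do.
    return 0;
}
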
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index a65aa28a0..60fff766b 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -23,54 +23,73 @@ public:
KPageHeap() = default;
~KPageHeap() = default;
- constexpr VAddr GetAddress() const {
- return heap_address;
+ constexpr PAddr GetAddress() const {
+ return m_heap_address;
}
- constexpr std::size_t GetSize() const {
- return heap_size;
+ constexpr size_t GetSize() const {
+ return m_heap_size;
}
- constexpr VAddr GetEndAddress() const {
- return GetAddress() + GetSize();
+ constexpr PAddr GetEndAddress() const {
+ return this->GetAddress() + this->GetSize();
}
- constexpr std::size_t GetPageOffset(VAddr block) const {
- return (block - GetAddress()) / PageSize;
+ constexpr size_t GetPageOffset(PAddr block) const {
+ return (block - this->GetAddress()) / PageSize;
+ }
+ constexpr size_t GetPageOffsetToEnd(PAddr block) const {
+ return (this->GetEndAddress() - block) / PageSize;
+ }
+
+ void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+ size_t management_size) {
+ return this->Initialize(heap_address, heap_size, management_address, management_size,
+ MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
+ }
+
+ size_t GetFreeSize() const {
+ return this->GetNumFreePages() * PageSize;
}
- void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
- VAddr AllocateBlock(s32 index, bool random);
- void Free(VAddr addr, std::size_t num_pages);
+ void SetInitialUsedSize(size_t reserved_size) {
+ // Check that the reserved size is valid.
+ const size_t free_size = this->GetNumFreePages() * PageSize;
+ ASSERT(m_heap_size >= free_size + reserved_size);
- void UpdateUsedSize() {
- used_size = heap_size - (GetNumFreePages() * PageSize);
+ // Set the initial used size.
+ m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+ PAddr AllocateBlock(s32 index, bool random);
+ void Free(PAddr addr, size_t num_pages);
+
+ static size_t CalculateManagementOverheadSize(size_t region_size) {
+ return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
+ NumMemoryBlockPageShifts);
+ }
- static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
- const auto target_pages{std::max(num_pages, align_pages)};
- for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
- if (target_pages <=
- (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
+ const size_t target_pages = std::max(num_pages, align_pages);
+ for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+ if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
return -1;
}
- static constexpr s32 GetBlockIndex(std::size_t num_pages) {
- for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
- if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ static constexpr s32 GetBlockIndex(size_t num_pages) {
+ for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
+ if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
return -1;
}
- static constexpr std::size_t GetBlockSize(std::size_t index) {
- return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
+ static constexpr size_t GetBlockSize(size_t index) {
+ return size_t(1) << MemoryBlockPageShifts[index];
}
- static constexpr std::size_t GetBlockNumPages(std::size_t index) {
+ static constexpr size_t GetBlockNumPages(size_t index) {
return GetBlockSize(index) / PageSize;
}
@@ -83,114 +102,116 @@ private:
Block() = default;
~Block() = default;
- constexpr std::size_t GetShift() const {
- return block_shift;
+ constexpr size_t GetShift() const {
+ return m_block_shift;
}
- constexpr std::size_t GetNextShift() const {
- return next_block_shift;
+ constexpr size_t GetNextShift() const {
+ return m_next_block_shift;
}
- constexpr std::size_t GetSize() const {
- return static_cast<std::size_t>(1) << GetShift();
+ constexpr size_t GetSize() const {
+ return u64(1) << this->GetShift();
}
- constexpr std::size_t GetNumPages() const {
- return GetSize() / PageSize;
+ constexpr size_t GetNumPages() const {
+ return this->GetSize() / PageSize;
}
- constexpr std::size_t GetNumFreeBlocks() const {
- return bitmap.GetNumBits();
+ constexpr size_t GetNumFreeBlocks() const {
+ return m_bitmap.GetNumBits();
}
- constexpr std::size_t GetNumFreePages() const {
- return GetNumFreeBlocks() * GetNumPages();
+ constexpr size_t GetNumFreePages() const {
+ return this->GetNumFreeBlocks() * this->GetNumPages();
}
- u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
- u64* bit_storage) {
- // Set shifts
- block_shift = bs;
- next_block_shift = nbs;
-
- // Align up the address
- VAddr end{addr + size};
- const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
- : (1ULL << block_shift)};
- addr = Common::AlignDown((addr), align);
- end = Common::AlignUp((end), align);
-
- heap_address = addr;
- end_offset = (end - addr) / (1ULL << block_shift);
- return bitmap.Initialize(bit_storage, end_offset);
+ u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
+ // Set shifts.
+ m_block_shift = bs;
+ m_next_block_shift = nbs;
+
+ // Align up the address.
+ PAddr end = addr + size;
+ const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
+ : (u64(1) << m_block_shift);
+ addr = Common::AlignDown(addr, align);
+ end = Common::AlignUp(end, align);
+
+ m_heap_address = addr;
+ m_end_offset = (end - addr) / (u64(1) << m_block_shift);
+ return m_bitmap.Initialize(bit_storage, m_end_offset);
}
- VAddr PushBlock(VAddr address) {
- // Set the bit for the free block
- std::size_t offset{(address - heap_address) >> GetShift()};
- bitmap.SetBit(offset);
+ PAddr PushBlock(PAddr address) {
+ // Set the bit for the free block.
+ size_t offset = (address - m_heap_address) >> this->GetShift();
+ m_bitmap.SetBit(offset);
- // If we have a next shift, try to clear the blocks below and return the address
- if (GetNextShift()) {
- const auto diff{1ULL << (GetNextShift() - GetShift())};
+ // If we have a next shift, try to clear the blocks below this one and return the new
+ // address.
+ if (this->GetNextShift()) {
+ const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
offset = Common::AlignDown(offset, diff);
- if (bitmap.ClearRange(offset, diff)) {
- return heap_address + (offset << GetShift());
+ if (m_bitmap.ClearRange(offset, diff)) {
+ return m_heap_address + (offset << this->GetShift());
}
}
- // We couldn't coalesce, or we're already as big as possible
- return 0;
+ // We couldn't coalesce, or we're already as big as possible.
+ return {};
}
- VAddr PopBlock(bool random) {
- // Find a free block
- const s64 soffset{bitmap.FindFreeBlock(random)};
+ PAddr PopBlock(bool random) {
+ // Find a free block.
+ s64 soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
- return 0;
+ return {};
}
- const auto offset{static_cast<std::size_t>(soffset)};
+ const size_t offset = static_cast<size_t>(soffset);
- // Update our tracking and return it
- bitmap.ClearBit(offset);
- return heap_address + (offset << GetShift());
+ // Update our tracking and return it.
+ m_bitmap.ClearBit(offset);
+ return m_heap_address + (offset << this->GetShift());
}
- static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
- std::size_t cur_block_shift,
- std::size_t next_block_shift) {
- const auto cur_block_size{(1ULL << cur_block_shift)};
- const auto next_block_size{(1ULL << next_block_shift)};
- const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
+ public:
+ static constexpr size_t CalculateManagementOverheadSize(size_t region_size,
+ size_t cur_block_shift,
+ size_t next_block_shift) {
+ const size_t cur_block_size = (u64(1) << cur_block_shift);
+ const size_t next_block_size = (u64(1) << next_block_shift);
+ const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
return KPageBitmap::CalculateManagementOverheadSize(
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
}
private:
- KPageBitmap bitmap;
- VAddr heap_address{};
- uintptr_t end_offset{};
- std::size_t block_shift{};
- std::size_t next_block_shift{};
+ KPageBitmap m_bitmap;
+ PAddr m_heap_address{};
+ uintptr_t m_end_offset{};
+ size_t m_block_shift{};
+ size_t m_next_block_shift{};
};
- constexpr std::size_t GetNumFreePages() const {
- std::size_t num_free{};
-
- for (const auto& block : blocks) {
- num_free += block.GetNumFreePages();
- }
-
- return num_free;
- }
+private:
+ void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+ size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
+ size_t GetNumFreePages() const;
- void FreeBlock(VAddr block, s32 index);
+ void FreeBlock(PAddr block, s32 index);
- static constexpr std::size_t NumMemoryBlockPageShifts{7};
- static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+ static constexpr size_t NumMemoryBlockPageShifts{7};
+ static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
};
- VAddr heap_address{};
- std::size_t heap_size{};
- std::size_t used_size{};
- std::array<Block, NumMemoryBlockPageShifts> blocks{};
- std::vector<u64> metadata;
+private:
+ static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+ size_t num_block_shifts);
+
+private:
+ PAddr m_heap_address{};
+ size_t m_heap_size{};
+ size_t m_initial_used_size{};
+ size_t m_num_blocks{};
+ std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+ std::vector<u64> m_management_data;
};
} // namespace Kernel
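
A standalone sketch of the block-index selection these helpers implement, using the same page shifts as MemoryBlockPageShifts: the smallest block whose page capacity covers both the requested size and alignment is chosen.

#include <array>
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;
constexpr std::array<std::size_t, 7> Shifts{0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};

constexpr int AlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target = num_pages > align_pages ? num_pages : align_pages;
    for (std::size_t i = 0; i < Shifts.size(); ++i) {
        if (target <= (std::size_t{1} << Shifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

int main() {
    std::printf("%d\n", AlignedBlockIndex(1, 1));  // 0 -> 4 KiB block
    std::printf("%d\n", AlignedBlockIndex(20, 1)); // 2 -> 2 MiB block (shift 0x15)
    return 0;
}
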
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c..dfea0b6e2 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -41,24 +41,6 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
}
}
-constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
- if (info.GetAddress() < addr) {
- return addr;
- }
- return info.GetAddress();
-}
-
-constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
- std::size_t size{info.GetSize()};
- if (info.GetAddress() < start) {
- size -= start - info.GetAddress();
- }
- if (info.GetEndAddress() > end) {
- size -= info.GetEndAddress() - end;
- }
- return size;
-}
-
} // namespace
KPageTable::KPageTable(Core::System& system_)
@@ -291,11 +273,12 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
+ KPageLinkedList pg;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+ &pg, num_pages,
+ KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
- KPageLinkedList page_linked_list;
- R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
- allocation_option));
- R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
+ R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
block_manager->Update(addr, num_pages, state, perm);
@@ -400,148 +383,476 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
- // Lock the table.
- KScopedLightLock lk(general_lock);
+ // Calculate the last address for convenience.
+ const VAddr last_address = address + size - 1;
- std::size_t mapped_size{};
- const VAddr end_addr{addr + size};
+ // Define iteration variables.
+ VAddr cur_address;
+ std::size_t mapped_size;
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- mapped_size += GetSizeInRange(info, addr, end_addr);
- }
- });
-
- if (mapped_size == size) {
- return ResultSuccess;
- }
+ // The entire mapping process can be retried.
+ while (true) {
+ // Check if the memory is already mapped.
+ {
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
- const std::size_t remaining_size{size - mapped_size};
- const std::size_t remaining_pages{remaining_size / PageSize};
+ // If the size mapped is the size requested, we've nothing to do.
+ R_SUCCEED_IF(size == mapped_size);
+ }
- // Reserve the memory from the process resource limit.
- KScopedResourceReservation memory_reservation(
- system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
- remaining_size);
- if (!memory_reservation.Succeeded()) {
- LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
- return ResultLimitReached;
+ // Allocate and map the memory.
+ {
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(),
+ LimitableResource::PhysicalMemory, size - mapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the new memory.
+ KPageLinkedList pg;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ &pg, (size - mapped_size) / PageSize,
+ KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+
+ // Map the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ size_t num_allocator_blocks = 0;
+
+ // Verify that nobody has mapped memory since we first checked.
+ {
+ // Iterate over the memory.
+ size_t checked_mapped_size = 0;
+ cur_address = address;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ if (is_free) {
+ if (info.GetAddress() < address) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (!is_free) {
+ checked_mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (!is_free) {
+ checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size now isn't what it was before, somebody mapped or unmapped
+ // concurrently. If this happened, retry.
+ if (mapped_size != checked_mapped_size) {
+ continue;
+ }
+ }
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+ auto unmap_guard = detail::ScopeExit([&] {
+ if (cur_address > address) {
+ const VAddr last_unmap_address = cur_address - 1;
+
+ // Iterate, unmapping the pages.
+ cur_address = address;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is free, we mapped it and need to unmap it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to unmap.
+ const size_t cur_pages =
+ std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_unmap_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
+ OperationType::Unmap)
+ .IsSuccess());
+ }
+
+ // Check if we're done.
+ if (last_unmap_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ }
+ });
+
+ // Iterate over the memory.
+ auto pg_it = pg.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If it's unmapped, we need to map it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to map.
+ size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.Nodes().end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
+ OperationType::Map, pg_phys_addr));
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Increase our tracked mapped size.
+ mapped_physical_memory_size += (size - mapped_size);
+
+ // Update the relevant memory blocks.
+ block_manager->Update(address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryState::Normal, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None);
+
+ // Cancel our guard.
+ unmap_guard.Cancel();
+
+ return ResultSuccess;
+ }
+ }
}
+}
- KPageLinkedList page_linked_list;
+ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
- memory_pool, allocation_option));
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
- // We succeeded, so commit the memory reservation.
- memory_reservation.Commit();
+ // Calculate the last address for convenience.
+ const VAddr last_address = address + size - 1;
- // Map the memory.
- auto node{page_linked_list.Nodes().begin()};
- PAddr map_addr{node->GetAddress()};
- std::size_t src_num_pages{node->GetNumPages()};
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- return;
- }
+ // Define iteration variables.
+ VAddr cur_address = 0;
+ std::size_t mapped_size = 0;
+ std::size_t num_allocator_blocks = 0;
- std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
- VAddr dst_addr{GetAddressInRange(info, addr)};
+ // Check if the memory is mapped.
+ {
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Verify the memory's state.
+ const bool is_normal = info.GetState() == KMemoryState::Normal &&
+ info.GetAttribute() == KMemoryAttribute::None;
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+ if (is_normal) {
+ R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+ if (info.GetAddress() < address) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
- while (dst_num_pages) {
- if (!src_num_pages) {
- node = std::next(node);
- map_addr = node->GetAddress();
- src_num_pages = node->GetNumPages();
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (is_normal) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
}
- const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
- Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
- map_addr);
+ // Track the memory if it's mapped.
+ if (is_normal) {
+ mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
- dst_addr += num_pages * PageSize;
- map_addr += num_pages * PageSize;
- src_num_pages -= num_pages;
- dst_num_pages -= num_pages;
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
}
- });
- mapped_physical_memory_size += remaining_size;
-
- const std::size_t num_pages{size / PageSize};
- block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
-
- return ResultSuccess;
-}
+ // If there's nothing mapped, we've nothing to do.
+ R_SUCCEED_IF(mapped_size == 0);
+ }
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+ // Make a page group for the unmap region.
+ KPageLinkedList pg;
+ {
+ auto& impl = this->PageTableImpl();
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ bool cur_valid = false;
+ Common::PageTable::TraversalEntry next_entry;
+ bool next_valid = false;
+ size_t tot_size = 0;
+
+ cur_address = address;
+ next_valid = impl.BeginTraversal(next_entry, context, cur_address);
+ next_entry.block_size =
+ (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
+
+ // Iterate, building the group.
+ while (true) {
+ if ((!next_valid && !cur_valid) ||
+ (next_valid && cur_valid &&
+ next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+ cur_entry.block_size += next_entry.block_size;
+ } else {
+ if (cur_valid) {
+ // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+ R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
+ }
+
+ // Update tracking variables.
+ tot_size += cur_entry.block_size;
+ cur_entry = next_entry;
+ cur_valid = next_valid;
+ }
- // Lock the table.
- KScopedLightLock lk(general_lock);
+ if (cur_entry.block_size + tot_size >= size) {
+ break;
+ }
- const VAddr end_addr{addr + size};
- ResultCode result{ResultSuccess};
- std::size_t mapped_size{};
+ next_valid = impl.ContinueTraversal(next_entry, context);
+ }
- // Verify that the region can be unmapped
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state == KMemoryState::Normal) {
- if (info.attribute != KMemoryAttribute::None) {
- result = ResultInvalidCurrentMemory;
- return;
+ // Add the last block.
+ if (cur_valid) {
+ // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+ R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
+ }
+ }
+ ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+ auto remap_guard = detail::ScopeExit([&] {
+ if (cur_address > address) {
+ const VAddr last_map_address = cur_address - 1;
+ cur_address = address;
+
+ // Iterate over the memory we unmapped.
+ auto it = block_manager->FindIterator(cur_address);
+ auto pg_it = pg.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ while (true) {
+                // Get the memory info for the pages we unmapped.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory is normal, we unmapped it and need to re-map it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to map.
+ size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_map_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.Nodes().end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
+ OperationType::Map, pg_phys_addr) == ResultSuccess);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Check if we're done.
+ if (last_map_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
}
- mapped_size += GetSizeInRange(info, addr, end_addr);
- } else if (info.state != KMemoryState::Free) {
- result = ResultInvalidCurrentMemory;
}
});
- if (result.IsError()) {
- return result;
- }
+ // Iterate over the memory, unmapping as we go.
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
- if (!mapped_size) {
- return ResultSuccess;
- }
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
- // Unmap each region within the range
- KPageLinkedList page_linked_list;
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state == KMemoryState::Normal) {
- const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
- const std::size_t block_num_pages{block_size / PageSize};
- const VAddr block_addr{GetAddressInRange(info, addr)};
-
- AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
-
- if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
- OperationType::Unmap);
- result.IsError()) {
- return;
- }
+ // If the memory state is normal, we need to unmap it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to unmap.
+ const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
}
- });
- if (result.IsError()) {
- return result;
- }
- const std::size_t num_pages{size / PageSize};
- system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
- allocation_option);
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
- block_manager->Update(addr, num_pages, KMemoryState::Free);
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ // Release the memory resource.
+ mapped_physical_memory_size -= mapped_size;
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
- mapped_physical_memory_size -= mapped_size;
+
+ // Update memory blocks.
+ block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None);
+
+ // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
+ // counting for mapped pages. Until then, we must manually close the reference to the page
+ // group.
+ system.Kernel().MemoryManager().Close(pg);
+
+ // We succeeded.
+ remap_guard.Cancel();
return ResultSuccess;
}
@@ -681,9 +992,8 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
- const std::size_t num_pages{(addr - cur_addr) / PageSize};
- if (const auto result{
- Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
+ if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
+ OperationType::Unmap)};
result.IsError()) {
return result;
}
@@ -966,9 +1276,16 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the heap extension.
- KPageLinkedList page_linked_list;
- R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
- memory_pool, allocation_option));
+ KPageLinkedList pg;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+ &pg, allocation_size / PageSize,
+ KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg.Nodes()) {
+ std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+ it.GetSize());
+ }
// Map the pages.
{
@@ -987,7 +1304,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Map the pages.
const auto num_pages = allocation_size / PageSize;
- R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup));
+ R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
// Clear all the newly allocated pages.
for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
@@ -1035,8 +1352,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageLinkedList page_group;
- R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool,
- allocation_option));
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ &page_group, needed_num_pages,
+ KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
}
@@ -1243,7 +1561,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
return ResultSuccess;
}
-constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
+VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
@@ -1279,7 +1597,7 @@ constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
}
}
-constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
+std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
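
A simplified standalone sketch (plain structs instead of KPageLinkedList nodes) of the inner mapping loop MapPhysicalMemory uses above: consume pages from the current physical run, and advance to the next run whenever the current one is exhausted.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Run {
    std::uint64_t phys_addr;
    std::size_t num_pages;
};

int main() {
    constexpr std::size_t PageSize = 0x1000;
    std::vector<Run> group{{0x80000000, 4}, {0x90000000, 8}}; // two physical runs

    std::uint64_t virt = 0x10000000; // start of the free virtual range
    std::size_t map_pages = 12;      // pages the free range needs

    auto it = group.begin();
    std::uint64_t phys = it->phys_addr;
    std::size_t avail = it->num_pages;

    while (map_pages > 0) {
        if (avail == 0) { // current physical run exhausted: advance to the next one
            ++it;
            phys = it->phys_addr;
            avail = it->num_pages;
        }
        const std::size_t cur = avail < map_pages ? avail : map_pages;
        std::printf("map %zu pages: virt %#llx <- phys %#llx\n", cur,
                    static_cast<unsigned long long>(virt),
                    static_cast<unsigned long long>(phys));
        virt += cur * PageSize;
        phys += cur * PageSize;
        avail -= cur;
        map_pages -= cur;
    }
    return 0;
}
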
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index c98887d34..194177332 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -102,8 +102,8 @@ private:
OperationType operation);
ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr = 0);
- constexpr VAddr GetRegionAddress(KMemoryState state) const;
- constexpr std::size_t GetRegionSize(KMemoryState state) const;
+ VAddr GetRegionAddress(KMemoryState state) const;
+ std::size_t GetRegionSize(KMemoryState state) const;
ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask,
@@ -254,8 +254,7 @@ public:
return !IsOutsideASLRRegion(address, size);
}
- PAddr GetPhysicalAddr(VAddr addr) {
- ASSERT(IsLockedByCurrentThread());
+ PAddr GetPhysicalAddr(VAddr addr) const {
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
@@ -311,6 +310,8 @@ private:
bool is_kernel{};
bool is_aslr_enabled{};
+ u32 heap_fill_value{};
+
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
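
A minimal standalone sketch of the lookup GetPhysicalAddr performs above: the backing table stores a per-page delta such that the physical address is simply delta + vaddr, so translation is a single array index by page number.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    constexpr std::uint64_t PageBits = 12;
    std::vector<std::uint64_t> backing_addr(16);

    // Suppose virtual page 3 (0x3000..0x3fff) is backed by physical 0x80000000.
    backing_addr[3] = 0x80000000ULL - 0x3000ULL;

    const std::uint64_t vaddr = 0x3abc;
    const std::uint64_t paddr = backing_addr[vaddr >> PageBits] + vaddr;
    assert(paddr == 0x80000abc);
    return 0;
}
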
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 85c506979..9233261cd 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -123,12 +123,11 @@ private:
};
ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type) {
+ ProcessType type, KResourceLimit* res_limit) {
auto& kernel = system.Kernel();
process->name = std::move(process_name);
-
- process->resource_limit = kernel.GetSystemResourceLimit();
+ process->resource_limit = res_limit;
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -143,9 +142,6 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
kernel.AppendNewProcess(process);
- // Open a reference to the resource limit.
- process->resource_limit->Open();
-
// Clear remaining fields.
process->num_running_threads = 0;
process->is_signaled = false;
@@ -153,6 +149,9 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
process->is_suspended = false;
process->schedule_count = 0;
+ // Open a reference to the resource limit.
+ process->resource_limit->Open();
+
return ResultSuccess;
}
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 38b446350..cf1b67428 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -91,7 +91,7 @@ public:
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type);
+ ProcessType type, KResourceLimit* res_limit);
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index 0c4bba66b..a84977c68 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "common/assert.h"
+#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/svc_results.h"
@@ -151,4 +152,22 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
}
}
+KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) {
+ auto* resource_limit = KResourceLimit::Create(system.Kernel());
+ resource_limit->Initialize(&system.CoreTiming());
+
+ // Initialize default resource limit values.
+ // TODO(bunnei): These values are the system defaults, the limits for service processes are
+ // lower. These should use the correct limit values.
+
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size)
+ .IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess());
+
+ return resource_limit;
+}
+
} // namespace Kernel
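
A standalone sketch (mock types, not the real KResourceLimit/KProcess) of the ownership pattern this change moves to: the limit is created per process with its caps set up front, and the process opens its own reference when it is initialized with that limit.

#include <cassert>
#include <cstdint>

struct ResourceLimit {
    std::int64_t physical_memory = 0;
    std::int64_t threads = 0;
    int ref_count = 1; // reference held by the creator
    void Open() { ++ref_count; }
    void Close() { --ref_count; }
};

ResourceLimit* MakeLimitForProcess(std::int64_t physical_memory_size) {
    auto* limit = new ResourceLimit{};
    limit->physical_memory = physical_memory_size; // system defaults; see the TODO above
    limit->threads = 800;
    return limit;
}

struct Process {
    ResourceLimit* limit = nullptr;
    void Initialize(ResourceLimit* res_limit) {
        limit = res_limit;
        limit->Open(); // the process now holds its own reference
    }
};

int main() {
    ResourceLimit* limit = MakeLimitForProcess(0x100000000);
    Process process;
    process.Initialize(limit);
    limit->Close(); // creator drops its reference once the process owns one
    assert(limit->ref_count == 1);
    delete limit;
    return 0;
}
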
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index fab6005ff..d23d16aa4 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -67,4 +67,7 @@ private:
KLightConditionVariable cond_var;
const Core::Timing::CoreTiming* core_timing{};
};
+
+KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size);
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 49c0714ed..71bd466cf 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -70,13 +70,12 @@ struct KernelCore::Impl {
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
- KMemoryLayout memory_layout;
- DeriveInitialMemoryLayout(memory_layout);
- Init::InitializeSlabHeaps(system, memory_layout);
+ DeriveInitialMemoryLayout();
+ Init::InitializeSlabHeaps(system, *memory_layout);
// Initialize kernel memory and resources.
- InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
- InitializeMemoryLayout(memory_layout);
+ InitializeSystemResourceLimit(kernel, system.CoreTiming());
+ InitializeMemoryLayout();
InitializePageSlab();
InitializeSchedulers();
InitializeSuspendThreads();
@@ -219,12 +218,11 @@ struct KernelCore::Impl {
// Creates the default system resource limit
void InitializeSystemResourceLimit(KernelCore& kernel,
- const Core::Timing::CoreTiming& core_timing,
- const KMemoryLayout& memory_layout) {
+ const Core::Timing::CoreTiming& core_timing) {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
- const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
+ const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -240,13 +238,6 @@ struct KernelCore::Impl {
constexpr u64 secure_applet_memory_size{4_MiB};
ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
secure_applet_memory_size));
-
- // This memory seems to be reserved on hardware, but is not reserved/used by yuzu.
- // Likely Horizon OS reserved memory
- // TODO(ameerj): Derive the memory rather than hardcode it.
- constexpr u64 unknown_reserved_memory{0x2f896000};
- ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
- unknown_reserved_memory));
}
void InitializePreemption(KernelCore& kernel) {
@@ -360,16 +351,18 @@ struct KernelCore::Impl {
return schedulers[thread_id]->GetCurrentThread();
}
- void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
+ void DeriveInitialMemoryLayout() {
+ memory_layout = std::make_unique<KMemoryLayout>();
+
// Insert the root region for the virtual memory tree, from which all other regions will
// derive.
- memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+ memory_layout->GetVirtualMemoryRegionTree().InsertDirectly(
KernelVirtualAddressSpaceBase,
KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);
// Insert the root region for the physical memory tree, from which all other regions will
// derive.
- memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+ memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly(
KernelPhysicalAddressSpaceBase,
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
@@ -386,7 +379,7 @@ struct KernelCore::Impl {
if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
}
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
// Setup the code region.
@@ -395,11 +388,11 @@ struct KernelCore::Impl {
Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
constexpr size_t code_region_size = code_region_end - code_region_start;
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
code_region_start, code_region_size, KMemoryRegionType_KernelCode));
// Setup board-specific device physical regions.
- Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+ Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
// Determine the amount of space needed for the misc region.
size_t misc_region_needed_size;
@@ -408,7 +401,7 @@ struct KernelCore::Impl {
misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));
// Account for each auto-map device.
- for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
// Check that the region is valid.
ASSERT(region.GetEndAddress() != 0);
@@ -433,22 +426,22 @@ struct KernelCore::Impl {
// Setup the misc region.
const VAddr misc_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
const VAddr stack_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
- const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+ const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();
// Determine the size of the slab region.
const size_t slab_region_size =
@@ -465,23 +458,23 @@ struct KernelCore::Impl {
Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
const VAddr slab_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
(code_end_phys_addr % SlabRegionAlign);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
// Setup the temp region.
constexpr size_t TempRegionSize = 128_MiB;
constexpr size_t TempRegionAlign = KernelAslrAlignment;
const VAddr temp_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
- KMemoryRegionType_KernelTemp));
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+ KMemoryRegionType_KernelTemp));
// Automatically map in devices that have auto-map attributes.
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
// We only care about kernel regions.
if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
continue;
@@ -508,21 +501,21 @@ struct KernelCore::Impl {
const size_t map_size =
Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
const VAddr map_virt_addr =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
}
- Init::SetupDramPhysicalMemoryRegions(memory_layout);
+ Init::SetupDramPhysicalMemoryRegions(*memory_layout);
// Insert a physical region for the kernel code region.
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
// Insert a physical region for the kernel slab region.
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
// Determine size available for kernel page table heaps, requiring > 8 MB.
@@ -531,12 +524,12 @@ struct KernelCore::Impl {
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
// mapping. Tag them.
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (region.GetType() == KMemoryRegionType_Dram) {
// Check that the region is valid.
ASSERT(region.GetEndAddress() != 0);
@@ -548,7 +541,7 @@ struct KernelCore::Impl {
// Get the linear region extents.
const auto linear_extents =
- memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+ memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionAttr_LinearMapped);
ASSERT(linear_extents.GetEndAddress() != 0);
@@ -560,7 +553,7 @@ struct KernelCore::Impl {
Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
aligned_linear_phys_start;
const VAddr linear_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
@@ -569,7 +562,7 @@ struct KernelCore::Impl {
{
PAddr cur_phys_addr = 0;
u64 cur_size = 0;
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
continue;
}
@@ -588,55 +581,49 @@ struct KernelCore::Impl {
const VAddr region_virt_addr =
region.GetAddress() + linear_region_phys_to_virt_diff;
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
region_virt_addr, region.GetSize(),
GetTypeForVirtualLinearMapping(region.GetType())));
region.SetPairAddress(region_virt_addr);
KMemoryRegion* virt_region =
- memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+ memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
ASSERT(virt_region != nullptr);
virt_region->SetPairAddress(region.GetAddress());
}
}
// Insert regions for the initial page table region.
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
KMemoryRegionType_VirtualDramKernelInitPt));
// All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
// some pool partition. Tag them.
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
region.SetType(KMemoryRegionType_DramPoolPartition);
}
}
// Setup all other memory regions needed to arrange the pool partitions.
- Init::SetupPoolPartitionMemoryRegions(memory_layout);
+ Init::SetupPoolPartitionMemoryRegions(*memory_layout);
// Cache all linear regions in their own trees for faster access, later.
- memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
- linear_region_start);
+ memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+ linear_region_start);
}
- void InitializeMemoryLayout(const KMemoryLayout& memory_layout) {
- const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
- const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
- const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();
+ void InitializeMemoryLayout() {
+ const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents();
- // Initialize memory managers
+ // Initialize the memory manager.
memory_manager = std::make_unique<KMemoryManager>(system);
- memory_manager->InitializeManager(KMemoryManager::Pool::Application,
- application_pool.GetAddress(),
- application_pool.GetEndAddress());
- memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
- applet_pool.GetEndAddress());
- memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
- system_pool.GetEndAddress());
+ const auto& management_region = memory_layout->GetPoolManagementRegion();
+ ASSERT(management_region.GetEndAddress() != 0);
+ memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
// Setup memory regions for emulated processes
// TODO(bunnei): These should not be hardcoded regions initialized within the kernel
@@ -777,6 +764,9 @@ struct KernelCore::Impl {
Kernel::KSharedMemory* irs_shared_mem{};
Kernel::KSharedMemory* time_shared_mem{};
+ // Memory layout
+ std::unique_ptr<KMemoryLayout> memory_layout;
+
// Threads used for services
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
Common::ThreadWorker service_threads_manager;
@@ -1142,6 +1132,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
return impl->worker_task_manager;
}
+const KMemoryLayout& KernelCore::MemoryLayout() const {
+ return *impl->memory_layout;
+}
+
bool KernelCore::IsPhantomModeForSingleCore() const {
return impl->IsPhantomModeForSingleCore();
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0e04fc3bb..c1254b18d 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -41,6 +41,7 @@ class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
+class KMemoryLayout;
class KMemoryManager;
class KPort;
class KProcess;
@@ -350,6 +351,9 @@ public:
/// Gets the current worker task manager, used for dispatching KThread/KProcess tasks.
const KWorkerTaskManager& WorkerTaskManager() const;
+ /// Gets the memory layout.
+ const KMemoryLayout& MemoryLayout() const;
+
private:
friend class KProcess;
friend class KThread;
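Note on the accessor added above: the kernel now owns its KMemoryLayout, so callers query DRAM region extents through KernelCore::MemoryLayout() instead of constructing a layout locally. A minimal usage sketch (hypothetical call site; the pool-extent getter is the one already used in InitializeMemoryLayout()):

    // Sketch only: given a KernelCore& kernel, read the system pool extents
    // through the kernel-owned layout.
    const Kernel::KMemoryLayout& layout = kernel.MemoryLayout();
    const auto system_pool = layout.GetKernelSystemPoolRegionPhysicalExtents();
    LOG_DEBUG(Kernel, "System pool: 0x{:x}..0x{:x}", system_pool.GetAddress(),
              system_pool.GetEndAddress());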
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 9836809f2..839171e85 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -2332,7 +2332,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
R_UNLESS(event != nullptr, ResultOutOfResource);
// Initialize the event.
- event->Initialize("CreateEvent");
+ event->Initialize("CreateEvent", kernel.CurrentProcess());
// Commit the thread reservation.
event_reservation.Commit();
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 773dc9f29..2f8e21568 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -618,7 +618,7 @@ void AppletMessageQueue::PushMessage(AppletMessage msg) {
AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() {
if (messages.empty()) {
on_new_message->GetWritableEvent().Clear();
- return AppletMessage::NoMessage;
+ return AppletMessage::None;
}
auto msg = messages.front();
messages.pop();
@@ -633,7 +633,7 @@ std::size_t AppletMessageQueue::GetMessageCount() const {
}
void AppletMessageQueue::RequestExit() {
- PushMessage(AppletMessage::ExitRequested);
+ PushMessage(AppletMessage::Exit);
}
void AppletMessageQueue::FocusStateChanged() {
@@ -732,7 +732,7 @@ void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) {
const auto message = msg_queue->PopMessage();
IPC::ResponseBuilder rb{ctx, 3};
- if (message == AppletMessageQueue::AppletMessage::NoMessage) {
+ if (message == AppletMessageQueue::AppletMessage::None) {
LOG_ERROR(Service_AM, "Message queue is empty");
rb.Push(ERR_NO_MESSAGES);
rb.PushEnum<AppletMessageQueue::AppletMessage>(message);
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 2a578aea5..fdd937b82 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -22,6 +22,7 @@ class NVFlinger;
namespace Service::AM {
+// This is nn::settings::Language
enum SystemLanguage {
Japanese = 0,
English = 1, // en-US
@@ -41,16 +42,44 @@ enum SystemLanguage {
// 4.0.0+
SimplifiedChinese = 15,
TraditionalChinese = 16,
+ // 10.1.0+
+ BrazilianPortuguese = 17,
};
class AppletMessageQueue {
public:
+ // This is nn::am::AppletMessage
enum class AppletMessage : u32 {
- NoMessage = 0,
- ExitRequested = 4,
+ None = 0,
+ ChangeIntoForeground = 1,
+ ChangeIntoBackground = 2,
+ Exit = 4,
+ ApplicationExited = 6,
FocusStateChanged = 15,
+ Resume = 16,
+ DetectShortPressingHomeButton = 20,
+ DetectLongPressingHomeButton = 21,
+ DetectShortPressingPowerButton = 22,
+ DetectMiddlePressingPowerButton = 23,
+ DetectLongPressingPowerButton = 24,
+ RequestToPrepareSleep = 25,
+ FinishedSleepSequence = 26,
+ SleepRequiredByHighTemperature = 27,
+ SleepRequiredByLowBattery = 28,
+ AutoPowerDown = 29,
OperationModeChanged = 30,
PerformanceModeChanged = 31,
+ DetectReceivingCecSystemStandby = 32,
+ SdCardRemoved = 33,
+ LaunchApplicationRequested = 50,
+ RequestToDisplay = 51,
+ ShowApplicationLogo = 55,
+ HideApplicationLogo = 56,
+ ForceHideApplicationLogo = 57,
+ FloatingApplicationDetected = 60,
+ DetectShortPressingCaptureButton = 90,
+ AlbumScreenShotTaken = 92,
+ AlbumRecordingSaved = 93,
};
explicit AppletMessageQueue(Core::System& system);
@@ -179,11 +208,14 @@ public:
~ICommonStateGetter() override;
private:
+ // This is nn::oe::FocusState
enum class FocusState : u8 {
InFocus = 1,
NotInFocus = 2,
+ Background = 3,
};
+ // This is nn::oe::OperationMode
enum class OperationMode : u8 {
Handheld = 0,
Docked = 1,
diff --git a/src/core/hle/service/apm/apm_controller.cpp b/src/core/hle/service/apm/apm_controller.cpp
index 98839fe97..187fef2ad 100644
--- a/src/core/hle/service/apm/apm_controller.cpp
+++ b/src/core/hle/service/apm/apm_controller.cpp
@@ -17,8 +17,8 @@ constexpr auto DEFAULT_PERFORMANCE_CONFIGURATION = PerformanceConfiguration::Con
Controller::Controller(Core::Timing::CoreTiming& core_timing_)
: core_timing{core_timing_}, configs{
- {PerformanceMode::Handheld, DEFAULT_PERFORMANCE_CONFIGURATION},
- {PerformanceMode::Docked, DEFAULT_PERFORMANCE_CONFIGURATION},
+ {PerformanceMode::Normal, DEFAULT_PERFORMANCE_CONFIGURATION},
+ {PerformanceMode::Boost, DEFAULT_PERFORMANCE_CONFIGURATION},
} {}
Controller::~Controller() = default;
@@ -63,13 +63,13 @@ void Controller::SetFromCpuBoostMode(CpuBoostMode mode) {
PerformanceConfiguration::Config15,
}};
- SetPerformanceConfiguration(PerformanceMode::Docked,
+ SetPerformanceConfiguration(PerformanceMode::Boost,
BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode)));
}
PerformanceMode Controller::GetCurrentPerformanceMode() const {
- return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Docked
- : PerformanceMode::Handheld;
+ return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Boost
+ : PerformanceMode::Normal;
}
PerformanceConfiguration Controller::GetCurrentPerformanceConfiguration(PerformanceMode mode) {
diff --git a/src/core/hle/service/apm/apm_controller.h b/src/core/hle/service/apm/apm_controller.h
index 8d48e0104..d6fbd2c0c 100644
--- a/src/core/hle/service/apm/apm_controller.h
+++ b/src/core/hle/service/apm/apm_controller.h
@@ -32,15 +32,18 @@ enum class PerformanceConfiguration : u32 {
Config16 = 0x9222000C,
};
+// This is nn::oe::CpuBoostMode
enum class CpuBoostMode : u32 {
- Disabled = 0,
- Full = 1, // CPU + GPU -> Config 13, 14, 15, or 16
- Partial = 2, // GPU Only -> Config 15 or 16
+ Normal = 0, // Boost mode disabled
+ FastLoad = 1, // CPU + GPU -> Config 13, 14, 15, or 16
+ Partial = 2, // GPU Only -> Config 15 or 16
};
-enum class PerformanceMode : u8 {
- Handheld = 0,
- Docked = 1,
+// This is nn::oe::PerformanceMode
+enum class PerformanceMode : s32 {
+ Invalid = -1,
+ Normal = 0,
+ Boost = 1,
};
// Class to manage the state and change of the emulated system performance.
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp
index 62f4cdfb2..b8c2c6e51 100644
--- a/src/core/hle/service/kernel_helpers.cpp
+++ b/src/core/hle/service/kernel_helpers.cpp
@@ -3,7 +3,9 @@
// Refer to the license.txt file included.
#include "core/core.h"
+#include "core/core_timing.h"
#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -15,10 +17,21 @@ namespace Service::KernelHelpers {
ServiceContext::ServiceContext(Core::System& system_, std::string name_)
: kernel(system_.Kernel()) {
+
+ // Create a resource limit for the process.
+ const auto physical_memory_size =
+ kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::System);
+ auto* resource_limit = Kernel::CreateResourceLimitForProcess(system_, physical_memory_size);
+
+ // Create the process.
process = Kernel::KProcess::Create(kernel);
ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
- Kernel::KProcess::ProcessType::Userland)
+ Kernel::KProcess::ProcessType::KernelInternal,
+ resource_limit)
.IsSuccess());
+
+ // Close reference to our resource limit, as the process opens one.
+ resource_limit->Close();
}
ServiceContext::~ServiceContext() {
@@ -43,7 +56,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {
}
// Initialize the event.
- event->Initialize(std::move(name));
+ event->Initialize(std::move(name), process);
// Commit the thread reservation.
event_reservation.Commit();
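The constructor above now marks service-backed processes as KernelInternal and gives each one a resource limit sized to the System pool. CreateResourceLimitForProcess() returns an opened reference, KProcess::Initialize() takes its own, and the local reference is then closed; a condensed sketch of that open/close ownership pattern (names taken from the hunk above):

    // The helper returns an opened reference; the process opens its own during
    // Initialize(), so the local reference is released afterwards.
    auto* resource_limit = Kernel::CreateResourceLimitForProcess(system_, physical_memory_size);
    ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
                                        Kernel::KProcess::ProcessType::KernelInternal,
                                        resource_limit)
               .IsSuccess());
    resource_limit->Close();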
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 88d6ec908..28d30eee2 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -39,8 +39,7 @@ struct Memory::Impl {
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
- ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End,
- "Out of bounds target: {:016X}", target);
+ ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
if (Settings::IsFastmemEnabled()) {
diff --git a/src/input_common/drivers/sdl_driver.cpp b/src/input_common/drivers/sdl_driver.cpp
index 5cf1987ad..c17ea305e 100644
--- a/src/input_common/drivers/sdl_driver.cpp
+++ b/src/input_common/drivers/sdl_driver.cpp
@@ -175,22 +175,23 @@ public:
return false;
}
- BatteryLevel GetBatteryLevel() {
+ Common::Input::BatteryLevel GetBatteryLevel() {
const auto level = SDL_JoystickCurrentPowerLevel(sdl_joystick.get());
switch (level) {
case SDL_JOYSTICK_POWER_EMPTY:
- return BatteryLevel::Empty;
+ return Common::Input::BatteryLevel::Empty;
case SDL_JOYSTICK_POWER_LOW:
- return BatteryLevel::Low;
+ return Common::Input::BatteryLevel::Low;
case SDL_JOYSTICK_POWER_MEDIUM:
- return BatteryLevel::Medium;
+ return Common::Input::BatteryLevel::Medium;
case SDL_JOYSTICK_POWER_FULL:
case SDL_JOYSTICK_POWER_MAX:
- return BatteryLevel::Full;
- case SDL_JOYSTICK_POWER_UNKNOWN:
+ return Common::Input::BatteryLevel::Full;
case SDL_JOYSTICK_POWER_WIRED:
+ return Common::Input::BatteryLevel::Charging;
+ case SDL_JOYSTICK_POWER_UNKNOWN:
default:
- return BatteryLevel::Charging;
+ return Common::Input::BatteryLevel::None;
}
}
@@ -351,6 +352,8 @@ void SDLDriver::HandleGameControllerEvent(const SDL_Event& event) {
if (const auto joystick = GetSDLJoystickBySDLID(event.jbutton.which)) {
const PadIdentifier identifier = joystick->GetPadIdentifier();
SetButton(identifier, event.jbutton.button, true);
+ // Battery level does not trigger an SDL event, so refresh it on every button press
+ SetBattery(identifier, joystick->GetBatteryLevel());
}
break;
}
diff --git a/src/input_common/drivers/udp_client.cpp b/src/input_common/drivers/udp_client.cpp
index 64162f431..9780ead10 100644
--- a/src/input_common/drivers/udp_client.cpp
+++ b/src/input_common/drivers/udp_client.cpp
@@ -192,22 +192,22 @@ std::size_t UDPClient::GetClientNumber(std::string_view host, u16 port) const {
return MAX_UDP_CLIENTS;
}
-BatteryLevel UDPClient::GetBatteryLevel(Response::Battery battery) const {
+Common::Input::BatteryLevel UDPClient::GetBatteryLevel(Response::Battery battery) const {
switch (battery) {
case Response::Battery::Dying:
- return BatteryLevel::Empty;
+ return Common::Input::BatteryLevel::Empty;
case Response::Battery::Low:
- return BatteryLevel::Critical;
+ return Common::Input::BatteryLevel::Critical;
case Response::Battery::Medium:
- return BatteryLevel::Low;
+ return Common::Input::BatteryLevel::Low;
case Response::Battery::High:
- return BatteryLevel::Medium;
+ return Common::Input::BatteryLevel::Medium;
case Response::Battery::Full:
case Response::Battery::Charged:
- return BatteryLevel::Full;
+ return Common::Input::BatteryLevel::Full;
case Response::Battery::Charging:
default:
- return BatteryLevel::Charging;
+ return Common::Input::BatteryLevel::Charging;
}
}
diff --git a/src/input_common/drivers/udp_client.h b/src/input_common/drivers/udp_client.h
index 76e32bd04..c7cc7d846 100644
--- a/src/input_common/drivers/udp_client.h
+++ b/src/input_common/drivers/udp_client.h
@@ -141,7 +141,7 @@ private:
std::size_t GetClientNumber(std::string_view host, u16 port) const;
// Translates UDP battery level to input engine battery level
- BatteryLevel GetBatteryLevel(Response::Battery battery) const;
+ Common::Input::BatteryLevel GetBatteryLevel(Response::Battery battery) const;
void OnVersion(Response::Version);
void OnPortInfo(Response::PortInfo);
diff --git a/src/input_common/input_engine.cpp b/src/input_common/input_engine.cpp
index 65ae1b848..7adf7e3d7 100644
--- a/src/input_common/input_engine.cpp
+++ b/src/input_common/input_engine.cpp
@@ -70,7 +70,7 @@ void InputEngine::SetAxis(const PadIdentifier& identifier, int axis, f32 value)
TriggerOnAxisChange(identifier, axis, value);
}
-void InputEngine::SetBattery(const PadIdentifier& identifier, BatteryLevel value) {
+void InputEngine::SetBattery(const PadIdentifier& identifier, Common::Input::BatteryLevel value) {
{
std::lock_guard lock{mutex};
ControllerData& controller = controller_list.at(identifier);
@@ -143,13 +143,13 @@ f32 InputEngine::GetAxis(const PadIdentifier& identifier, int axis) const {
return axis_iter->second;
}
-BatteryLevel InputEngine::GetBattery(const PadIdentifier& identifier) const {
+Common::Input::BatteryLevel InputEngine::GetBattery(const PadIdentifier& identifier) const {
std::lock_guard lock{mutex};
const auto controller_iter = controller_list.find(identifier);
if (controller_iter == controller_list.cend()) {
LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
identifier.pad, identifier.port);
- return BatteryLevel::Charging;
+ return Common::Input::BatteryLevel::Charging;
}
const ControllerData& controller = controller_iter->second;
return controller.battery;
@@ -270,7 +270,7 @@ void InputEngine::TriggerOnAxisChange(const PadIdentifier& identifier, int axis,
}
void InputEngine::TriggerOnBatteryChange(const PadIdentifier& identifier,
- [[maybe_unused]] BatteryLevel value) {
+ [[maybe_unused]] Common::Input::BatteryLevel value) {
std::lock_guard lock{mutex_callback};
for (const auto& poller_pair : callback_list) {
const InputIdentifier& poller = poller_pair.second;
diff --git a/src/input_common/input_engine.h b/src/input_common/input_engine.h
index c6c027aef..f44e0799b 100644
--- a/src/input_common/input_engine.h
+++ b/src/input_common/input_engine.h
@@ -34,16 +34,6 @@ struct BasicMotion {
u64 delta_timestamp{};
};
-// Stages of a battery charge
-enum class BatteryLevel {
- Empty,
- Critical,
- Low,
- Medium,
- Full,
- Charging,
-};
-
// Types of input that are stored in the engine
enum class EngineInputType {
None,
@@ -178,7 +168,7 @@ public:
bool GetButton(const PadIdentifier& identifier, int button) const;
bool GetHatButton(const PadIdentifier& identifier, int button, u8 direction) const;
f32 GetAxis(const PadIdentifier& identifier, int axis) const;
- BatteryLevel GetBattery(const PadIdentifier& identifier) const;
+ Common::Input::BatteryLevel GetBattery(const PadIdentifier& identifier) const;
BasicMotion GetMotion(const PadIdentifier& identifier, int motion) const;
int SetCallback(InputIdentifier input_identifier);
@@ -189,7 +179,7 @@ protected:
void SetButton(const PadIdentifier& identifier, int button, bool value);
void SetHatButton(const PadIdentifier& identifier, int button, u8 value);
void SetAxis(const PadIdentifier& identifier, int axis, f32 value);
- void SetBattery(const PadIdentifier& identifier, BatteryLevel value);
+ void SetBattery(const PadIdentifier& identifier, Common::Input::BatteryLevel value);
void SetMotion(const PadIdentifier& identifier, int motion, const BasicMotion& value);
virtual std::string GetHatButtonName([[maybe_unused]] u8 direction_value) const {
@@ -202,13 +192,13 @@ private:
std::unordered_map<int, u8> hat_buttons;
std::unordered_map<int, float> axes;
std::unordered_map<int, BasicMotion> motions;
- BatteryLevel battery{};
+ Common::Input::BatteryLevel battery{};
};
void TriggerOnButtonChange(const PadIdentifier& identifier, int button, bool value);
void TriggerOnHatButtonChange(const PadIdentifier& identifier, int button, u8 value);
void TriggerOnAxisChange(const PadIdentifier& identifier, int axis, f32 value);
- void TriggerOnBatteryChange(const PadIdentifier& identifier, BatteryLevel value);
+ void TriggerOnBatteryChange(const PadIdentifier& identifier, Common::Input::BatteryLevel value);
void TriggerOnMotionChange(const PadIdentifier& identifier, int motion,
const BasicMotion& value);
diff --git a/src/input_common/input_poller.cpp b/src/input_common/input_poller.cpp
index 7f3c08597..82b585ff2 100644
--- a/src/input_common/input_poller.cpp
+++ b/src/input_common/input_poller.cpp
@@ -470,7 +470,7 @@ public:
}
Common::Input::BatteryStatus GetStatus() const {
- return static_cast<Common::Input::BatteryLevel>(input_engine->GetBattery(identifier));
+ return input_engine->GetBattery(identifier);
}
void ForceUpdate() override {
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
index 151290101..293ad7d59 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -31,9 +31,8 @@ bool GLInnerFence::IsSignaled() const {
return true;
}
ASSERT(sync_object.handle != 0);
- GLsizei length;
GLint sync_status;
- glGetSynciv(sync_object.handle, GL_SYNC_STATUS, sizeof(GLint), &length, &sync_status);
+ glGetSynciv(sync_object.handle, GL_SYNC_STATUS, 1, nullptr, &sync_status);
return sync_status == GL_SIGNALED;
}
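For the glGetSynciv change above: the bufSize argument counts GLint values rather than bytes, so 1 is the correct size for a single GL_SYNC_STATUS query, and the length output pointer is optional, so the unused local can be replaced with nullptr. A self-contained sketch of the same non-blocking poll (assumes a valid GLsync handle):

    // Non-blocking status query; nullptr skips the optional length output.
    GLint sync_status = GL_UNSIGNALED;
    glGetSynciv(sync_object.handle, GL_SYNC_STATUS, 1, nullptr, &sync_status);
    const bool signaled = sync_status == GL_SIGNALED;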
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index f8495896c..9e6732abd 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -243,10 +243,6 @@ GraphicsPipeline::GraphicsPipeline(
case Settings::ShaderBackend::GLASM:
if (!sources[stage].empty()) {
assembly_programs[stage] = CompileProgram(sources[stage], AssemblyStage(stage));
- if (in_parallel) {
- // Make sure program is built before continuing when building in parallel
- glGetString(GL_PROGRAM_ERROR_STRING_NV);
- }
}
break;
case Settings::ShaderBackend::SPIRV:
@@ -256,20 +252,18 @@ GraphicsPipeline::GraphicsPipeline(
break;
}
}
- if (in_parallel && backend != Settings::ShaderBackend::GLASM) {
- // Make sure programs have built if we are building shaders in parallel
- for (OGLProgram& program : source_programs) {
- if (program.handle != 0) {
- GLint status{};
- glGetProgramiv(program.handle, GL_LINK_STATUS, &status);
- }
- }
+ if (in_parallel) {
+ std::lock_guard lock{built_mutex};
+ built_fence.Create();
+ // Flush this context to ensure compilation commands and fence are in the GPU pipe.
+ glFlush();
+ built_condvar.notify_one();
+ } else {
+ is_built = true;
}
if (shader_notify) {
shader_notify->MarkShaderComplete();
}
- is_built = true;
- built_condvar.notify_one();
}};
if (thread_worker) {
thread_worker->QueueWork(std::move(func));
@@ -440,7 +434,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
buffer_cache.UpdateGraphicsBuffers(is_indexed);
buffer_cache.BindHostGeometryBuffers(is_indexed);
- if (!is_built.load(std::memory_order::relaxed)) {
+ if (!IsBuilt()) {
WaitForBuild();
}
const bool use_assembly{assembly_programs[0].handle != 0};
@@ -585,8 +579,26 @@ void GraphicsPipeline::GenerateTransformFeedbackState() {
}
void GraphicsPipeline::WaitForBuild() {
- std::unique_lock lock{built_mutex};
- built_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
+ if (built_fence.handle == 0) {
+ std::unique_lock lock{built_mutex};
+ built_condvar.wait(lock, [this] { return built_fence.handle != 0; });
+ }
+ ASSERT(glClientWaitSync(built_fence.handle, 0, GL_TIMEOUT_IGNORED) != GL_WAIT_FAILED);
+ is_built = true;
+}
+
+bool GraphicsPipeline::IsBuilt() noexcept {
+ if (is_built) {
+ return true;
+ }
+ if (built_fence.handle == 0) {
+ return false;
+ }
+ // Timeout of zero means this is non-blocking
+ const auto sync_status = glClientWaitSync(built_fence.handle, 0, 0);
+ ASSERT(sync_status != GL_WAIT_FAILED);
+ is_built = sync_status != GL_TIMEOUT_EXPIRED;
+ return is_built;
}
} // namespace OpenGL
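Pipeline build tracking now rests on a GL fence rather than an atomic flag: the worker creates the fence after queuing compilation, flushes its context so the commands and fence reach the GPU, and notifies any waiter; IsBuilt() polls the fence with a zero timeout and WaitForBuild() blocks on it. A sketch of the zero-timeout poll, assuming the fence was created and flushed on another context:

    // A zero timeout makes glClientWaitSync return immediately; anything other
    // than GL_TIMEOUT_EXPIRED (barring GL_WAIT_FAILED) means the fence signaled.
    const GLenum status = glClientWaitSync(built_fence.handle, 0, 0);
    ASSERT(status != GL_WAIT_FAILED);
    const bool built = status != GL_TIMEOUT_EXPIRED;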
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
index 4e28d9a42..311d49f3f 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
@@ -100,9 +100,7 @@ public:
return writes_global_memory;
}
- [[nodiscard]] bool IsBuilt() const noexcept {
- return is_built.load(std::memory_order::relaxed);
- }
+ [[nodiscard]] bool IsBuilt() noexcept;
template <typename Spec>
static auto MakeConfigureSpecFunc() {
@@ -154,7 +152,8 @@ private:
std::mutex built_mutex;
std::condition_variable built_condvar;
- std::atomic_bool is_built{false};
+ OGLSync built_fence{};
+ bool is_built{false};
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index daba42ed9..db5bf1d30 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -184,6 +184,8 @@ inline GLenum VertexFormat(Maxwell::VertexAttribute attrib) {
case Maxwell::VertexAttribute::Size::Size_32_32_32:
case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
return GL_FLOAT;
+ case Maxwell::VertexAttribute::Size::Size_11_11_10:
+ return GL_UNSIGNED_INT_10F_11F_11F_REV;
default:
break;
}
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 751e4792b..1c136c410 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -495,6 +495,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R32G32B32_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
return VK_FORMAT_R32G32B32A32_SFLOAT;
+ case Maxwell::VertexAttribute::Size::Size_11_11_10:
+ return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
default:
break;
}
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index c71a1f44d..621a6a071 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -100,6 +100,8 @@ VkFormat GetFormat(const Tegra::FramebufferConfig& framebuffer) {
return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
case Tegra::FramebufferConfig::PixelFormat::RGB565_UNORM:
return VK_FORMAT_R5G6B5_UNORM_PACK16;
+ case Tegra::FramebufferConfig::PixelFormat::B8G8R8A8_UNORM:
+ return VK_FORMAT_B8G8R8A8_UNORM;
default:
UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}",
static_cast<u32>(framebuffer.pixel_format));
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 0ba56ff1e..0f62779de 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -554,10 +554,12 @@ void CopyBufferToImage(vk::CommandBuffer cmdbuf, VkBuffer src_buffer, VkImage im
};
}
-[[nodiscard]] bool IsFormatFlipped(PixelFormat format) {
+[[nodiscard]] bool IsFormatFlipped(PixelFormat format, bool emulate_bgr565) {
switch (format) {
case PixelFormat::A1B5G5R5_UNORM:
return true;
+ case PixelFormat::B5G6R5_UNORM:
+ return emulate_bgr565;
default:
return false;
}
@@ -1488,7 +1490,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
};
if (!info.IsRenderTarget()) {
swizzle = info.Swizzle();
- if (IsFormatFlipped(format)) {
+ if (IsFormatFlipped(format, device->MustEmulateBGR565())) {
std::ranges::transform(swizzle, swizzle.begin(), SwapBlueRed);
}
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0) {
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 153702c0b..effde73c9 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -39,6 +39,11 @@ constexpr std::array DEPTH16_UNORM_STENCIL8_UINT{
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_UNDEFINED,
};
+
+constexpr std::array B5G6R5_UNORM_PACK16{
+ VK_FORMAT_R5G6B5_UNORM_PACK16,
+ VK_FORMAT_UNDEFINED,
+};
} // namespace Alternatives
enum class NvidiaArchitecture {
@@ -87,6 +92,8 @@ constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
return Alternatives::DEPTH24_UNORM_STENCIL8_UINT.data();
case VK_FORMAT_D16_UNORM_S8_UINT:
return Alternatives::DEPTH16_UNORM_STENCIL8_UINT.data();
+ case VK_FORMAT_B5G6R5_UNORM_PACK16:
+ return Alternatives::B5G6R5_UNORM_PACK16.data();
default:
return nullptr;
}
@@ -224,9 +231,14 @@ std::vector<std::string> GetSupportedExtensions(vk::PhysicalDevice physical) {
return supported_extensions;
}
+bool IsExtensionSupported(std::span<const std::string> supported_extensions,
+ std::string_view extension) {
+ return std::ranges::find(supported_extensions, extension) != supported_extensions.end();
+}
+
NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
std::span<const std::string> exts) {
- if (std::ranges::find(exts, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME) != exts.end()) {
+ if (IsExtensionSupported(exts, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME)) {
VkPhysicalDeviceFragmentShadingRatePropertiesKHR shading_rate_props{};
shading_rate_props.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR;
@@ -239,7 +251,7 @@ NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical,
return NvidiaArchitecture::AmpereOrNewer;
}
}
- if (std::ranges::find(exts, VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME) != exts.end()) {
+ if (IsExtensionSupported(exts, VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME)) {
return NvidiaArchitecture::Turing;
}
return NvidiaArchitecture::VoltaOrOlder;
@@ -604,7 +616,8 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
break;
}
}
- if (ext_extended_dynamic_state && driver_id == VK_DRIVER_ID_MESA_RADV) {
+ const bool is_radv = driver_id == VK_DRIVER_ID_MESA_RADV;
+ if (ext_extended_dynamic_state && is_radv) {
// Mask driver version variant
const u32 version = (properties.driverVersion << 3) >> 3;
if (version < VK_MAKE_API_VERSION(0, 21, 2, 0)) {
@@ -613,6 +626,17 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
ext_extended_dynamic_state = false;
}
}
+ if (ext_vertex_input_dynamic_state && is_radv) {
+ // TODO(ameerj): Blacklist only offending driver versions
+ // TODO(ameerj): Confirm if RDNA1 is affected
+ const bool is_rdna2 =
+ IsExtensionSupported(supported_extensions, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
+ if (is_rdna2) {
+ LOG_WARNING(Render_Vulkan,
+ "RADV has broken VK_EXT_vertex_input_dynamic_state on RDNA2 hardware");
+ ext_vertex_input_dynamic_state = false;
+ }
+ }
sets_per_pool = 64;
const bool is_amd =
@@ -628,7 +652,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
has_broken_cube_compatibility = true;
}
}
- const bool is_amd_or_radv = is_amd || driver_id == VK_DRIVER_ID_MESA_RADV;
+ const bool is_amd_or_radv = is_amd || is_radv;
if (ext_sampler_filter_minmax && is_amd_or_radv) {
// Disable ext_sampler_filter_minmax on AMD GCN4 and lower as it is broken.
if (!is_float16_supported) {
@@ -639,6 +663,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
}
const bool is_intel_windows = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS;
+ const bool is_intel_anv = driver_id == VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA;
if (ext_vertex_input_dynamic_state && is_intel_windows) {
LOG_WARNING(Render_Vulkan, "Blacklisting Intel for VK_EXT_vertex_input_dynamic_state");
ext_vertex_input_dynamic_state = false;
@@ -652,6 +677,10 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
LOG_WARNING(Render_Vulkan, "Intel proprietary drivers do not support MSAA image blits");
cant_blit_msaa = true;
}
+ if (is_intel_anv) {
+ LOG_WARNING(Render_Vulkan, "ANV driver does not support native BGR format");
+ must_emulate_bgr565 = true;
+ }
supports_d24_depth =
IsFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT,
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 37d140ebd..34b1add16 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -354,6 +354,10 @@ public:
return cant_blit_msaa;
}
+ bool MustEmulateBGR565() const {
+ return must_emulate_bgr565;
+ }
+
private:
/// Checks if the physical device is suitable.
void CheckSuitability(bool requires_swapchain) const;
@@ -448,6 +452,7 @@ private:
bool has_nsight_graphics{}; ///< Has Nsight Graphics attached
bool supports_d24_depth{}; ///< Supports D24 depth buffers.
bool cant_blit_msaa{}; ///< Does not support MSAA<->MSAA blitting.
+ bool must_emulate_bgr565{}; ///< Emulates BGR565 by swizzling RGB565 format.
// Telemetry parameters
std::string vendor_name; ///< Device's driver name.
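Taken together with the texture-cache change earlier in this diff, these Vulkan changes emulate BGR565 on drivers without native support: ANV sets must_emulate_bgr565, VK_FORMAT_B5G6R5_UNORM_PACK16 falls back to the R5G6B5 alternative, and image views compensate by swapping the red and blue channels. A sketch of the view-side compensation, using the SwapBlueRed helper already referenced there:

    // When the device must emulate BGR565, the view uses the RGB565 fallback
    // format and the R/B channels are swapped in the swizzle to compensate.
    if (IsFormatFlipped(format, device->MustEmulateBGR565())) {
        std::ranges::transform(swizzle, swizzle.begin(), SwapBlueRed);
    }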
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index d5ba86c03..c2b66ff14 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -445,6 +445,7 @@ void Config::ReadCoreValues() {
qt_config->beginGroup(QStringLiteral("Core"));
ReadGlobalSetting(Settings::values.use_multi_core);
+ ReadGlobalSetting(Settings::values.use_extended_memory_layout);
qt_config->endGroup();
}
@@ -608,6 +609,7 @@ void Config::ReadCpuValues() {
ReadGlobalSetting(Settings::values.cpuopt_unsafe_ignore_standard_fpcr);
ReadGlobalSetting(Settings::values.cpuopt_unsafe_inaccurate_nan);
ReadGlobalSetting(Settings::values.cpuopt_unsafe_fastmem_check);
+ ReadGlobalSetting(Settings::values.cpuopt_unsafe_ignore_global_monitor);
if (global) {
ReadBasicSetting(Settings::values.cpu_debug_mode);
@@ -620,6 +622,8 @@ void Config::ReadCpuValues() {
ReadBasicSetting(Settings::values.cpuopt_misc_ir);
ReadBasicSetting(Settings::values.cpuopt_reduce_misalign_checks);
ReadBasicSetting(Settings::values.cpuopt_fastmem);
+ ReadBasicSetting(Settings::values.cpuopt_fastmem_exclusives);
+ ReadBasicSetting(Settings::values.cpuopt_recompile_exclusives);
}
qt_config->endGroup();
@@ -1019,6 +1023,7 @@ void Config::SaveCoreValues() {
qt_config->beginGroup(QStringLiteral("Core"));
WriteGlobalSetting(Settings::values.use_multi_core);
+ WriteGlobalSetting(Settings::values.use_extended_memory_layout);
qt_config->endGroup();
}
@@ -1137,6 +1142,7 @@ void Config::SaveCpuValues() {
WriteGlobalSetting(Settings::values.cpuopt_unsafe_ignore_standard_fpcr);
WriteGlobalSetting(Settings::values.cpuopt_unsafe_inaccurate_nan);
WriteGlobalSetting(Settings::values.cpuopt_unsafe_fastmem_check);
+ WriteGlobalSetting(Settings::values.cpuopt_unsafe_ignore_global_monitor);
if (global) {
WriteBasicSetting(Settings::values.cpu_debug_mode);
diff --git a/src/yuzu/configuration/configure_cpu.cpp b/src/yuzu/configuration/configure_cpu.cpp
index f66cab5d4..bf74ccc7c 100644
--- a/src/yuzu/configuration/configure_cpu.cpp
+++ b/src/yuzu/configuration/configure_cpu.cpp
@@ -36,6 +36,7 @@ void ConfigureCpu::SetConfiguration() {
ui->cpuopt_unsafe_ignore_standard_fpcr->setEnabled(runtime_lock);
ui->cpuopt_unsafe_inaccurate_nan->setEnabled(runtime_lock);
ui->cpuopt_unsafe_fastmem_check->setEnabled(runtime_lock);
+ ui->cpuopt_unsafe_ignore_global_monitor->setEnabled(runtime_lock);
ui->cpuopt_unsafe_unfuse_fma->setChecked(Settings::values.cpuopt_unsafe_unfuse_fma.GetValue());
ui->cpuopt_unsafe_reduce_fp_error->setChecked(
@@ -46,6 +47,8 @@ void ConfigureCpu::SetConfiguration() {
Settings::values.cpuopt_unsafe_inaccurate_nan.GetValue());
ui->cpuopt_unsafe_fastmem_check->setChecked(
Settings::values.cpuopt_unsafe_fastmem_check.GetValue());
+ ui->cpuopt_unsafe_ignore_global_monitor->setChecked(
+ Settings::values.cpuopt_unsafe_ignore_global_monitor.GetValue());
if (Settings::IsConfiguringGlobal()) {
ui->accuracy->setCurrentIndex(static_cast<int>(Settings::values.cpu_accuracy.GetValue()));
@@ -82,6 +85,9 @@ void ConfigureCpu::ApplyConfiguration() {
ConfigurationShared::ApplyPerGameSetting(&Settings::values.cpuopt_unsafe_fastmem_check,
ui->cpuopt_unsafe_fastmem_check,
cpuopt_unsafe_fastmem_check);
+ ConfigurationShared::ApplyPerGameSetting(&Settings::values.cpuopt_unsafe_ignore_global_monitor,
+ ui->cpuopt_unsafe_ignore_global_monitor,
+ cpuopt_unsafe_ignore_global_monitor);
}
void ConfigureCpu::changeEvent(QEvent* event) {
@@ -120,4 +126,7 @@ void ConfigureCpu::SetupPerGameUI() {
ConfigurationShared::SetColoredTristate(ui->cpuopt_unsafe_fastmem_check,
Settings::values.cpuopt_unsafe_fastmem_check,
cpuopt_unsafe_fastmem_check);
+ ConfigurationShared::SetColoredTristate(ui->cpuopt_unsafe_ignore_global_monitor,
+ Settings::values.cpuopt_unsafe_ignore_global_monitor,
+ cpuopt_unsafe_ignore_global_monitor);
}
diff --git a/src/yuzu/configuration/configure_cpu.h b/src/yuzu/configuration/configure_cpu.h
index ed9af0e9f..733e38be4 100644
--- a/src/yuzu/configuration/configure_cpu.h
+++ b/src/yuzu/configuration/configure_cpu.h
@@ -45,6 +45,7 @@ private:
ConfigurationShared::CheckState cpuopt_unsafe_ignore_standard_fpcr;
ConfigurationShared::CheckState cpuopt_unsafe_inaccurate_nan;
ConfigurationShared::CheckState cpuopt_unsafe_fastmem_check;
+ ConfigurationShared::CheckState cpuopt_unsafe_ignore_global_monitor;
const Core::System& system;
};
diff --git a/src/yuzu/configuration/configure_cpu.ui b/src/yuzu/configuration/configure_cpu.ui
index d8064db24..5d80a8c91 100644
--- a/src/yuzu/configuration/configure_cpu.ui
+++ b/src/yuzu/configuration/configure_cpu.ui
@@ -150,6 +150,18 @@
</property>
</widget>
</item>
+ <item>
+ <widget class="QCheckBox" name="cpuopt_unsafe_ignore_global_monitor">
+ <property name="toolTip">
+ <string>
+ &lt;div&gt;This option improves speed by relying only on the semantics of cmpxchg to ensure the safety of exclusive access instructions. Please note this may result in deadlocks and other concurrency issues.&lt;/div&gt;
+ </string>
+ </property>
+ <property name="text">
+ <string>Ignore global monitor</string>
+ </property>
+ </widget>
+ </item>
</layout>
</widget>
</item>
diff --git a/src/yuzu/configuration/configure_cpu_debug.cpp b/src/yuzu/configuration/configure_cpu_debug.cpp
index 05a90963d..616a0be75 100644
--- a/src/yuzu/configuration/configure_cpu_debug.cpp
+++ b/src/yuzu/configuration/configure_cpu_debug.cpp
@@ -44,6 +44,12 @@ void ConfigureCpuDebug::SetConfiguration() {
Settings::values.cpuopt_reduce_misalign_checks.GetValue());
ui->cpuopt_fastmem->setEnabled(runtime_lock);
ui->cpuopt_fastmem->setChecked(Settings::values.cpuopt_fastmem.GetValue());
+ ui->cpuopt_fastmem_exclusives->setEnabled(runtime_lock);
+ ui->cpuopt_fastmem_exclusives->setChecked(
+ Settings::values.cpuopt_fastmem_exclusives.GetValue());
+ ui->cpuopt_recompile_exclusives->setEnabled(runtime_lock);
+ ui->cpuopt_recompile_exclusives->setChecked(
+ Settings::values.cpuopt_recompile_exclusives.GetValue());
}
void ConfigureCpuDebug::ApplyConfiguration() {
@@ -56,6 +62,8 @@ void ConfigureCpuDebug::ApplyConfiguration() {
Settings::values.cpuopt_misc_ir = ui->cpuopt_misc_ir->isChecked();
Settings::values.cpuopt_reduce_misalign_checks = ui->cpuopt_reduce_misalign_checks->isChecked();
Settings::values.cpuopt_fastmem = ui->cpuopt_fastmem->isChecked();
+ Settings::values.cpuopt_fastmem_exclusives = ui->cpuopt_fastmem_exclusives->isChecked();
+ Settings::values.cpuopt_recompile_exclusives = ui->cpuopt_recompile_exclusives->isChecked();
}
void ConfigureCpuDebug::changeEvent(QEvent* event) {
diff --git a/src/yuzu/configuration/configure_cpu_debug.ui b/src/yuzu/configuration/configure_cpu_debug.ui
index 6e635bb2f..2bc268810 100644
--- a/src/yuzu/configuration/configure_cpu_debug.ui
+++ b/src/yuzu/configuration/configure_cpu_debug.ui
@@ -144,7 +144,34 @@
</string>
</property>
<property name="text">
- <string>Enable Host MMU Emulation</string>
+ <string>Enable Host MMU Emulation (general memory instructions)</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QCheckBox" name="cpuopt_fastmem_exclusives">
+ <property name="toolTip">
+ <string>
+ &lt;div style=&quot;white-space: nowrap&quot;&gt;This optimization speeds up exclusive memory accesses by the guest program.&lt;/div&gt;
+ &lt;div style=&quot;white-space: nowrap&quot;&gt;Enabling it causes guest exclusive memory reads/writes to be performed directly on host memory, making use of the host's MMU.&lt;/div&gt;
+ &lt;div style=&quot;white-space: nowrap&quot;&gt;Disabling this forces all exclusive memory accesses to use Software MMU Emulation.&lt;/div&gt;
+ </string>
+ </property>
+ <property name="text">
+ <string>Enable Host MMU Emulation (exclusive memory instructions)</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QCheckBox" name="cpuopt_recompile_exclusives">
+ <property name="toolTip">
+ <string>
+ &lt;div style=&quot;white-space: nowrap&quot;&gt;This optimization speeds up exclusive memory accesses by the guest program.&lt;/div&gt;
+ &lt;div style=&quot;white-space: nowrap&quot;&gt;Enabling it reduces the overhead of fastmem failures for exclusive memory accesses.&lt;/div&gt;
+ </string>
+ </property>
+ <property name="text">
+ <string>Enable recompilation of exclusive memory instructions</string>
</property>
</widget>
</item>
diff --git a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp
index 978a29fe6..08d5444ec 100644
--- a/src/yuzu/configuration/configure_general.cpp
+++ b/src/yuzu/configuration/configure_general.cpp
@@ -42,6 +42,9 @@ void ConfigureGeneral::SetConfiguration() {
ui->use_multi_core->setEnabled(runtime_lock);
ui->use_multi_core->setChecked(Settings::values.use_multi_core.GetValue());
+ ui->use_extended_memory_layout->setEnabled(runtime_lock);
+ ui->use_extended_memory_layout->setChecked(
+ Settings::values.use_extended_memory_layout.GetValue());
ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing.GetValue());
ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot.GetValue());
@@ -91,6 +94,9 @@ void ConfigureGeneral::ResetDefaults() {
void ConfigureGeneral::ApplyConfiguration() {
ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_multi_core, ui->use_multi_core,
use_multi_core);
+ ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_extended_memory_layout,
+ ui->use_extended_memory_layout,
+ use_extended_memory_layout);
if (Settings::IsConfiguringGlobal()) {
UISettings::values.confirm_before_closing = ui->toggle_check_exit->isChecked();
@@ -160,6 +166,9 @@ void ConfigureGeneral::SetupPerGameUI() {
Settings::values.use_speed_limit, use_speed_limit);
ConfigurationShared::SetColoredTristate(ui->use_multi_core, Settings::values.use_multi_core,
use_multi_core);
+ ConfigurationShared::SetColoredTristate(ui->use_extended_memory_layout,
+ Settings::values.use_extended_memory_layout,
+ use_extended_memory_layout);
connect(ui->toggle_speed_limit, &QCheckBox::clicked, ui->speed_limit, [this]() {
ui->speed_limit->setEnabled(ui->toggle_speed_limit->isChecked() &&
diff --git a/src/yuzu/configuration/configure_general.h b/src/yuzu/configuration/configure_general.h
index 85c1dd4a8..b6f3bb5ed 100644
--- a/src/yuzu/configuration/configure_general.h
+++ b/src/yuzu/configuration/configure_general.h
@@ -48,6 +48,7 @@ private:
ConfigurationShared::CheckState use_speed_limit;
ConfigurationShared::CheckState use_multi_core;
+ ConfigurationShared::CheckState use_extended_memory_layout;
const Core::System& system;
};
diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui
index bfc771135..c6ef2ab70 100644
--- a/src/yuzu/configuration/configure_general.ui
+++ b/src/yuzu/configuration/configure_general.ui
@@ -143,6 +143,13 @@
</widget>
</item>
<item>
+ <widget class="QCheckBox" name="use_extended_memory_layout">
+ <property name="text">
+ <string>Extended memory layout (6GB DRAM)</string>
+ </property>
+ </widget>
+ </item>
+ <item>
<widget class="QCheckBox" name="toggle_check_exit">
<property name="text">
<string>Confirm exit while emulation is running</string>
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index e3fd38a02..b3a8da0ea 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -2937,6 +2937,7 @@ void GMainWindow::OnLoadAmiibo() {
if (nfc_state == Service::NFP::DeviceState::TagFound ||
nfc_state == Service::NFP::DeviceState::TagMounted) {
nfc->CloseAmiibo();
+ QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed"));
return;
}
@@ -2965,6 +2966,15 @@ void GMainWindow::LoadAmiibo(const QString& filename) {
return;
}
+ // Remove amiibo if one is connected
+ const auto nfc_state = nfc->GetCurrentState();
+ if (nfc_state == Service::NFP::DeviceState::TagFound ||
+ nfc_state == Service::NFP::DeviceState::TagMounted) {
+ nfc->CloseAmiibo();
+ QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed"));
+ return;
+ }
+
QFile nfc_file{filename};
if (!nfc_file.open(QIODevice::ReadOnly)) {
QMessageBox::warning(this, tr("Error opening Amiibo data file"),
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index ff616da70..b74411c84 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -266,6 +266,7 @@ void Config::ReadValues() {
// Core
ReadSetting("Core", Settings::values.use_multi_core);
+ ReadSetting("Core", Settings::values.use_extended_memory_layout);
// Cpu
ReadSetting("Cpu", Settings::values.cpu_accuracy);
@@ -279,11 +280,14 @@ void Config::ReadValues() {
ReadSetting("Cpu", Settings::values.cpuopt_misc_ir);
ReadSetting("Cpu", Settings::values.cpuopt_reduce_misalign_checks);
ReadSetting("Cpu", Settings::values.cpuopt_fastmem);
+ ReadSetting("Cpu", Settings::values.cpuopt_fastmem_exclusives);
+ ReadSetting("Cpu", Settings::values.cpuopt_recompile_exclusives);
ReadSetting("Cpu", Settings::values.cpuopt_unsafe_unfuse_fma);
ReadSetting("Cpu", Settings::values.cpuopt_unsafe_reduce_fp_error);
ReadSetting("Cpu", Settings::values.cpuopt_unsafe_ignore_standard_fpcr);
ReadSetting("Cpu", Settings::values.cpuopt_unsafe_inaccurate_nan);
ReadSetting("Cpu", Settings::values.cpuopt_unsafe_fastmem_check);
+ ReadSetting("Cpu", Settings::values.cpuopt_unsafe_ignore_global_monitor);
// Renderer
ReadSetting("Renderer", Settings::values.renderer_backend);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 6d613bf7a..3ac1440c9 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -174,6 +174,14 @@ cpuopt_reduce_misalign_checks =
# 0: Disabled, 1 (default): Enabled
cpuopt_fastmem =
+# Enable Host MMU Emulation for exclusive memory instructions (faster guest memory access)
+# 0: Disabled, 1 (default): Enabled
+cpuopt_fastmem_exclusives =
+
+# Enable recompilation as a fallback when fastmem fails for exclusive memory instructions (faster guest memory access)
+# 0: Disabled, 1 (default): Enabled
+cpuopt_recompile_exclusives =
+
# Enable unfuse FMA (improve performance on CPUs without FMA)
# Only enabled if cpu_accuracy is set to Unsafe. Automatically chosen with cpu_accuracy = Auto-select.
# 0: Disabled, 1 (default): Enabled
@@ -199,6 +207,11 @@ cpuopt_unsafe_inaccurate_nan =
# 0: Disabled, 1 (default): Enabled
cpuopt_unsafe_fastmem_check =
+# Ignore the global monitor to speed up exclusive memory instructions
+# Only enabled if cpu_accuracy is set to Unsafe. Automatically chosen with cpu_accuracy = Auto-select.
+# 0: Disabled, 1 (default): Enabled
+cpuopt_unsafe_ignore_global_monitor =
+
[Renderer]
# Which backend API to use.
# 0 (default): OpenGL, 1: Vulkan