Diffstat (limited to 'src/core')
104 files changed, 4048 insertions, 1580 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 6e8d11919..1d4e92edb 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -122,6 +122,8 @@ add_library(core STATIC frontend/applets/error.h frontend/applets/general_frontend.cpp frontend/applets/general_frontend.h + frontend/applets/mii.cpp + frontend/applets/mii.h frontend/applets/profile_select.cpp frontend/applets/profile_select.h frontend/applets/software_keyboard.cpp @@ -152,6 +154,7 @@ add_library(core STATIC hle/api_version.h hle/ipc.h hle/ipc_helpers.h + hle/kernel/board/nintendo/nx/k_memory_layout.h hle/kernel/board/nintendo/nx/k_system_control.cpp hle/kernel/board/nintendo/nx/k_system_control.h hle/kernel/board/nintendo/nx/secure_monitor.h @@ -164,6 +167,7 @@ add_library(core STATIC hle/kernel/hle_ipc.h hle/kernel/init/init_slab_setup.cpp hle/kernel/init/init_slab_setup.h + hle/kernel/initial_process.h hle/kernel/k_address_arbiter.cpp hle/kernel/k_address_arbiter.h hle/kernel/k_address_space_info.cpp @@ -205,6 +209,8 @@ add_library(core STATIC hle/kernel/k_memory_region.h hle/kernel/k_memory_region_type.h hle/kernel/k_page_bitmap.h + hle/kernel/k_page_buffer.cpp + hle/kernel/k_page_buffer.h hle/kernel/k_page_heap.cpp hle/kernel/k_page_heap.h hle/kernel/k_page_linked_list.h @@ -242,6 +248,8 @@ add_library(core STATIC hle/kernel/k_system_control.h hle/kernel/k_thread.cpp hle/kernel/k_thread.h + hle/kernel/k_thread_local_page.cpp + hle/kernel/k_thread_local_page.h hle/kernel/k_thread_queue.cpp hle/kernel/k_thread_queue.h hle/kernel/k_trace.h @@ -298,6 +306,8 @@ add_library(core STATIC hle/service/am/applets/applet_error.h hle/service/am/applets/applet_general_backend.cpp hle/service/am/applets/applet_general_backend.h + hle/service/am/applets/applet_mii.cpp + hle/service/am/applets/applet_mii.h hle/service/am/applets/applet_profile_select.cpp hle/service/am/applets/applet_profile_select.h hle/service/am/applets/applet_software_keyboard.cpp @@ -467,6 +477,8 @@ add_library(core STATIC hle/service/mii/types.h hle/service/mm/mm_u.cpp hle/service/mm/mm_u.h + hle/service/mnpp/mnpp_app.cpp + hle/service/mnpp/mnpp_app.h hle/service/ncm/ncm.cpp hle/service/ncm/ncm.h hle/service/nfc/nfc.cpp diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index 689e3ceb5..c60322442 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h @@ -6,6 +6,7 @@ #include <array> #include <vector> +#include "common/common_funcs.h" #include "common/common_types.h" #include "core/hardware_properties.h" @@ -24,8 +25,11 @@ class CPUInterruptHandler; using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>; /// Generic ARMv8 CPU interface -class ARM_Interface : NonCopyable { +class ARM_Interface { public: + YUZU_NON_COPYABLE(ARM_Interface); + YUZU_NON_MOVEABLE(ARM_Interface); + explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_, bool uses_wall_clock_) : system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{ diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp index b0d89c539..c1c843b8f 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp @@ -137,6 +137,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS; config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128; 
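Before the JIT configuration continues below, a note on the ARM_Interface hunk above: inheriting from the old NonCopyable helper is replaced by explicit per-class macros. For context, YUZU_NON_COPYABLE and YUZU_NON_MOVEABLE in common/common_funcs.h delete the copy and move members, roughly as sketched here (an assumption about the expansion; consult the header for the exact definition):

```cpp
// Sketch of the deletion macros from common/common_funcs.h (assumed expansion).
#define YUZU_NON_COPYABLE(cls)                                                 \
    cls(const cls&) = delete;                                                  \
    cls& operator=(const cls&) = delete

#define YUZU_NON_MOVEABLE(cls)                                                 \
    cls(cls&&) = delete;                                                       \
    cls& operator=(cls&&) = delete
```

Spelling the deletions out per class removes a base class from the hierarchy and lets each type put them in its own access section, which is why later hunks in this diff also shuffle the macros under `public:`.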
config.only_detect_misalignment_via_page_table_on_page_boundary = true; + config.fastmem_exclusive_access = true; + config.recompile_on_exclusive_fastmem_failure = true; // Multi-process state config.processor_id = core_index; @@ -146,8 +148,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* config.wall_clock_cntpct = uses_wall_clock; // Code cache size - config.code_cache_size = 512_MiB; - config.far_code_offset = 400_MiB; + config.code_cache_size = 128_MiB; + config.far_code_offset = 100_MiB; // Safe optimizations if (Settings::values.cpu_debug_mode) { @@ -178,6 +180,12 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* if (!Settings::values.cpuopt_fastmem) { config.fastmem_pointer = nullptr; } + if (!Settings::values.cpuopt_fastmem_exclusives) { + config.fastmem_exclusive_access = false; + } + if (!Settings::values.cpuopt_recompile_exclusives) { + config.recompile_on_exclusive_fastmem_failure = false; + } } // Unsafe optimizations @@ -195,6 +203,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* if (Settings::values.cpuopt_unsafe_inaccurate_nan) { config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN; } + if (Settings::values.cpuopt_unsafe_ignore_global_monitor) { + config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor; + } } // Curated optimizations @@ -203,6 +214,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA; config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreStandardFPCRValue; config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN; + config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor; } return std::make_unique<Dynarmic::A32::Jit>(config); diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index 56836bd05..aa74fce4d 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -185,6 +185,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* config.fastmem_pointer = page_table->fastmem_arena; config.fastmem_address_space_bits = address_space_bits; config.silently_mirror_fastmem = false; + + config.fastmem_exclusive_access = true; + config.recompile_on_exclusive_fastmem_failure = true; } // Multi-process state @@ -205,8 +208,8 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* config.wall_clock_cntpct = uses_wall_clock; // Code cache size - config.code_cache_size = 512_MiB; - config.far_code_offset = 400_MiB; + config.code_cache_size = 128_MiB; + config.far_code_offset = 100_MiB; // Safe optimizations if (Settings::values.cpu_debug_mode) { @@ -237,6 +240,12 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* if (!Settings::values.cpuopt_fastmem) { config.fastmem_pointer = nullptr; } + if (!Settings::values.cpuopt_fastmem_exclusives) { + config.fastmem_exclusive_access = false; + } + if (!Settings::values.cpuopt_recompile_exclusives) { + config.recompile_on_exclusive_fastmem_failure = false; + } } // Unsafe optimizations @@ -254,6 +263,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* if (Settings::values.cpuopt_unsafe_fastmem_check) { config.fastmem_address_space_bits = 64; } + if (Settings::values.cpuopt_unsafe_ignore_global_monitor) { + config.optimizations |= 
Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor; + } } // Curated optimizations @@ -262,6 +274,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA; config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN; config.fastmem_address_space_bits = 64; + config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor; } return std::make_shared<Dynarmic::A64::Jit>(config); diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp index 397d054a8..ea6b224e0 100644 --- a/src/core/arm/dynarmic/arm_exclusive_monitor.cpp +++ b/src/core/arm/dynarmic/arm_exclusive_monitor.cpp @@ -37,8 +37,8 @@ u128 DynarmicExclusiveMonitor::ExclusiveRead128(std::size_t core_index, VAddr ad }); } -void DynarmicExclusiveMonitor::ClearExclusive() { - monitor.Clear(); +void DynarmicExclusiveMonitor::ClearExclusive(std::size_t core_index) { + monitor.ClearProcessor(core_index); } bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) { diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.h b/src/core/arm/dynarmic/arm_exclusive_monitor.h index 265c4ecef..5a15b43ef 100644 --- a/src/core/arm/dynarmic/arm_exclusive_monitor.h +++ b/src/core/arm/dynarmic/arm_exclusive_monitor.h @@ -29,7 +29,7 @@ public: u32 ExclusiveRead32(std::size_t core_index, VAddr addr) override; u64 ExclusiveRead64(std::size_t core_index, VAddr addr) override; u128 ExclusiveRead128(std::size_t core_index, VAddr addr) override; - void ClearExclusive() override; + void ClearExclusive(std::size_t core_index) override; bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override; bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override; diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h index 62f6e6023..9914ca3da 100644 --- a/src/core/arm/exclusive_monitor.h +++ b/src/core/arm/exclusive_monitor.h @@ -23,7 +23,7 @@ public: virtual u32 ExclusiveRead32(std::size_t core_index, VAddr addr) = 0; virtual u64 ExclusiveRead64(std::size_t core_index, VAddr addr) = 0; virtual u128 ExclusiveRead128(std::size_t core_index, VAddr addr) = 0; - virtual void ClearExclusive() = 0; + virtual void ClearExclusive(std::size_t core_index) = 0; virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0; virtual bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) = 0; diff --git a/src/core/core.cpp b/src/core/core.cpp index 3f9a7f44b..c60a784c3 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -28,7 +28,9 @@ #include "core/file_sys/vfs_real.h" #include "core/hardware_interrupt_manager.h" #include "core/hid/hid_core.h" +#include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" @@ -252,9 +254,16 @@ struct System::Impl { } telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider); + + // Create a resource limit for the process. + const auto physical_memory_size = + kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application); + auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size); + + // Create the process. 
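(The process-creation hunk resumes below.) The exclusive-monitor hunks above narrow ClearExclusive from a global clear to a per-core one: the Dynarmic-backed implementation now calls monitor.ClearProcessor(core_index), so a core drops only its own reservation instead of invalidating every core's, matching the per-PE local monitor on real hardware. A call-site sketch; the accessor names used here are assumptions, not part of this diff:

```cpp
#include "core/core.h"
#include "core/hle/kernel/kernel.h"

// Sketch: drop only the calling core's exclusive reservation. The accessors
// (Kernel().CurrentPhysicalCoreIndex(), Monitor()) are assumed names.
void ClearMyReservation(Core::System& system) {
    const std::size_t core_index = system.Kernel().CurrentPhysicalCoreIndex();
    system.Monitor().ClearExclusive(core_index);
}
```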
auto main_process = Kernel::KProcess::Create(system.Kernel()); ASSERT(Kernel::KProcess::Initialize(main_process, system, "main", - Kernel::KProcess::ProcessType::Userland) + Kernel::KProcess::ProcessType::Userland, resource_limit) .IsSuccess()); const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); if (load_result != Loader::ResultStatus::Success) { @@ -317,7 +326,9 @@ struct System::Impl { is_powered_on = false; exit_lock = false; - gpu_core->NotifyShutdown(); + if (gpu_core != nullptr) { + gpu_core->NotifyShutdown(); + } services.reset(); service_manager.reset(); diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp index f19c0515f..e6bc63086 100644 --- a/src/core/device_memory.cpp +++ b/src/core/device_memory.cpp @@ -3,10 +3,13 @@ // Refer to the license.txt file included. #include "core/device_memory.h" +#include "hle/kernel/board/nintendo/nx/k_system_control.h" namespace Core { -DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size, 1ULL << 39} {} +DeviceMemory::DeviceMemory() + : buffer{Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize(), + 1ULL << 39} {} DeviceMemory::~DeviceMemory() = default; } // namespace Core diff --git a/src/core/device_memory.h b/src/core/device_memory.h index c4d17705f..daeb551ea 100644 --- a/src/core/device_memory.h +++ b/src/core/device_memory.h @@ -12,12 +12,8 @@ namespace Core { namespace DramMemoryMap { enum : u64 { Base = 0x80000000ULL, - Size = 0x100000000ULL, - End = Base + Size, KernelReserveBase = Base + 0x60000, SlabHeapBase = KernelReserveBase + 0x85000, - SlapHeapSize = 0xa21000, - SlabHeapEnd = SlabHeapBase + SlapHeapSize, }; }; // namespace DramMemoryMap diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index 7c0950bb0..f19ac4607 100644 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp @@ -128,15 +128,6 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const { if (exefs == nullptr) return exefs; - if (Settings::values.dump_exefs) { - LOG_INFO(Loader, "Dumping ExeFS for title_id={:016X}", title_id); - const auto dump_dir = fs_controller.GetModificationDumpRoot(title_id); - if (dump_dir != nullptr) { - const auto exefs_dir = GetOrCreateDirectoryRelative(dump_dir, "/exefs"); - VfsRawCopyD(exefs, exefs_dir); - } - } - const auto& disabled = Settings::values.disabled_addons[title_id]; const auto update_disabled = std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); @@ -179,6 +170,15 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const { } } + if (Settings::values.dump_exefs) { + LOG_INFO(Loader, "Dumping ExeFS for title_id={:016X}", title_id); + const auto dump_dir = fs_controller.GetModificationDumpRoot(title_id); + if (dump_dir != nullptr) { + const auto exefs_dir = GetOrCreateDirectoryRelative(dump_dir, "/exefs"); + VfsRawCopyD(exefs, exefs_dir); + } + } + return exefs; } diff --git a/src/core/file_sys/vfs.h b/src/core/file_sys/vfs.h index 3e625fad6..1b9365853 100644 --- a/src/core/file_sys/vfs.h +++ b/src/core/file_sys/vfs.h @@ -12,6 +12,7 @@ #include <type_traits> #include <vector> +#include "common/common_funcs.h" #include "common/common_types.h" #include "core/file_sys/vfs_types.h" @@ -29,8 +30,11 @@ enum class VfsEntryType { // A class representing an abstract filesystem. A default implementation given the root VirtualDir // is provided for convenience, but if the Vfs implementation has any additional state or // functionality, they will need to override. 
-class VfsFilesystem : NonCopyable { +class VfsFilesystem { public: + YUZU_NON_COPYABLE(VfsFilesystem); + YUZU_NON_MOVEABLE(VfsFilesystem); + explicit VfsFilesystem(VirtualDir root); virtual ~VfsFilesystem(); @@ -77,8 +81,12 @@ protected: }; // A class representing a file in an abstract filesystem. -class VfsFile : NonCopyable { +class VfsFile { public: + YUZU_NON_COPYABLE(VfsFile); + YUZU_NON_MOVEABLE(VfsFile); + + VfsFile() = default; virtual ~VfsFile(); // Retrieves the file name. @@ -176,8 +184,12 @@ public: }; // A class representing a directory in an abstract filesystem. -class VfsDirectory : NonCopyable { +class VfsDirectory { public: + YUZU_NON_COPYABLE(VfsDirectory); + YUZU_NON_MOVEABLE(VfsDirectory); + + VfsDirectory() = default; virtual ~VfsDirectory(); // Retrives the file located at path as if the current directory was root. Returns nullptr if diff --git a/src/core/frontend/applets/mii.cpp b/src/core/frontend/applets/mii.cpp new file mode 100644 index 000000000..1c05ff412 --- /dev/null +++ b/src/core/frontend/applets/mii.cpp @@ -0,0 +1,19 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/logging/log.h" +#include "core/frontend/applets/mii.h" + +namespace Core::Frontend { + +MiiApplet::~MiiApplet() = default; + +void DefaultMiiApplet::ShowMii( + const MiiParameters& parameters, + const std::function<void(const Core::Frontend::MiiParameters& parameters)> callback) const { + LOG_INFO(Service_HID, "(STUBBED) called"); + callback(parameters); +} + +} // namespace Core::Frontend diff --git a/src/core/frontend/applets/mii.h b/src/core/frontend/applets/mii.h new file mode 100644 index 000000000..1fc40a9c6 --- /dev/null +++ b/src/core/frontend/applets/mii.h @@ -0,0 +1,35 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
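The new Mii frontend applet above ships as a stub that logs and immediately echoes the parameters back through the callback; its interface header continues below. A GUI frontend would subclass MiiApplet instead. A hypothetical implementation (EditorMiiApplet and the UI step are illustrative only):

```cpp
// Hypothetical frontend override: a real implementation would present an
// editor dialog and return the edited Mii through the callback.
class EditorMiiApplet final : public Core::Frontend::MiiApplet {
public:
    void ShowMii(const Core::Frontend::MiiParameters& parameters,
                 const std::function<void(const Core::Frontend::MiiParameters&)>
                     callback) const override {
        Core::Frontend::MiiParameters result = parameters;
        // ...show UI and mutate result.mii_data while result.is_editable...
        callback(result);
    }
};
```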
+ +#pragma once + +#include <functional> + +#include "core/hle/result.h" +#include "core/hle/service/mii/mii_manager.h" + +namespace Core::Frontend { + +struct MiiParameters { + bool is_editable; + Service::Mii::MiiInfo mii_data{}; +}; + +class MiiApplet { +public: + virtual ~MiiApplet(); + + virtual void ShowMii(const MiiParameters& parameters, + const std::function<void(const Core::Frontend::MiiParameters& parameters)> + callback) const = 0; +}; + +class DefaultMiiApplet final : public MiiApplet { +public: + void ShowMii(const MiiParameters& parameters, + const std::function<void(const Core::Frontend::MiiParameters& parameters)> + callback) const override; +}; + +} // namespace Core::Frontend diff --git a/src/core/frontend/applets/profile_select.cpp b/src/core/frontend/applets/profile_select.cpp index 3e4f90be2..4c58c310f 100644 --- a/src/core/frontend/applets/profile_select.cpp +++ b/src/core/frontend/applets/profile_select.cpp @@ -13,8 +13,7 @@ ProfileSelectApplet::~ProfileSelectApplet() = default; void DefaultProfileSelectApplet::SelectProfile( std::function<void(std::optional<Common::UUID>)> callback) const { Service::Account::ProfileManager manager; - callback(manager.GetUser(Settings::values.current_user.GetValue()) - .value_or(Common::UUID{Common::INVALID_UUID})); + callback(manager.GetUser(Settings::values.current_user.GetValue()).value_or(Common::UUID{})); LOG_INFO(Service_ACC, "called, selecting current user instead of prompting..."); } diff --git a/src/core/frontend/emu_window.h b/src/core/frontend/emu_window.h index e413a520a..b3bffecb2 100644 --- a/src/core/frontend/emu_window.h +++ b/src/core/frontend/emu_window.h @@ -42,11 +42,20 @@ public: context.MakeCurrent(); } ~Scoped() { - context.DoneCurrent(); + if (active) { + context.DoneCurrent(); + } + } + + /// In the event that context was destroyed before the Scoped is destroyed, this provides a + /// mechanism to prevent calling a destroyed object's method during the deconstructor + void Cancel() { + active = false; } private: GraphicsContext& context; + bool active{true}; }; /// Calls MakeCurrent on the context and calls DoneCurrent when the scope for the returned value diff --git a/src/core/hid/emulated_console.h b/src/core/hid/emulated_console.h index 707419102..5eb170823 100644 --- a/src/core/hid/emulated_console.h +++ b/src/core/hid/emulated_console.h @@ -10,6 +10,7 @@ #include <mutex> #include <unordered_map> +#include "common/common_funcs.h" #include "common/common_types.h" #include "common/input.h" #include "common/param_package.h" diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp index a7cdf45e6..7e05666d6 100644 --- a/src/core/hid/emulated_controller.cpp +++ b/src/core/hid/emulated_controller.cpp @@ -269,7 +269,8 @@ void EmulatedController::ReloadInput() { } // Use a common UUID for TAS - const auto tas_uuid = Common::UUID{0x0, 0x7A5}; + static constexpr Common::UUID TAS_UUID = Common::UUID{ + {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xA5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}; // Register TAS devices. 
No need to force update for (std::size_t index = 0; index < tas_button_devices.size(); ++index) { @@ -278,8 +279,8 @@ void EmulatedController::ReloadInput() { } tas_button_devices[index]->SetCallback({ .on_change = - [this, index, tas_uuid](const Common::Input::CallbackStatus& callback) { - SetButton(callback, index, tas_uuid); + [this, index](const Common::Input::CallbackStatus& callback) { + SetButton(callback, index, TAS_UUID); }, }); } @@ -290,8 +291,8 @@ void EmulatedController::ReloadInput() { } tas_stick_devices[index]->SetCallback({ .on_change = - [this, index, tas_uuid](const Common::Input::CallbackStatus& callback) { - SetStick(callback, index, tas_uuid); + [this, index](const Common::Input::CallbackStatus& callback) { + SetStick(callback, index, TAS_UUID); }, }); } @@ -884,6 +885,12 @@ bool EmulatedController::TestVibration(std::size_t device_index) { return SetVibration(device_index, DEFAULT_VIBRATION_VALUE); } +bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) { + LOG_INFO(Service_HID, "Set polling mode {}", polling_mode); + auto& output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)]; + return output_device->SetPollingMode(polling_mode) == Common::Input::PollingError::None; +} + void EmulatedController::SetLedPattern() { for (auto& device : output_devices) { if (!device) { diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h index a63a83cce..aa52f9572 100644 --- a/src/core/hid/emulated_controller.h +++ b/src/core/hid/emulated_controller.h @@ -13,8 +13,6 @@ #include "common/common_types.h" #include "common/input.h" #include "common/param_package.h" -#include "common/point.h" -#include "common/quaternion.h" #include "common/settings.h" #include "common/vector_math.h" #include "core/hid/hid_types.h" @@ -301,16 +299,23 @@ public: /** * Sends a specific vibration to the output device - * @return returns true if vibration had no errors + * @return true if vibration had no errors */ bool SetVibration(std::size_t device_index, VibrationValue vibration); /** * Sends a small vibration to the output device - * @return returns true if SetVibration was successfull + * @return true if SetVibration was successfull */ bool TestVibration(std::size_t device_index); + /** + * Sets the desired data to be polled from a controller + * @param polling_mode type of input desired buttons, gyro, nfc, ir, etc. 
+ * @return true if SetPollingMode was successfull + */ + bool SetPollingMode(Common::Input::PollingMode polling_mode); + /// Returns the led pattern corresponding to this emulated controller LedPattern GetLedPattern() const; diff --git a/src/core/hid/hid_core.h b/src/core/hid/hid_core.h index 837f7de49..717f605e7 100644 --- a/src/core/hid/hid_core.h +++ b/src/core/hid/hid_core.h @@ -6,6 +6,7 @@ #include <memory> +#include "common/common_funcs.h" #include "core/hid/hid_types.h" namespace Core::HID { diff --git a/src/core/hid/input_converter.cpp b/src/core/hid/input_converter.cpp index 860aab400..cd41607a7 100644 --- a/src/core/hid/input_converter.cpp +++ b/src/core/hid/input_converter.cpp @@ -28,7 +28,7 @@ Common::Input::BatteryStatus TransformToBattery(const Common::Input::CallbackSta if (value > 0.8f) { battery = Common::Input::BatteryLevel::Full; } - if (value >= 1.0f) { + if (value >= 0.95f) { battery = Common::Input::BatteryLevel::Charging; } break; diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index cf204f570..3c4e45fcd 100644 --- a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h @@ -385,7 +385,7 @@ public: T PopRaw(); template <class T> - std::shared_ptr<T> PopIpcInterface() { + std::weak_ptr<T> PopIpcInterface() { ASSERT(context->Session()->IsDomain()); ASSERT(context->GetDomainMessageHeader().input_object_count > 0); return context->GetDomainHandler<T>(Pop<u32>() - 1); @@ -404,6 +404,11 @@ inline s32 RequestParser::Pop() { return static_cast<s32>(Pop<u32>()); } +// Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects. +#if defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wclass-memaccess" +#endif template <typename T> void RequestParser::PopRaw(T& value) { static_assert(std::is_trivially_copyable_v<T>, @@ -411,6 +416,9 @@ void RequestParser::PopRaw(T& value) { std::memcpy(&value, cmdbuf + index, sizeof(T)); index += (sizeof(T) + 3) / 4; // round up to word length } +#if defined(__GNUC__) +#pragma GCC diagnostic pop +#endif template <typename T> T RequestParser::PopRaw() { diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h new file mode 100644 index 000000000..01e225088 --- /dev/null +++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h @@ -0,0 +1,13 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" + +namespace Kernel { + +constexpr inline PAddr MainMemoryAddress = 0x80000000; + +} // namespace Kernel diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp index 6f335c251..8027bec00 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp @@ -5,6 +5,7 @@ #include <random> #include "common/literals.h" +#include "common/settings.h" #include "core/hle/kernel/board/nintendo/nx/k_system_control.h" #include "core/hle/kernel/board/nintendo/nx/secure_monitor.h" @@ -28,33 +29,20 @@ namespace { using namespace Common::Literals; -u32 GetMemoryModeForInit() { - return 0x01; -} - u32 GetMemorySizeForInit() { - return 0; + return Settings::values.use_extended_memory_layout ? 
Smc::MemorySize_6GB : Smc::MemorySize_4GB; } Smc::MemoryArrangement GetMemoryArrangeForInit() { - switch (GetMemoryModeForInit() & 0x3F) { - case 0x01: - default: - return Smc::MemoryArrangement_4GB; - case 0x02: - return Smc::MemoryArrangement_4GBForAppletDev; - case 0x03: - return Smc::MemoryArrangement_4GBForSystemDev; - case 0x11: - return Smc::MemoryArrangement_6GB; - case 0x12: - return Smc::MemoryArrangement_6GBForAppletDev; - case 0x21: - return Smc::MemoryArrangement_8GB; - } + return Settings::values.use_extended_memory_layout ? Smc::MemoryArrangement_6GB + : Smc::MemoryArrangement_4GB; } } // namespace +size_t KSystemControl::Init::GetRealMemorySize() { + return GetIntendedMemorySize(); +} + // Initialization. size_t KSystemControl::Init::GetIntendedMemorySize() { switch (GetMemorySizeForInit()) { @@ -69,7 +57,13 @@ size_t KSystemControl::Init::GetIntendedMemorySize() { } PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) { - return base_address; + const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize(); + const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize(); + if (intended_dram_size * 2 < real_dram_size) { + return base_address; + } else { + return base_address + ((real_dram_size - intended_dram_size) / 2); + } } bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h index 52f230ced..df2a17f2a 100644 --- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h +++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h @@ -13,6 +13,7 @@ public: class Init { public: // Initialization. + static std::size_t GetRealMemorySize(); static std::size_t GetIntendedMemorySize(); static PAddr GetKernelPhysicalBaseAddress(u64 base_address); static bool ShouldIncreaseThreadResourceLimit(); diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index e19544c54..9f2175f82 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -45,7 +45,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co LOG_CRITICAL(IPC, "object_id {} is too big!", object_id); return false; } - return DomainHandler(object_id - 1) != nullptr; + return DomainHandler(object_id - 1).lock() != nullptr; } else { return session_handler != nullptr; } @@ -53,9 +53,6 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co void SessionRequestHandler::ClientConnected(KServerSession* session) { session->ClientConnected(shared_from_this()); - - // Ensure our server session is tracked globally. 
- kernel.RegisterServerSession(session); } void SessionRequestHandler::ClientDisconnected(KServerSession* session) { diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index 754b41ff6..670cc741c 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -94,6 +94,7 @@ protected: std::weak_ptr<ServiceThread> service_thread; }; +using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>; /** @@ -139,7 +140,7 @@ public: } } - SessionRequestHandlerPtr DomainHandler(std::size_t index) const { + SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const { ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index); return domain_handlers.at(index); } @@ -328,10 +329,10 @@ public: template <typename T> std::shared_ptr<T> GetDomainHandler(std::size_t index) const { - return std::static_pointer_cast<T>(manager->DomainHandler(index)); + return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock()); } - void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) { + void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) { manager = std::move(manager_); } @@ -374,7 +375,7 @@ private: u32 handles_offset{}; u32 domain_offset{}; - std::shared_ptr<SessionRequestManager> manager; + std::weak_ptr<SessionRequestManager> manager; KernelCore& kernel; Core::Memory::Memory& memory; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 36fc0944a..b0f773ee0 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -7,19 +7,23 @@ #include "common/common_funcs.h" #include "common/common_types.h" #include "core/core.h" +#include "core/device_memory.h" #include "core/hardware_properties.h" #include "core/hle/kernel/init/init_slab_setup.h" #include "core/hle/kernel/k_code_memory.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_page_buffer.h" #include "core/hle/kernel/k_port.h" #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_shared_memory.h" +#include "core/hle/kernel/k_shared_memory_info.h" #include "core/hle/kernel/k_system_control.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_local_page.h" #include "core/hle/kernel/k_transfer_memory.h" namespace Kernel::Init { @@ -32,9 +36,13 @@ namespace Kernel::Init { HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ + HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \ HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \ + HANDLER(KThreadLocalPage, \ + (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ + ##__VA_ARGS__) \ HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) namespace { @@ -50,38 +58,46 @@ enum KSlabType : u32 { // Constexpr counts. 
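(The slab count constants continue below.) The IPC hunks above replace shared_ptr with weak_ptr in several places: PopIpcInterface, SessionRequestManager::DomainHandler, and the HLERequestContext's manager pointer. The effect is that an in-flight request no longer keeps session and domain objects alive, breaking reference cycles between sessions and their handlers; callers must now lock() and tolerate expiry. A sketch of the resulting caller pattern (MyInterface, HandleRequest, and ctx are placeholders):

```cpp
// After the change, PopIpcInterface hands back a std::weak_ptr, so the
// handler must be locked before use and expiry treated as a dead object.
IPC::RequestParser rp{ctx};
std::weak_ptr<MyInterface> weak_handler = rp.PopIpcInterface<MyInterface>();
if (std::shared_ptr<MyInterface> handler = weak_handler.lock()) {
    handler->HandleRequest(ctx); // the domain object is still alive
} else {
    // The object was already torn down; fail the request gracefully.
}
```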
constexpr size_t SlabCountKProcess = 80; constexpr size_t SlabCountKThread = 800; -constexpr size_t SlabCountKEvent = 700; +constexpr size_t SlabCountKEvent = 900; constexpr size_t SlabCountKInterruptEvent = 100; -constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew. +constexpr size_t SlabCountKPort = 384; constexpr size_t SlabCountKSharedMemory = 80; constexpr size_t SlabCountKTransferMemory = 200; constexpr size_t SlabCountKCodeMemory = 10; constexpr size_t SlabCountKDeviceAddressSpace = 300; -constexpr size_t SlabCountKSession = 933; +constexpr size_t SlabCountKSession = 1133; constexpr size_t SlabCountKLightSession = 100; constexpr size_t SlabCountKObjectName = 7; constexpr size_t SlabCountKResourceLimit = 5; constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; -constexpr size_t SlabCountKAlpha = 1; -constexpr size_t SlabCountKBeta = 6; +constexpr size_t SlabCountKIoPool = 1; +constexpr size_t SlabCountKIoRegion = 6; constexpr size_t SlabCountExtraKThread = 160; +/// Helper function to translate from the slab virtual address to the reserved location in physical +/// memory. +static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) { + slab_addr -= memory_layout.GetSlabRegionAddress(); + return slab_addr + Core::DramMemoryMap::SlabHeapBase; +} + template <typename T> VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address, size_t num_objects) { - // TODO(bunnei): This is just a place holder. We should initialize the appropriate KSlabHeap for - // kernel object type T with the backing kernel memory pointer once we emulate kernel memory. const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*)); VAddr start = Common::AlignUp(address, alignof(T)); - // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with - // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free - // host memory. - void* backing_kernel_memory{}; + // This should use the virtual memory address passed in, but currently, we do not setup the + // kernel virtual memory layout. Instead, we simply map these at a region of physical memory + // that we reserve for the slab heaps. + // TODO(bunnei): Fix this once we support the kernel virtual memory layout. if (size > 0) { + void* backing_kernel_memory{ + system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; + const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); ASSERT(region != nullptr); ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab)); @@ -91,6 +107,12 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd return start + size; } +size_t CalculateSlabHeapGapSize() { + constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB; + static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); + return KernelSlabHeapGapSize; +} + } // namespace KSlabResourceCounts KSlabResourceCounts::CreateDefault() { @@ -109,8 +131,8 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() { .num_KObjectName = SlabCountKObjectName, .num_KResourceLimit = SlabCountKResourceLimit, .num_KDebug = SlabCountKDebug, - .num_KAlpha = SlabCountKAlpha, - .num_KBeta = SlabCountKBeta, + .num_KIoPool = SlabCountKIoPool, + .num_KIoRegion = SlabCountKIoRegion, }; } @@ -136,11 +158,34 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) { #undef ADD_SLAB_SIZE // Add the reserved size. 
- size += KernelSlabHeapGapsSize; + size += CalculateSlabHeapGapSize(); return size; } +void InitializeKPageBufferSlabHeap(Core::System& system) { + auto& kernel = system.Kernel(); + + const auto& counts = kernel.SlabResourceCounts(); + const size_t num_pages = + counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; + const size_t slab_size = num_pages * PageSize; + + // Reserve memory from the system resource limit. + ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size)); + + // Allocate memory for the slab. + constexpr auto AllocateOption = KMemoryManager::EncodeOption( + KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); + const PAddr slab_address = + kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); + ASSERT(slab_address != 0); + + // Initialize the slabheap. + KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), + slab_size); +} + void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { auto& kernel = system.Kernel(); @@ -160,13 +205,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { } // Create an array to represent the gaps between the slabs. - const size_t total_gap_size = KernelSlabHeapGapsSize; + const size_t total_gap_size = CalculateSlabHeapGapSize(); std::array<size_t, slab_types.size()> slab_gaps; - for (size_t i = 0; i < slab_gaps.size(); i++) { + for (auto& slab_gap : slab_gaps) { // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we // will include it ourselves. - slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size); + slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size); } // Sort the array, so that we can treat differences between values as offsets to the starts of @@ -177,13 +222,21 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { } } - for (size_t i = 0; i < slab_types.size(); i++) { + // Track the gaps, so that we can free them to the unused slab tree. + VAddr gap_start = address; + size_t gap_size = 0; + + for (size_t i = 0; i < slab_gaps.size(); i++) { // Add the random gap to the address. - address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + address += cur_gap; + gap_size += cur_gap; #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ case KSlabType_##NAME: \ - address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \ + if (COUNT > 0) { \ + address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \ + } \ break; // Initialize the slabheap. @@ -192,7 +245,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) // If we somehow get an invalid type, abort. default: - UNREACHABLE(); + UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]); + } + + // If we've hit the end of a gap, free it. 
+ if (gap_start + gap_size != address) { + gap_start = address; + gap_size = 0; } } } diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h index a8f7e0918..f54b67d02 100644 --- a/src/core/hle/kernel/init/init_slab_setup.h +++ b/src/core/hle/kernel/init/init_slab_setup.h @@ -32,12 +32,13 @@ struct KSlabResourceCounts { size_t num_KObjectName; size_t num_KResourceLimit; size_t num_KDebug; - size_t num_KAlpha; - size_t num_KBeta; + size_t num_KIoPool; + size_t num_KIoRegion; }; void InitializeSlabResourceCounts(KernelCore& kernel); size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); +void InitializeKPageBufferSlabHeap(Core::System& system); void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); } // namespace Kernel::Init diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h new file mode 100644 index 000000000..25b27909c --- /dev/null +++ b/src/core/hle/kernel/initial_process.h @@ -0,0 +1,23 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" +#include "common/literals.h" +#include "core/hle/kernel/board/nintendo/nx/k_memory_layout.h" +#include "core/hle/kernel/board/nintendo/nx/k_system_control.h" + +namespace Kernel { + +using namespace Common::Literals; + +constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB; + +static inline PAddr GetInitialProcessBinaryPhysicalAddress() { + return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress( + MainMemoryAddress); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp index 783c69858..8cdd0490f 100644 --- a/src/core/hle/kernel/k_address_arbiter.cpp +++ b/src/core/hle/kernel/k_address_arbiter.cpp @@ -49,7 +49,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu } } else { // Otherwise, clear our exclusive hold and finish - monitor.ClearExclusive(); + monitor.ClearExclusive(current_core); } // We're done. @@ -78,7 +78,7 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 } } else { // Otherwise, clear our exclusive hold and finish. - monitor.ClearExclusive(); + monitor.ClearExclusive(current_core); } // We're done. @@ -115,7 +115,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { { KScopedSchedulerLock sl(kernel); - auto it = thread_tree.nfind_light({addr, -1}); + auto it = thread_tree.nfind_key({addr, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { // End the thread's wait. @@ -148,7 +148,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 return ResultInvalidState; } - auto it = thread_tree.nfind_light({addr, -1}); + auto it = thread_tree.nfind_key({addr, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { // End the thread's wait. @@ -171,7 +171,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 { [[maybe_unused]] const KScopedSchedulerLock sl(kernel); - auto it = thread_tree.nfind_light({addr, -1}); + auto it = thread_tree.nfind_key({addr, -1}); // Determine the updated value. 
s32 new_value{}; if (count <= 0) { diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index 165b76747..05779f2d5 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h @@ -20,8 +20,6 @@ class KernelCore; class KProcess; #define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \ - YUZU_NON_COPYABLE(CLASS); \ - YUZU_NON_MOVEABLE(CLASS); \ \ private: \ friend class ::Kernel::KClassTokenGenerator; \ @@ -32,6 +30,9 @@ private: } \ \ public: \ + YUZU_NON_COPYABLE(CLASS); \ + YUZU_NON_MOVEABLE(CLASS); \ + \ using BaseClass = BASE_CLASS; \ static constexpr TypeObj GetStaticTypeObj() { \ constexpr ClassTokenType Token = ClassToken(); \ @@ -224,9 +225,9 @@ private: template <typename T> class KScopedAutoObject { +public: YUZU_NON_COPYABLE(KScopedAutoObject); -public: constexpr KScopedAutoObject() = default; constexpr KScopedAutoObject(T* o) : m_obj(o) { diff --git a/src/core/hle/kernel/k_auto_object_container.h b/src/core/hle/kernel/k_auto_object_container.h index 4eadfe99d..697cc4289 100644 --- a/src/core/hle/kernel/k_auto_object_container.h +++ b/src/core/hle/kernel/k_auto_object_container.h @@ -16,13 +16,12 @@ class KernelCore; class KProcess; class KAutoObjectWithListContainer { +public: YUZU_NON_COPYABLE(KAutoObjectWithListContainer); YUZU_NON_MOVEABLE(KAutoObjectWithListContainer); -public: using ListType = boost::intrusive::rbtree<KAutoObjectWithList>; -public: class ListAccessor : public KScopedLightLock { public: explicit ListAccessor(KAutoObjectWithListContainer* container) @@ -48,7 +47,6 @@ public: friend class ListAccessor; -public: KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {} void Initialize() {} diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp index d69f7ffb7..0b225e8e0 100644 --- a/src/core/hle/kernel/k_code_memory.cpp +++ b/src/core/hle/kernel/k_code_memory.cpp @@ -2,6 +2,7 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/alignment.h" #include "common/common_types.h" #include "core/device_memory.h" #include "core/hle/kernel/k_auto_object.h" @@ -28,8 +29,7 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr auto& page_table = m_owner->PageTable(); // Construct the page group. - KMemoryInfo kBlockInfo = page_table.QueryInfo(addr); - m_page_group = KPageLinkedList(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages()); + m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize)); // Lock the memory. R_TRY(page_table.LockForCodeMemory(addr, size)) @@ -143,4 +143,4 @@ ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) { return ResultSuccess; } -} // namespace Kernel
\ No newline at end of file +} // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index aadcc297a..8e2a9593c 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -244,7 +244,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { { KScopedSchedulerLock sl(kernel); - auto it = thread_tree.nfind_light({cv_key, -1}); + auto it = thread_tree.nfind_key({cv_key, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) { KThread* target_thread = std::addressof(*it); diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index 0720efece..2e0e8de80 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -14,7 +14,7 @@ KEvent::KEvent(KernelCore& kernel_) KEvent::~KEvent() = default; -void KEvent::Initialize(std::string&& name_) { +void KEvent::Initialize(std::string&& name_, KProcess* owner_) { // Increment reference count. // Because reference count is one on creation, this will result // in a reference count of two. Thus, when both readable and @@ -30,10 +30,8 @@ void KEvent::Initialize(std::string&& name_) { writable_event.Initialize(this, name_ + ":Writable"); // Set our owner process. - owner = kernel.CurrentProcess(); - if (owner) { - owner->Open(); - } + owner = owner_; + owner->Open(); // Mark initialized. name = std::move(name_); @@ -47,10 +45,8 @@ void KEvent::Finalize() { void KEvent::PostDestroy(uintptr_t arg) { // Release the event count resource the owner process holds. KProcess* owner = reinterpret_cast<KProcess*>(arg); - if (owner) { - owner->GetResourceLimit()->Release(LimitableResource::Events, 1); - owner->Close(); - } + owner->GetResourceLimit()->Release(LimitableResource::Events, 1); + owner->Close(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h index 3d3ec99e2..de9732ddf 100644 --- a/src/core/hle/kernel/k_event.h +++ b/src/core/hle/kernel/k_event.h @@ -22,7 +22,7 @@ public: explicit KEvent(KernelCore& kernel_); ~KEvent() override; - void Initialize(std::string&& name); + void Initialize(std::string&& name, KProcess* owner_); void Finalize() override; diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp index cf95f0852..db7512ee7 100644 --- a/src/core/hle/kernel/k_handle_table.cpp +++ b/src/core/hle/kernel/k_handle_table.cpp @@ -63,7 +63,7 @@ bool KHandleTable::Remove(Handle handle) { return true; } -ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { +ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); @@ -75,7 +75,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { const auto linear_id = this->AllocateLinearId(); const auto index = this->AllocateEntry(); - m_entry_infos[index].info = {.linear_id = linear_id, .type = type}; + m_entry_infos[index].linear_id = linear_id; m_objects[index] = obj; obj->Open(); @@ -116,7 +116,7 @@ void KHandleTable::Unreserve(Handle handle) { } } -void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { +void KHandleTable::Register(Handle handle, KAutoObject* obj) { KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); @@ -132,7 +132,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { // Set the entry. 
ASSERT(m_objects[index] == nullptr); - m_entry_infos[index].info = {.linear_id = static_cast<u16>(linear_id), .type = type}; + m_entry_infos[index].linear_id = static_cast<u16>(linear_id); m_objects[index] = obj; obj->Open(); diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h index 4b114ec2f..dd27689b6 100644 --- a/src/core/hle/kernel/k_handle_table.h +++ b/src/core/hle/kernel/k_handle_table.h @@ -22,13 +22,12 @@ namespace Kernel { class KernelCore; class KHandleTable { +public: YUZU_NON_COPYABLE(KHandleTable); YUZU_NON_MOVEABLE(KHandleTable); -public: static constexpr size_t MaxTableSize = 1024; -public: explicit KHandleTable(KernelCore& kernel_); ~KHandleTable(); @@ -43,7 +42,7 @@ public: m_free_head_index = -1; // Free all entries. - for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) { + for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) { m_objects[i] = nullptr; m_entry_infos[i].next_free_index = i - 1; m_free_head_index = i; @@ -105,17 +104,8 @@ public: ResultCode Reserve(Handle* out_handle); void Unreserve(Handle handle); - template <typename T> - ResultCode Add(Handle* out_handle, T* obj) { - static_assert(std::is_base_of_v<KAutoObject, T>); - return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken()); - } - - template <typename T> - void Register(Handle handle, T* obj) { - static_assert(std::is_base_of_v<KAutoObject, T>); - return this->Register(handle, obj, obj->GetTypeObj().GetClassToken()); - } + ResultCode Add(Handle* out_handle, KAutoObject* obj); + void Register(Handle handle, KAutoObject* obj); template <typename T> bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const { @@ -161,9 +151,6 @@ public: } private: - ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type); - void Register(Handle handle, KAutoObject* obj, u16 type); - s32 AllocateEntry() { ASSERT(m_count < m_table_size); @@ -180,7 +167,7 @@ private: ASSERT(m_count > 0); m_objects[index] = nullptr; - m_entry_infos[index].next_free_index = m_free_head_index; + m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index); m_free_head_index = index; @@ -279,19 +266,13 @@ private: } union EntryInfo { - struct { - u16 linear_id; - u16 type; - } info; - s32 next_free_index; + u16 linear_id; + s16 next_free_index; constexpr u16 GetLinearId() const { - return info.linear_id; - } - constexpr u16 GetType() const { - return info.type; + return linear_id; } - constexpr s32 GetNextFreeIndex() const { + constexpr s16 GetNextFreeIndex() const { return next_free_index; } }; diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 57ff538cc..0858827b6 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h @@ -57,11 +57,11 @@ constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemor constexpr std::size_t KernelInitialPageHeapSize = 128_KiB; constexpr std::size_t KernelSlabHeapDataSize = 5_MiB; -constexpr std::size_t KernelSlabHeapGapsSize = 2_MiB - 64_KiB; -constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize; +constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB; +constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. 
-constexpr std::size_t KernelSlabHeapAdditionalSize = 416_KiB; +constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000; constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; @@ -173,6 +173,10 @@ public: return Dereference(FindVirtualLinear(address)); } + const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const { + return Dereference(FindPhysicalLinear(address)); + } + const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); } diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 1b44541b1..a2f18f643 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -10,189 +10,412 @@ #include "common/scope_exit.h" #include "core/core.h" #include "core/device_memory.h" +#include "core/hle/kernel/initial_process.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_page_linked_list.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" +#include "core/memory.h" namespace Kernel { -KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {} +namespace { + +constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { + if ((type | KMemoryRegionType_DramApplicationPool) == type) { + return KMemoryManager::Pool::Application; + } else if ((type | KMemoryRegionType_DramAppletPool) == type) { + return KMemoryManager::Pool::Applet; + } else if ((type | KMemoryRegionType_DramSystemPool) == type) { + return KMemoryManager::Pool::System; + } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { + return KMemoryManager::Pool::SystemNonSecure; + } else { + UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool"); + return {}; + } +} -std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) { - const auto size{end_address - start_address}; +} // namespace + +KMemoryManager::KMemoryManager(Core::System& system_) + : system{system_}, pool_locks{ + KLightLock{system_.Kernel()}, + KLightLock{system_.Kernel()}, + KLightLock{system_.Kernel()}, + KLightLock{system_.Kernel()}, + } {} + +void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { + + // Clear the management region to zero. + const VAddr management_region_end = management_region + management_region_size; + + // Reset our manager count. + num_managers = 0; + + // Traverse the virtual memory layout tree, initializing each manager as appropriate. + while (num_managers != MaxManagerCount) { + // Locate the region that should initialize the current manager. + PAddr region_address = 0; + size_t region_size = 0; + Pool region_pool = Pool::Count; + for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { + // We only care about regions that we need to create managers for. 
+ if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { + continue; + } - // Calculate metadata sizes - const auto ref_count_size{(size / PageSize) * sizeof(u16)}; - const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)}; - const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)}; - const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)}; - const auto total_metadata_size{manager_size + page_heap_size}; - ASSERT(manager_size <= total_metadata_size); - ASSERT(Common::IsAligned(total_metadata_size, PageSize)); + // We want to initialize the managers in order. + if (it.GetAttributes() != num_managers) { + continue; + } - // Setup region - pool = new_pool; + const PAddr cur_start = it.GetAddress(); + const PAddr cur_end = it.GetEndAddress(); + + // Validate the region. + ASSERT(cur_end != 0); + ASSERT(cur_start != 0); + ASSERT(it.GetSize() > 0); + + // Update the region's extents. + if (region_address == 0) { + region_address = cur_start; + region_size = it.GetSize(); + region_pool = GetPoolFromMemoryRegionType(it.GetType()); + } else { + ASSERT(cur_start == region_address + region_size); + + // Update the size. + region_size = cur_end - region_address; + ASSERT(GetPoolFromMemoryRegionType(it.GetType()) == region_pool); + } + } + + // If we didn't find a region, we're done. + if (region_size == 0) { + break; + } - // Initialize the manager's KPageHeap - heap.Initialize(start_address, size, page_heap_size); + // Initialize a new manager for the region. + Impl* manager = std::addressof(managers[num_managers++]); + ASSERT(num_managers <= managers.size()); + + const size_t cur_size = manager->Initialize(region_address, region_size, management_region, + management_region_end, region_pool); + management_region += cur_size; + ASSERT(management_region <= management_region_end); + + // Insert the manager into the pool list. + const auto region_pool_index = static_cast<u32>(region_pool); + if (pool_managers_tail[region_pool_index] == nullptr) { + pool_managers_head[region_pool_index] = manager; + } else { + pool_managers_tail[region_pool_index]->SetNext(manager); + manager->SetPrev(pool_managers_tail[region_pool_index]); + } + pool_managers_tail[region_pool_index] = manager; + } - // Free the memory to the heap - heap.Free(start_address, size / PageSize); + // Free each region to its corresponding heap. + size_t reserved_sizes[MaxManagerCount] = {}; + const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); + const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; + const PAddr ini_last = ini_end - 1; + for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { + if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { + // Get the manager for the region. + auto index = it.GetAttributes(); + auto& manager = managers[index]; + + const PAddr cur_start = it.GetAddress(); + const PAddr cur_last = it.GetLastAddress(); + const PAddr cur_end = it.GetEndAddress(); + + if (cur_start <= ini_start && ini_last <= cur_last) { + // Free memory before the ini to the heap. + if (cur_start != ini_start) { + manager.Free(cur_start, (ini_start - cur_start) / PageSize); + } - // Update the heap's used size - heap.UpdateUsedSize(); + // Open/reserve the ini memory. + manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize); + reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax; - return total_metadata_size; -} + // Free memory after the ini to the heap. 
+ if (ini_last != cur_last) { + ASSERT(cur_end != 0); + manager.Free(ini_end, cur_end - ini_end); + } + } else { + // Ensure there's no partial overlap with the ini image. + if (cur_start <= ini_last) { + ASSERT(cur_last < ini_start); + } else { + // Otherwise, check the region for general validity. + ASSERT(cur_end != 0); + } -void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) { - ASSERT(pool < Pool::Count); - managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address); + // Free the memory to the heap. + manager.Free(cur_start, it.GetSize() / PageSize); + } + } + } + + // Update the used size for all managers. + for (size_t i = 0; i < num_managers; ++i) { + managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); + } } -VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages, - u32 option) { - // Early return if we're allocating no pages +PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { + // Early return if we're allocating no pages. if (num_pages == 0) { - return {}; + return 0; } - // Lock the pool that we're allocating from + // Lock the pool that we're allocating from. const auto [pool, dir] = DecodeOption(option); - const auto pool_index{static_cast<std::size_t>(pool)}; - std::lock_guard lock{pool_locks[pool_index]}; - - // Choose a heap based on our page size request - const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; - - // Loop, trying to iterate from each block - // TODO (bunnei): Support multiple managers - Impl& chosen_manager{managers[pool_index]}; - VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)}; + KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); + + // Choose a heap based on our page size request. + const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); + + // Loop, trying to iterate from each block. + Impl* chosen_manager = nullptr; + PAddr allocated_block = 0; + for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; + chosen_manager = this->GetNextManager(chosen_manager, dir)) { + allocated_block = chosen_manager->AllocateBlock(heap_index, true); + if (allocated_block != 0) { + break; + } + } - // If we failed to allocate, quit now - if (!allocated_block) { - return {}; + // If we failed to allocate, quit now. + if (allocated_block == 0) { + return 0; } - // If we allocated more than we need, free some - const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)}; + // If we allocated more than we need, free some. + const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); if (allocated_pages > num_pages) { - chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); + chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); } + // Open the first reference to the pages. + chosen_manager->OpenFirst(allocated_block, num_pages); + return allocated_block; } -ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, - Direction dir, u32 heap_fill_value) { - ASSERT(page_list.GetNumPages() == 0); +ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool, + Direction dir, bool random) { + // Choose a heap based on our page size request. 
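// [Editor note, not part of the diff] GetBlockIndex() returns the largest block class
// whose page count does not exceed num_pages; allocation then walks down to smaller
// classes. With MemoryBlockPageShifts {0xC, 0x10, 0x15, ...} the classes hold
// 1, 16, 512, ... pages, so e.g. num_pages == 20 yields index 1: one 16-page block is
// taken there, and the remaining 4 pages are satisfied at index 0 by the loop below.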
+ const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); + R_UNLESS(0 <= heap_index, ResultOutOfMemory); + + // Ensure that we don't leave anything un-freed. + auto group_guard = SCOPE_GUARD({ + for (const auto& it : out->Nodes()) { + auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); + const size_t num_pages_to_free = + std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); + manager.Free(it.GetAddress(), num_pages_to_free); + } + }); - // Early return if we're allocating no pages - if (num_pages == 0) { - return ResultSuccess; - } + // Keep allocating until we've allocated all our pages. + for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { + const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index); + for (Impl* cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; + cur_manager = this->GetNextManager(cur_manager, dir)) { + while (num_pages >= pages_per_alloc) { + // Allocate a block. + PAddr allocated_block = cur_manager->AllocateBlock(index, random); + if (allocated_block == 0) { + break; + } - // Lock the pool that we're allocating from - const auto pool_index{static_cast<std::size_t>(pool)}; - std::lock_guard lock{pool_locks[pool_index]}; + // Safely add it to our group. + { + auto block_guard = + SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); + R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); + block_guard.Cancel(); + } - // Choose a heap based on our page size request - const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)}; - if (heap_index < 0) { - return ResultOutOfMemory; + num_pages -= pages_per_alloc; + } + } } - // TODO (bunnei): Support multiple managers - Impl& chosen_manager{managers[pool_index]}; + // Only succeed if we allocated as many pages as we wanted. + R_UNLESS(num_pages == 0, ResultOutOfMemory); - // Ensure that we don't leave anything un-freed - auto group_guard = detail::ScopeExit([&] { - for (const auto& it : page_list.Nodes()) { - const auto min_num_pages{std::min<size_t>( - it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; - chosen_manager.Free(it.GetAddress(), min_num_pages); - } - }); + // We succeeded! + group_guard.Cancel(); + return ResultSuccess; +} - // Keep allocating until we've allocated all our pages - for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { - const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)}; +ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) { + ASSERT(out != nullptr); + ASSERT(out->GetNumPages() == 0); - while (num_pages >= pages_per_alloc) { - // Allocate a block - VAddr allocated_block{chosen_manager.AllocateBlock(index, false)}; - if (!allocated_block) { - break; - } + // Early return if we're allocating no pages. + R_SUCCEED_IF(num_pages == 0); - // Safely add it to our group - { - auto block_guard = detail::ScopeExit( - [&] { chosen_manager.Free(allocated_block, pages_per_alloc); }); + // Lock the pool that we're allocating from. + const auto [pool, dir] = DecodeOption(option); + KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); + + // Allocate the page group. + R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); + + // Open the first reference to the pages. 
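// [Editor note, not part of the diff] Reference-count contract, as asserted by the
// Impl methods in the header: OpenFirst() takes each page's refcount 0 -> 1 (fresh
// allocation), Open() requires it to already be nonzero, and Close() decrements and
// returns pages reaching 0 to the heap. The loop below advances in per-manager chunks
// (GetPageOffsetToEnd) because a single block may straddle two managers' ranges.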
+ for (const auto& block : out->Nodes()) { + PAddr cur_address = block.GetAddress(); + size_t remaining_pages = block.GetNumPages(); + while (remaining_pages > 0) { + // Get the manager for the current address. + auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); + + // Process part or all of the block. + const size_t cur_pages = + std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); + manager.OpenFirst(cur_address, cur_pages); + + // Advance. + cur_address += cur_pages * PageSize; + remaining_pages -= cur_pages; + } + } - if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)}; - result.IsError()) { - return result; - } + return ResultSuccess; +} - block_guard.Cancel(); - } +ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, + u32 option, u64 process_id, u8 fill_pattern) { + ASSERT(out != nullptr); + ASSERT(out->GetNumPages() == 0); - num_pages -= pages_per_alloc; - } - } + // Decode the option. + const auto [pool, dir] = DecodeOption(option); - // Clear allocated memory. - for (const auto& it : page_list.Nodes()) { - std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, - it.GetSize()); + // Allocate the memory. + { + // Lock the pool that we're allocating from. + KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); + + // Allocate the page group. + R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); + + // Open the first reference to the pages. + for (const auto& block : out->Nodes()) { + PAddr cur_address = block.GetAddress(); + size_t remaining_pages = block.GetNumPages(); + while (remaining_pages > 0) { + // Get the manager for the current address. + auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); + + // Process part or all of the block. + const size_t cur_pages = + std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); + manager.OpenFirst(cur_address, cur_pages); + + // Advance. + cur_address += cur_pages * PageSize; + remaining_pages -= cur_pages; + } + } } - // Only succeed if we allocated as many pages as we wanted - if (num_pages) { - return ResultOutOfMemory; + // Set all the allocated memory. + for (const auto& block : out->Nodes()) { + std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern, + block.GetSize()); } - // We succeeded! - group_guard.Cancel(); - return ResultSuccess; } -ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, - Direction dir, u32 heap_fill_value) { - // Early return if we're freeing no pages - if (!num_pages) { - return ResultSuccess; +void KMemoryManager::Open(PAddr address, size_t num_pages) { + // Repeatedly open references until we've done so for all pages. + while (num_pages) { + auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); + const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); + + { + KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); + manager.Open(address, cur_pages); + } + + num_pages -= cur_pages; + address += cur_pages * PageSize; } +} - // Lock the pool that we're freeing from - const auto pool_index{static_cast<std::size_t>(pool)}; - std::lock_guard lock{pool_locks[pool_index]}; +void KMemoryManager::Close(PAddr address, size_t num_pages) { + // Repeatedly close references until we've done so for all pages. 
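// [Editor note, not part of the diff] Note the lock granularity in Open()/Close():
// the pool lock is taken per chunk rather than around the whole loop, since successive
// chunks can belong to managers of different pools (each manager reports its pool via
// GetPool()).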
+ while (num_pages) { + auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); + const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); - // TODO (bunnei): Support multiple managers - Impl& chosen_manager{managers[pool_index]}; + { + KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); + manager.Close(address, cur_pages); + } - // Free all of the pages - for (const auto& it : page_list.Nodes()) { - const auto min_num_pages{std::min<size_t>( - it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; - chosen_manager.Free(it.GetAddress(), min_num_pages); + num_pages -= cur_pages; + address += cur_pages * PageSize; } +} - return ResultSuccess; +void KMemoryManager::Close(const KPageLinkedList& pg) { + for (const auto& node : pg.Nodes()) { + Close(node.GetAddress(), node.GetNumPages()); + } +} +void KMemoryManager::Open(const KPageLinkedList& pg) { + for (const auto& node : pg.Nodes()) { + Open(node.GetAddress(), node.GetNumPages()); + } +} + +size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, + VAddr management_end, Pool p) { + // Calculate management sizes. + const size_t ref_count_size = (size / PageSize) * sizeof(u16); + const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size); + const size_t manager_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize); + const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size); + const size_t total_management_size = manager_size + page_heap_size; + ASSERT(manager_size <= total_management_size); + ASSERT(management + total_management_size <= management_end); + ASSERT(Common::IsAligned(total_management_size, PageSize)); + + // Setup region. + pool = p; + management_region = management; + page_reference_counts.resize( + Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); + ASSERT(Common::IsAligned(management_region, PageSize)); + + // Initialize the manager's KPageHeap. 
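// [Editor note, not part of the diff] Per-manager management memory, as sized above:
//     [ optimize map | page refcounts ]  -> manager_size (page-aligned)
//     [ KPageHeap block bitmaps       ]  -> page_heap_size
// so the heap is handed `management + manager_size` below. In this port the refcounts
// actually live in a host-side std::vector (page_reference_counts), so the management
// region mirrors the real kernel's sizing rather than being indexed directly.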
+ heap.Initialize(address, size, management + manager_size, page_heap_size); + + return total_management_size; } -std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) { - const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16); - const std::size_t optimize_map_size = +size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { + const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); + const size_t optimize_map_size = (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / Common::BitSize<u64>()) * sizeof(u64); - const std::size_t manager_meta_size = - Common::AlignUp(optimize_map_size + ref_count_size, PageSize); - const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); + const size_t manager_meta_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize); + const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); return manager_meta_size + page_heap_size; } diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index abd6c8ace..18775b262 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h @@ -5,10 +5,12 @@ #pragma once #include <array> -#include <mutex> #include <tuple> +#include "common/common_funcs.h" #include "common/common_types.h" +#include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_page_heap.h" #include "core/hle/result.h" @@ -20,8 +22,11 @@ namespace Kernel { class KPageLinkedList; -class KMemoryManager final : NonCopyable { +class KMemoryManager final { public: + YUZU_NON_COPYABLE(KMemoryManager); + YUZU_NON_MOVEABLE(KMemoryManager); + enum class Pool : u32 { Application = 0, Applet = 1, @@ -48,22 +53,33 @@ public: explicit KMemoryManager(Core::System& system_); - constexpr std::size_t GetSize(Pool pool) const { - return managers[static_cast<std::size_t>(pool)].GetSize(); + void Initialize(VAddr management_region, size_t management_region_size); + + constexpr size_t GetSize(Pool pool) const { + constexpr Direction GetSizeDirection = Direction::FromFront; + size_t total = 0; + for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; + manager = this->GetNextManager(manager, GetSizeDirection)) { + total += manager->GetSize(); + } + return total; } - void InitializeManager(Pool pool, u64 start_address, u64 end_address); + PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); + ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option); + ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option, + u64 process_id, u8 fill_pattern); + + static constexpr size_t MaxManagerCount = 10; - VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); - ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir, - u32 heap_fill_value = 0); - ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir, - u32 heap_fill_value = 0); + void Close(PAddr address, size_t num_pages); + void Close(const KPageLinkedList& pg); - static constexpr std::size_t MaxManagerCount = 10; + void Open(PAddr address, size_t num_pages); + void Open(const KPageLinkedList& pg); public: - static std::size_t CalculateManagementOverheadSize(std::size_t region_size) { + static size_t 
CalculateManagementOverheadSize(size_t region_size) { return Impl::CalculateManagementOverheadSize(region_size); } @@ -88,38 +104,34 @@ public: } private: - class Impl final : NonCopyable { - private: - using RefCount = u16; - - private: - KPageHeap heap; - Pool pool{}; - + class Impl final { public: - static std::size_t CalculateManagementOverheadSize(std::size_t region_size); - - static constexpr std::size_t CalculateOptimizedProcessOverheadSize( - std::size_t region_size) { - return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / - Common::BitSize<u64>()) * - sizeof(u64); - } + YUZU_NON_COPYABLE(Impl); + YUZU_NON_MOVEABLE(Impl); - public: Impl() = default; + ~Impl() = default; - std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); + size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, + Pool p); VAddr AllocateBlock(s32 index, bool random) { return heap.AllocateBlock(index, random); } - void Free(VAddr addr, std::size_t num_pages) { + void Free(VAddr addr, size_t num_pages) { heap.Free(addr, num_pages); } - constexpr std::size_t GetSize() const { + void SetInitialUsedHeapSize(size_t reserved_size) { + heap.SetInitialUsedSize(reserved_size); + } + + constexpr Pool GetPool() const { + return pool; + } + + constexpr size_t GetSize() const { return heap.GetSize(); } @@ -130,12 +142,137 @@ private: constexpr VAddr GetEndAddress() const { return heap.GetEndAddress(); } + + constexpr size_t GetPageOffset(PAddr address) const { + return heap.GetPageOffset(address); + } + + constexpr size_t GetPageOffsetToEnd(PAddr address) const { + return heap.GetPageOffsetToEnd(address); + } + + constexpr void SetNext(Impl* n) { + next = n; + } + + constexpr void SetPrev(Impl* n) { + prev = n; + } + + constexpr Impl* GetNext() const { + return next; + } + + constexpr Impl* GetPrev() const { + return prev; + } + + void OpenFirst(PAddr address, size_t num_pages) { + size_t index = this->GetPageOffset(address); + const size_t end = index + num_pages; + while (index < end) { + const RefCount ref_count = (++page_reference_counts[index]); + ASSERT(ref_count == 1); + + index++; + } + } + + void Open(PAddr address, size_t num_pages) { + size_t index = this->GetPageOffset(address); + const size_t end = index + num_pages; + while (index < end) { + const RefCount ref_count = (++page_reference_counts[index]); + ASSERT(ref_count > 1); + + index++; + } + } + + void Close(PAddr address, size_t num_pages) { + size_t index = this->GetPageOffset(address); + const size_t end = index + num_pages; + + size_t free_start = 0; + size_t free_count = 0; + while (index < end) { + ASSERT(page_reference_counts[index] > 0); + const RefCount ref_count = (--page_reference_counts[index]); + + // Keep track of how many zero refcounts we see in a row, to minimize calls to free. 
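// [Editor note, not part of the diff] Pages whose refcount reaches zero are coalesced
// into runs [free_start, free_start + free_count) and each run costs one Free() call;
// a page left with a nonzero count flushes the current run. Example: post-decrement
// counts 1,1,0,0,0,2,0 across seven pages produce two Free() calls, one for the
// 3-page run and one for the trailing 1-page run flushed after the loop.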
+ if (ref_count == 0) { + if (free_count > 0) { + free_count++; + } else { + free_start = index; + free_count = 1; + } + } else { + if (free_count > 0) { + this->Free(heap.GetAddress() + free_start * PageSize, free_count); + free_count = 0; + } + } + + index++; + } + + if (free_count > 0) { + this->Free(heap.GetAddress() + free_start * PageSize, free_count); + } + } + + static size_t CalculateManagementOverheadSize(size_t region_size); + + static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { + return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / + Common::BitSize<u64>()) * + sizeof(u64); + } + + private: + using RefCount = u16; + + KPageHeap heap; + std::vector<RefCount> page_reference_counts; + VAddr management_region{}; + Pool pool{}; + Impl* next{}; + Impl* prev{}; }; private: + Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { + return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; + } + + const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { + return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; + } + + constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { + return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] + : pool_managers_head[static_cast<size_t>(pool)]; + } + + constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { + if (dir == Direction::FromBack) { + return cur->GetPrev(); + } else { + return cur->GetNext(); + } + } + + ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool, + Direction dir, bool random); + +private: Core::System& system; - std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks; + std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; + std::array<Impl*, MaxManagerCount> pool_managers_head{}; + std::array<Impl*, MaxManagerCount> pool_managers_tail{}; std::array<Impl, MaxManagerCount> managers; + size_t num_managers{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h index 90ab8fd62..e9bdf4e59 100644 --- a/src/core/hle/kernel/k_memory_region.h +++ b/src/core/hle/kernel/k_memory_region.h @@ -5,6 +5,7 @@ #pragma once #include "common/assert.h" +#include "common/common_funcs.h" #include "common/common_types.h" #include "common/intrusive_red_black_tree.h" #include "core/hle/kernel/k_memory_region_type.h" @@ -13,11 +14,13 @@ namespace Kernel { class KMemoryRegionAllocator; -class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion>, - NonCopyable { +class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion> { friend class KMemoryRegionTree; public: + YUZU_NON_COPYABLE(KMemoryRegion); + YUZU_NON_MOVEABLE(KMemoryRegion); + constexpr KMemoryRegion() = default; constexpr KMemoryRegion(u64 address_, u64 last_address_) : address{address_}, last_address{last_address_} {} @@ -29,6 +32,8 @@ public: : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_, type_id_) {} + ~KMemoryRegion() = default; + static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) { if (lhs.GetAddress() < rhs.GetAddress()) { return -1; @@ -39,16 +44,6 @@ public: } } -private: - constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) { - address = a; - pair_address = p; - last_address = la; - attributes = r; - type_id = t; - } - -public: constexpr u64 
GetAddress() const { return address; } @@ -108,6 +103,14 @@ public: } private: + constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) { + address = a; + pair_address = p; + last_address = la; + attributes = r; + type_id = t; + } + u64 address{}; u64 last_address{}; u64 pair_address{}; @@ -115,8 +118,25 @@ private: u32 type_id{}; }; -class KMemoryRegionTree final : NonCopyable { +class KMemoryRegionTree final { +private: + using TreeType = + Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>; + public: + YUZU_NON_COPYABLE(KMemoryRegionTree); + YUZU_NON_MOVEABLE(KMemoryRegionTree); + + using value_type = TreeType::value_type; + using size_type = TreeType::size_type; + using difference_type = TreeType::difference_type; + using pointer = TreeType::pointer; + using const_pointer = TreeType::const_pointer; + using reference = TreeType::reference; + using const_reference = TreeType::const_reference; + using iterator = TreeType::iterator; + using const_iterator = TreeType::const_iterator; + struct DerivedRegionExtents { const KMemoryRegion* first_region{}; const KMemoryRegion* last_region{}; @@ -140,29 +160,9 @@ public: } }; -private: - using TreeType = - Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>; - -public: - using value_type = TreeType::value_type; - using size_type = TreeType::size_type; - using difference_type = TreeType::difference_type; - using pointer = TreeType::pointer; - using const_pointer = TreeType::const_pointer; - using reference = TreeType::reference; - using const_reference = TreeType::const_reference; - using iterator = TreeType::iterator; - using const_iterator = TreeType::const_iterator; - -private: - TreeType m_tree{}; - KMemoryRegionAllocator& memory_region_allocator; - -public: explicit KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_); + ~KMemoryRegionTree() = default; -public: KMemoryRegion* FindModifiable(u64 address) { if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) { return std::addressof(*it); @@ -241,7 +241,6 @@ public: return GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id)); } -public: void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0); bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); @@ -252,7 +251,6 @@ public: return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; } -public: // Iterator accessors. iterator begin() { return m_tree.begin(); @@ -322,13 +320,21 @@ public: iterator nfind(const_reference ref) const { return m_tree.nfind(ref); } + +private: + TreeType m_tree{}; + KMemoryRegionAllocator& memory_region_allocator; }; -class KMemoryRegionAllocator final : NonCopyable { +class KMemoryRegionAllocator final { public: + YUZU_NON_COPYABLE(KMemoryRegionAllocator); + YUZU_NON_MOVEABLE(KMemoryRegionAllocator); + static constexpr size_t MaxMemoryRegions = 200; constexpr KMemoryRegionAllocator() = default; + constexpr ~KMemoryRegionAllocator() = default; template <typename... Args> KMemoryRegion* Allocate(Args&&... 
args) { diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index a05e66677..0baeddf51 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h @@ -14,7 +14,8 @@ namespace Kernel { enum KMemoryRegionType : u32 { - KMemoryRegionAttr_CarveoutProtected = 0x04000000, + KMemoryRegionAttr_CarveoutProtected = 0x02000000, + KMemoryRegionAttr_Uncached = 0x04000000, KMemoryRegionAttr_DidKernelMap = 0x08000000, KMemoryRegionAttr_ShouldKernelMap = 0x10000000, KMemoryRegionAttr_UserReadOnly = 0x20000000, @@ -239,6 +240,11 @@ static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); +// UNUSED: .DeriveSparse(2, 2, 0); +constexpr auto KMemoryRegionType_VirtualDramUnknownDebug = + KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); +static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); + constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); constexpr auto KMemoryRegionType_VirtualDramPoolManagement = @@ -330,6 +336,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { return KMemoryRegionType_VirtualDramKernelTraceBuffer; } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { return KMemoryRegionType_VirtualDramKernelPtHeap; + } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { + return KMemoryRegionType_VirtualDramUnknownDebug; } else { return KMemoryRegionType_Dram; } diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp new file mode 100644 index 000000000..f7df4a9a8 --- /dev/null +++ b/src/core/hle/kernel/k_page_buffer.cpp @@ -0,0 +1,19 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/alignment.h" +#include "common/assert.h" +#include "core/core.h" +#include "core/device_memory.h" +#include "core/hle/kernel/k_page_buffer.h" +#include "core/hle/kernel/memory_types.h" + +namespace Kernel { + +KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) { + ASSERT(Common::IsAligned(phys_addr, PageSize)); + return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr)); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h new file mode 100644 index 000000000..6ff3c1568 --- /dev/null +++ b/src/core/hle/kernel/k_page_buffer.h @@ -0,0 +1,29 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
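// [Editor note, not part of the diff] KPageBuffer below is a page-sized, page-aligned
// POD whose only member is a PageSize-byte array (the two static_asserts pin this
// down). It is slab-allocated via KSlabAllocated and serves as the raw backing unit
// for kernel-owned pages such as the new thread-local pages, while
// FromPhysicalAddress() merely reinterprets the DeviceMemory host pointer for a
// page-aligned physical address.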
+ +#pragma once + +#include <array> + +#include "common/common_types.h" +#include "core/device_memory.h" +#include "core/hle/kernel/memory_types.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +class KPageBuffer final : public KSlabAllocated<KPageBuffer> { +public: + KPageBuffer() = default; + + static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); + +private: + [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; +}; + +static_assert(sizeof(KPageBuffer) == PageSize); +static_assert(alignof(KPageBuffer) == PageSize); + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp index 29d996d62..97a5890a0 100644 --- a/src/core/hle/kernel/k_page_heap.cpp +++ b/src/core/hle/kernel/k_page_heap.cpp @@ -7,35 +7,51 @@ namespace Kernel { -void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) { - // Check our assumptions - ASSERT(Common::IsAligned((address), PageSize)); +void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address, + size_t management_size, const size_t* block_shifts, + size_t num_block_shifts) { + // Check our assumptions. + ASSERT(Common::IsAligned(address, PageSize)); ASSERT(Common::IsAligned(size, PageSize)); + ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts); + const VAddr management_end = management_address + management_size; - // Set our members - heap_address = address; - heap_size = size; - - // Setup bitmaps - metadata.resize(metadata_size / sizeof(u64)); - u64* cur_bitmap_storage{metadata.data()}; - for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { - const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; - const std::size_t next_block_shift{ - (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; - cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift, - next_block_shift, cur_bitmap_storage); + // Set our members. + m_heap_address = address; + m_heap_size = size; + m_num_blocks = num_block_shifts; + + // Setup bitmaps. + m_management_data.resize(management_size / sizeof(u64)); + u64* cur_bitmap_storage{m_management_data.data()}; + for (size_t i = 0; i < num_block_shifts; i++) { + const size_t cur_block_shift = block_shifts[i]; + const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0; + cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift, + next_block_shift, cur_bitmap_storage); } + + // Ensure we didn't overextend our bounds. 
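// [Editor note, not part of the diff] Each Block::Initialize() call consumes part of
// the shared bitmap storage and returns the next free u64 slot, so after the loop
// cur_bitmap_storage points one past the last word actually used; the assertion below
// checks the chain stayed inside the region sized by CalculateManagementOverheadSize().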
+ ASSERT(VAddr(cur_bitmap_storage) <= management_end); +} + +size_t KPageHeap::GetNumFreePages() const { + size_t num_free = 0; + + for (size_t i = 0; i < m_num_blocks; i++) { + num_free += m_blocks[i].GetNumFreePages(); + } + + return num_free; } -VAddr KPageHeap::AllocateBlock(s32 index, bool random) { - const std::size_t needed_size{blocks[index].GetSize()}; +PAddr KPageHeap::AllocateBlock(s32 index, bool random) { + const size_t needed_size = m_blocks[index].GetSize(); - for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) { - if (const VAddr addr{blocks[i].PopBlock(random)}; addr) { - if (const std::size_t allocated_size{blocks[i].GetSize()}; - allocated_size > needed_size) { - Free(addr + needed_size, (allocated_size - needed_size) / PageSize); + for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { + if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { + if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { + this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); } return addr; } @@ -44,34 +60,34 @@ VAddr KPageHeap::AllocateBlock(s32 index, bool random) { return 0; } -void KPageHeap::FreeBlock(VAddr block, s32 index) { +void KPageHeap::FreeBlock(PAddr block, s32 index) { do { - block = blocks[index++].PushBlock(block); + block = m_blocks[index++].PushBlock(block); } while (block != 0); } -void KPageHeap::Free(VAddr addr, std::size_t num_pages) { - // Freeing no pages is a no-op +void KPageHeap::Free(PAddr addr, size_t num_pages) { + // Freeing no pages is a no-op. if (num_pages == 0) { return; } - // Find the largest block size that we can free, and free as many as possible - s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1}; - const VAddr start{addr}; - const VAddr end{(num_pages * PageSize) + addr}; - VAddr before_start{start}; - VAddr before_end{start}; - VAddr after_start{end}; - VAddr after_end{end}; + // Find the largest block size that we can free, and free as many as possible. + s32 big_index = static_cast<s32>(m_num_blocks) - 1; + const PAddr start = addr; + const PAddr end = addr + num_pages * PageSize; + PAddr before_start = start; + PAddr before_end = start; + PAddr after_start = end; + PAddr after_end = end; while (big_index >= 0) { - const std::size_t block_size{blocks[big_index].GetSize()}; - const VAddr big_start{Common::AlignUp((start), block_size)}; - const VAddr big_end{Common::AlignDown((end), block_size)}; + const size_t block_size = m_blocks[big_index].GetSize(); + const PAddr big_start = Common::AlignUp(start, block_size); + const PAddr big_end = Common::AlignDown(end, block_size); if (big_start < big_end) { - // Free as many big blocks as we can - for (auto block{big_start}; block < big_end; block += block_size) { - FreeBlock(block, big_index); + // Free as many big blocks as we can. + for (auto block = big_start; block < big_end; block += block_size) { + this->FreeBlock(block, big_index); } before_end = big_start; after_start = big_end; @@ -81,31 +97,31 @@ void KPageHeap::Free(VAddr addr, std::size_t num_pages) { } ASSERT(big_index >= 0); - // Free space before the big blocks - for (s32 i{big_index - 1}; i >= 0; i--) { - const std::size_t block_size{blocks[i].GetSize()}; + // Free space before the big blocks. 
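// [Editor note, not part of the diff] Free() splits the range into a block-aligned
// middle plus ragged ends: big blocks cover [big_start, big_end), then the two loops
// below free [before_start, before_end) and [after_start, after_end) with smaller
// classes. Illustration with made-up addresses and 64 KiB big blocks: freeing
// [0x1F000, 0x52000) frees 0x20000..0x50000 as three big blocks, 0x1F000 as one
// 4 KiB block below, and 0x50000..0x52000 as two 4 KiB blocks above.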
+ for (s32 i = big_index - 1; i >= 0; i--) { + const size_t block_size = m_blocks[i].GetSize(); while (before_start + block_size <= before_end) { before_end -= block_size; - FreeBlock(before_end, i); + this->FreeBlock(before_end, i); } } - // Free space after the big blocks - for (s32 i{big_index - 1}; i >= 0; i--) { - const std::size_t block_size{blocks[i].GetSize()}; + // Free space after the big blocks. + for (s32 i = big_index - 1; i >= 0; i--) { + const size_t block_size = m_blocks[i].GetSize(); while (after_start + block_size <= after_end) { - FreeBlock(after_start, i); + this->FreeBlock(after_start, i); after_start += block_size; } } } -std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) { - std::size_t overhead_size = 0; - for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { - const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; - const std::size_t next_block_shift{ - (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; +size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, + size_t num_block_shifts) { + size_t overhead_size = 0; + for (size_t i = 0; i < num_block_shifts; i++) { + const size_t cur_block_shift = block_shifts[i]; + const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0; overhead_size += KPageHeap::Block::CalculateManagementOverheadSize( region_size, cur_block_shift, next_block_shift); } diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h index 8d9f30523..60fff766b 100644 --- a/src/core/hle/kernel/k_page_heap.h +++ b/src/core/hle/kernel/k_page_heap.h @@ -8,183 +8,210 @@ #include <vector> #include "common/alignment.h" +#include "common/common_funcs.h" #include "common/common_types.h" #include "core/hle/kernel/k_page_bitmap.h" #include "core/hle/kernel/memory_types.h" namespace Kernel { -class KPageHeap final : NonCopyable { +class KPageHeap final { public: - static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) { - const auto target_pages{std::max(num_pages, align_pages)}; - for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) { - if (target_pages <= - (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { + YUZU_NON_COPYABLE(KPageHeap); + YUZU_NON_MOVEABLE(KPageHeap); + + KPageHeap() = default; + ~KPageHeap() = default; + + constexpr PAddr GetAddress() const { + return m_heap_address; + } + constexpr size_t GetSize() const { + return m_heap_size; + } + constexpr PAddr GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + constexpr size_t GetPageOffset(PAddr block) const { + return (block - this->GetAddress()) / PageSize; + } + constexpr size_t GetPageOffsetToEnd(PAddr block) const { + return (this->GetEndAddress() - block) / PageSize; + } + + void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address, + size_t management_size) { + return this->Initialize(heap_address, heap_size, management_address, management_size, + MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts); + } + + size_t GetFreeSize() const { + return this->GetNumFreePages() * PageSize; + } + + void SetInitialUsedSize(size_t reserved_size) { + // Check that the reserved size is valid. + const size_t free_size = this->GetNumFreePages() * PageSize; + ASSERT(m_heap_size >= free_size + reserved_size); + + // Set the initial used size. 
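// [Editor note, not part of the diff] Bookkeeping sketch: free_size is what currently
// sits in the heap's bitmaps and reserved_size is the deliberate carve-out (the INI
// image), so the value recorded below is whatever else is unavailable, typically zero
// or slack from block alignment; presumably later used-size accounting treats it as a
// baseline rather than measuring against a fully free heap.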
+ m_initial_used_size = m_heap_size - free_size - reserved_size; + } + + PAddr AllocateBlock(s32 index, bool random); + void Free(PAddr addr, size_t num_pages); + + static size_t CalculateManagementOverheadSize(size_t region_size) { + return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(), + NumMemoryBlockPageShifts); + } + + static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { + const size_t target_pages = std::max(num_pages, align_pages); + for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { + if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { return static_cast<s32>(i); } } return -1; } - static constexpr s32 GetBlockIndex(std::size_t num_pages) { - for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) { - if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { + static constexpr s32 GetBlockIndex(size_t num_pages) { + for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { + if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { return i; } } return -1; } - static constexpr std::size_t GetBlockSize(std::size_t index) { - return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index]; + static constexpr size_t GetBlockSize(size_t index) { + return size_t(1) << MemoryBlockPageShifts[index]; } - static constexpr std::size_t GetBlockNumPages(std::size_t index) { + static constexpr size_t GetBlockNumPages(size_t index) { return GetBlockSize(index) / PageSize; } private: - static constexpr std::size_t NumMemoryBlockPageShifts{7}; - static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ - 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E, - }; - - class Block final : NonCopyable { - private: - KPageBitmap bitmap; - VAddr heap_address{}; - uintptr_t end_offset{}; - std::size_t block_shift{}; - std::size_t next_block_shift{}; - + class Block final { public: + YUZU_NON_COPYABLE(Block); + YUZU_NON_MOVEABLE(Block); + Block() = default; + ~Block() = default; - constexpr std::size_t GetShift() const { - return block_shift; + constexpr size_t GetShift() const { + return m_block_shift; } - constexpr std::size_t GetNextShift() const { - return next_block_shift; + constexpr size_t GetNextShift() const { + return m_next_block_shift; } - constexpr std::size_t GetSize() const { - return static_cast<std::size_t>(1) << GetShift(); + constexpr size_t GetSize() const { + return u64(1) << this->GetShift(); } - constexpr std::size_t GetNumPages() const { - return GetSize() / PageSize; + constexpr size_t GetNumPages() const { + return this->GetSize() / PageSize; } - constexpr std::size_t GetNumFreeBlocks() const { - return bitmap.GetNumBits(); + constexpr size_t GetNumFreeBlocks() const { + return m_bitmap.GetNumBits(); } - constexpr std::size_t GetNumFreePages() const { - return GetNumFreeBlocks() * GetNumPages(); + constexpr size_t GetNumFreePages() const { + return this->GetNumFreeBlocks() * this->GetNumPages(); } - u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs, - u64* bit_storage) { - // Set shifts - block_shift = bs; - next_block_shift = nbs; - - // Align up the address - VAddr end{addr + size}; - const auto align{(next_block_shift != 0) ? 
(1ULL << next_block_shift) - : (1ULL << block_shift)}; - addr = Common::AlignDown((addr), align); - end = Common::AlignUp((end), align); - - heap_address = addr; - end_offset = (end - addr) / (1ULL << block_shift); - return bitmap.Initialize(bit_storage, end_offset); + u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) { + // Set shifts. + m_block_shift = bs; + m_next_block_shift = nbs; + + // Align up the address. + PAddr end = addr + size; + const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) + : (u64(1) << m_block_shift); + addr = Common::AlignDown(addr, align); + end = Common::AlignUp(end, align); + + m_heap_address = addr; + m_end_offset = (end - addr) / (u64(1) << m_block_shift); + return m_bitmap.Initialize(bit_storage, m_end_offset); } - VAddr PushBlock(VAddr address) { - // Set the bit for the free block - std::size_t offset{(address - heap_address) >> GetShift()}; - bitmap.SetBit(offset); + PAddr PushBlock(PAddr address) { + // Set the bit for the free block. + size_t offset = (address - m_heap_address) >> this->GetShift(); + m_bitmap.SetBit(offset); - // If we have a next shift, try to clear the blocks below and return the address - if (GetNextShift()) { - const auto diff{1ULL << (GetNextShift() - GetShift())}; + // If we have a next shift, try to clear the blocks below this one and return the new + // address. + if (this->GetNextShift()) { + const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift()); offset = Common::AlignDown(offset, diff); - if (bitmap.ClearRange(offset, diff)) { - return heap_address + (offset << GetShift()); + if (m_bitmap.ClearRange(offset, diff)) { + return m_heap_address + (offset << this->GetShift()); } } - // We couldn't coalesce, or we're already as big as possible - return 0; + // We couldn't coalesce, or we're already as big as possible. + return {}; } - VAddr PopBlock(bool random) { - // Find a free block - const s64 soffset{bitmap.FindFreeBlock(random)}; + PAddr PopBlock(bool random) { + // Find a free block. + s64 soffset = m_bitmap.FindFreeBlock(random); if (soffset < 0) { - return 0; + return {}; } - const auto offset{static_cast<std::size_t>(soffset)}; + const size_t offset = static_cast<size_t>(soffset); - // Update our tracking and return it - bitmap.ClearBit(offset); - return heap_address + (offset << GetShift()); + // Update our tracking and return it. + m_bitmap.ClearBit(offset); + return m_heap_address + (offset << this->GetShift()); } public: - static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size, - std::size_t cur_block_shift, - std::size_t next_block_shift) { - const auto cur_block_size{(1ULL << cur_block_shift)}; - const auto next_block_size{(1ULL << next_block_shift)}; - const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size}; + static constexpr size_t CalculateManagementOverheadSize(size_t region_size, + size_t cur_block_shift, + size_t next_block_shift) { + const size_t cur_block_size = (u64(1) << cur_block_shift); + const size_t next_block_size = (u64(1) << next_block_shift); + const size_t align = (next_block_shift != 0) ? 
next_block_size : cur_block_size; return KPageBitmap::CalculateManagementOverheadSize( (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size); } - }; - -public: - KPageHeap() = default; - - constexpr VAddr GetAddress() const { - return heap_address; - } - constexpr std::size_t GetSize() const { - return heap_size; - } - constexpr VAddr GetEndAddress() const { - return GetAddress() + GetSize(); - } - constexpr std::size_t GetPageOffset(VAddr block) const { - return (block - GetAddress()) / PageSize; - } - void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size); - VAddr AllocateBlock(s32 index, bool random); - void Free(VAddr addr, std::size_t num_pages); - - void UpdateUsedSize() { - used_size = heap_size - (GetNumFreePages() * PageSize); - } - - static std::size_t CalculateManagementOverheadSize(std::size_t region_size); + private: + KPageBitmap m_bitmap; + PAddr m_heap_address{}; + uintptr_t m_end_offset{}; + size_t m_block_shift{}; + size_t m_next_block_shift{}; + }; private: - constexpr std::size_t GetNumFreePages() const { - std::size_t num_free{}; + void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address, + size_t management_size, const size_t* block_shifts, size_t num_block_shifts); + size_t GetNumFreePages() const; - for (const auto& block : blocks) { - num_free += block.GetNumFreePages(); - } + void FreeBlock(PAddr block, s32 index); - return num_free; - } + static constexpr size_t NumMemoryBlockPageShifts{7}; + static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ + 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E, + }; - void FreeBlock(VAddr block, s32 index); +private: + static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, + size_t num_block_shifts); - VAddr heap_address{}; - std::size_t heap_size{}; - std::size_t used_size{}; - std::array<Block, NumMemoryBlockPageShifts> blocks{}; - std::vector<u64> metadata; +private: + PAddr m_heap_address{}; + size_t m_heap_size{}; + size_t m_initial_used_size{}; + size_t m_num_blocks{}; + std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; + std::vector<u64> m_management_data; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 2ebbc0819..02d93b12e 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -41,27 +41,12 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT } } -constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) { - if (info.GetAddress() < addr) { - return addr; - } - return info.GetAddress(); -} - -constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) { - std::size_t size{info.GetSize()}; - if (info.GetAddress() < start) { - size -= start - info.GetAddress(); - } - if (info.GetEndAddress() > end) { - size -= info.GetEndAddress() - end; - } - return size; -} - } // namespace -KPageTable::KPageTable(Core::System& system_) : system{system_} {} +KPageTable::KPageTable(Core::System& system_) + : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} + +KPageTable::~KPageTable() = default; ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, VAddr code_addr, @@ -282,96 +267,228 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory R_UNLESS(this->CanContain(addr, size, state), 
ResultInvalidCurrentMemory); // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Verify that the destination memory is unmapped. R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + KPageLinkedList pg; + R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + &pg, num_pages, + KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); - KPageLinkedList page_linked_list; - R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool, - allocation_option)); - R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup)); + R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); block_manager->Update(addr, num_pages, state, perm); return ResultSuccess; } -ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; - - const std::size_t num_pages{size / PageSize}; +ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { + // Validate the mapping request. + R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), + ResultInvalidMemoryRegion); - KMemoryState state{}; - KMemoryPermission perm{}; - CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size, - KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, - KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask, - KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + // Lock the table. + KScopedLightLock lk(general_lock); - if (IsRegionMapped(dst_addr, size)) { - return ResultInvalidCurrentMemory; - } + // Verify that the source memory is normal heap. + KMemoryState src_state{}; + KMemoryPermission src_perm{}; + std::size_t num_src_allocator_blocks{}; + R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, + src_address, size, KMemoryState::All, KMemoryState::Normal, + KMemoryPermission::All, KMemoryPermission::UserReadWrite, + KMemoryAttribute::All, KMemoryAttribute::None)); - KPageLinkedList page_linked_list; - AddRegionToPages(src_addr, num_pages, page_linked_list); + // Verify that the destination memory is unmapped. + std::size_t num_dst_allocator_blocks{}; + R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, + KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + // Map the code memory. { - auto block_guard = detail::ScopeExit( - [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); }); + // Determine the number of pages being operated on. + const std::size_t num_pages = size / PageSize; - CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, - OperationType::ChangePermissions)); - CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None)); + // Create page groups for the memory being mapped. + KPageLinkedList pg; + AddRegionToPages(src_address, num_pages, pg); - block_guard.Cancel(); - } + // Reprotect the source as kernel-read/not mapped. 
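// [Editor note, not part of the diff] No page contents are copied by MapCodeMemory():
// the physical pages stay put and only permissions change. The source range drops to
// kernel-read/not-mapped so the owner can no longer modify the code being aliased,
// and the alias at dst_address starts with the same restricted permission; callers
// are expected to re-protect the alias (e.g. to read-execute) via a separate
// permission call afterwards.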
+ const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead | + KMemoryPermission::NotMapped); + R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions)); - block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None, - KMemoryAttribute::Locked); - block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode); + // Ensure that we unprotect the source pages on failure. + auto unprot_guard = SCOPE_GUARD({ + ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions) + .IsSuccess()); + }); + + // Map the alias pages. + R_TRY(MapPages(dst_address, pg, new_perm)); + + // We successfully mapped the alias pages, so we don't need to unprotect the src pages on + // failure. + unprot_guard.Cancel(); + + // Apply the memory block updates. + block_manager->Update(src_address, num_pages, src_state, new_perm, + KMemoryAttribute::Locked); + block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm, + KMemoryAttribute::None); + } return ResultSuccess; } -ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; +ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { + // Validate the mapping request. + R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), + ResultInvalidMemoryRegion); + + // Lock the table. + KScopedLightLock lk(general_lock); - if (!size) { - return ResultSuccess; + // Verify that the source memory is locked normal heap. + std::size_t num_src_allocator_blocks{}; + R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, + KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::Locked)); + + // Verify that the destination memory is aliasable code. + std::size_t num_dst_allocator_blocks{}; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, + KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All, KMemoryAttribute::None)); + + // Determine whether any pages being unmapped are code. + bool any_code_pages = false; + { + KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address); + while (true) { + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Check if the memory has code flag. + if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) { + any_code_pages = true; + break; + } + + // Check if we're done. + if (dst_address + size - 1 <= info.GetLastAddress()) { + break; + } + + // Advance. + ++it; + } } - const std::size_t num_pages{size / PageSize}; + // Ensure that we maintain the instruction cache. + bool reprotected_pages = false; + SCOPE_EXIT({ + if (reprotected_pages && any_code_pages) { + system.InvalidateCpuInstructionCacheRange(dst_address, size); + } + }); - CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, nullptr, src_addr, size, - KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + // Unmap. + { + // Determine the number of pages being operated on. 
+ const std::size_t num_pages = size / PageSize; - KMemoryState state{}; - CASCADE_CODE(CheckMemoryState( - &state, nullptr, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias, - KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); - CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::None)); - CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); + // Unmap the aliased copy of the pages. + R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); - block_manager->Update(dst_addr, num_pages, KMemoryState::Free); - block_manager->Update(src_addr, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite); + // Try to set the permissions for the source pages back to what they should be. + R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, + OperationType::ChangePermissions)); - system.InvalidateCpuInstructionCacheRange(dst_addr, size); + // Apply the memory block updates. + block_manager->Update(dst_address, num_pages, KMemoryState::None); + block_manager->Update(src_address, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite); + + // Note that we reprotected pages. + reprotected_pages = true; + } return ResultSuccess; } +VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, + std::size_t num_pages, std::size_t alignment, std::size_t offset, + std::size_t guard_pages) { + VAddr address = 0; + + if (num_pages <= region_num_pages) { + if (this->IsAslrEnabled()) { + // Try to directly find a free area up to 8 times. + for (std::size_t i = 0; i < 8; i++) { + const std::size_t random_offset = + KSystemControl::GenerateRandomRange( + 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * + alignment; + const VAddr candidate = + Common::AlignDown((region_start + random_offset), alignment) + offset; + + KMemoryInfo info = this->QueryInfoImpl(candidate); + + if (info.state != KMemoryState::Free) { + continue; + } + if (region_start > candidate) { + continue; + } + if (info.GetAddress() + guard_pages * PageSize > candidate) { + continue; + } + + const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1; + if (candidate_end > info.GetLastAddress()) { + continue; + } + if (candidate_end > region_start + region_num_pages * PageSize - 1) { + continue; + } + + address = candidate; + break; + } + // Fall back to finding the first free area with a random offset. + if (address == 0) { + // NOTE: Nintendo does not account for guard pages here. + // This may theoretically cause an offset to be chosen that cannot be mapped. We + // will account for guard pages. + const std::size_t offset_pages = KSystemControl::GenerateRandomRange( + 0, region_num_pages - num_pages - guard_pages); + address = block_manager->FindFreeArea(region_start + offset_pages * PageSize, + region_num_pages - offset_pages, num_pages, + alignment, offset, guard_pages); + } + } + + // Find the first free area. 
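// [Editor note, not part of the diff] Summary of the fallback chain in FindFreeArea():
// (1) up to eight random candidates validated directly against QueryInfoImpl(), then
// (2) a first-fit scan starting from a random page offset, and finally (3) the plain
// first-fit scan below. Every candidate must keep guard_pages unmapped on both sides,
// and 0 is returned when nothing fits.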
+ if (address == 0) { + address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); + } + } + + return address; +} + ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, VAddr src_addr) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); const std::size_t num_pages{size / PageSize}; @@ -397,150 +514,482 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, return ResultSuccess; } -ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { +ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { // Lock the physical memory lock. - std::lock_guard phys_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); - // Lock the table. - std::lock_guard lock{page_table_lock}; + // Calculate the last address for convenience. + const VAddr last_address = address + size - 1; - std::size_t mapped_size{}; - const VAddr end_addr{addr + size}; + // Define iteration variables. + VAddr cur_address; + std::size_t mapped_size; - block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { - if (info.state != KMemoryState::Free) { - mapped_size += GetSizeInRange(info, addr, end_addr); - } - }); - - if (mapped_size == size) { - return ResultSuccess; - } + // The entire mapping process can be retried. + while (true) { + // Check if the memory is already mapped. + { + // Lock the table. + KScopedLightLock lk(general_lock); + + // Iterate over the memory. + cur_address = address; + mapped_size = 0; + + auto it = block_manager->FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != block_manager->end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + if (info.GetState() != KMemoryState::Free) { + mapped_size += (last_address + 1 - cur_address); + } + break; + } + + // Track the memory if it's mapped. + if (info.GetState() != KMemoryState::Free) { + mapped_size += VAddr(info.GetEndAddress()) - cur_address; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } - const std::size_t remaining_size{size - mapped_size}; - const std::size_t remaining_pages{remaining_size / PageSize}; + // If the size mapped is the size requested, we've nothing to do. + R_SUCCEED_IF(size == mapped_size); + } - // Reserve the memory from the process resource limit. - KScopedResourceReservation memory_reservation( - system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, - remaining_size); - if (!memory_reservation.Succeeded()) { - LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size); - return ResultLimitReached; + // Allocate and map the memory. + { + // Reserve the memory from the process resource limit. + KScopedResourceReservation memory_reservation( + system.Kernel().CurrentProcess()->GetResourceLimit(), + LimitableResource::PhysicalMemory, size - mapped_size); + R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); + + // Allocate pages for the new memory. + KPageLinkedList pg; + R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + &pg, (size - mapped_size) / PageSize, + KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); + + // Map the memory. + { + // Lock the table. 
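MapPhysicalMemory is now a retriable transaction: the mapped byte count is taken under the table lock, the lock is dropped for the slow page allocation, and the count is re-verified once the lock is re-acquired, retrying from the top if another thread changed the range in between. A self-contained sketch of that shape (every helper here is a stand-in, not a yuzu API):

#include <cstddef>
#include <mutex>

namespace {
std::mutex table_lock;

std::size_t CountMappedBytes(std::size_t /*address*/, std::size_t /*size*/) {
    return 0; // Stand-in: walk the block manager, summing non-free ranges.
}

void* AllocatePages(std::size_t /*bytes*/) {
    return nullptr; // Stand-in: allocate a page group (slow, done unlocked).
}

bool MapAllocated(std::size_t /*address*/, void* /*pages*/) {
    return true; // Stand-in: map the allocated pages into the holes.
}
} // namespace

bool MapPhysical(std::size_t address, std::size_t size) {
    while (true) {
        std::size_t mapped = 0;
        {
            std::scoped_lock lk{table_lock};
            mapped = CountMappedBytes(address, size);
            if (mapped == size) {
                return true; // Already fully mapped; nothing to do.
            }
        }

        // Allocate with the table unlocked, since allocation may block.
        void* pages = AllocatePages(size - mapped);

        std::scoped_lock lk{table_lock};
        if (CountMappedBytes(address, size) != mapped) {
            // Raced with a concurrent map/unmap; a real implementation would
            // release `pages` here before retrying from scratch.
            continue;
        }
        return MapAllocated(address, pages);
    }
}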
+ KScopedLightLock lk(general_lock); + + size_t num_allocator_blocks = 0; + + // Verify that nobody has mapped memory since we first checked. + { + // Iterate over the memory. + size_t checked_mapped_size = 0; + cur_address = address; + + auto it = block_manager->FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != block_manager->end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + const bool is_free = info.GetState() == KMemoryState::Free; + if (is_free) { + if (info.GetAddress() < address) { + ++num_allocator_blocks; + } + if (last_address < info.GetLastAddress()) { + ++num_allocator_blocks; + } + } + + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + if (!is_free) { + checked_mapped_size += (last_address + 1 - cur_address); + } + break; + } + + // Track the memory if it's mapped. + if (!is_free) { + checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // If the size now isn't what it was before, somebody mapped or unmapped + // concurrently. If this happened, retry. + if (mapped_size != checked_mapped_size) { + continue; + } + } + + // Reset the current tracking address, and make sure we clean up on failure. + cur_address = address; + auto unmap_guard = detail::ScopeExit([&] { + if (cur_address > address) { + const VAddr last_unmap_address = cur_address - 1; + + // Iterate, unmapping the pages. + cur_address = address; + + auto it = block_manager->FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != block_manager->end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // If the memory state is free, we mapped it and need to unmap it. + if (info.GetState() == KMemoryState::Free) { + // Determine the range to unmap. + const size_t cur_pages = + std::min(VAddr(info.GetEndAddress()) - cur_address, + last_unmap_address + 1 - cur_address) / + PageSize; + + // Unmap. + ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, + OperationType::Unmap) + .IsSuccess()); + } + + // Check if we're done. + if (last_unmap_address <= info.GetLastAddress()) { + break; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + } + }); + + // Iterate over the memory. + auto pg_it = pg.Nodes().begin(); + PAddr pg_phys_addr = pg_it->GetAddress(); + size_t pg_pages = pg_it->GetNumPages(); + + auto it = block_manager->FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != block_manager->end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // If it's unmapped, we need to map it. + if (info.GetState() == KMemoryState::Free) { + // Determine the range to map. + size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, + last_address + 1 - cur_address) / + PageSize; + + // While we have pages to map, map them. + while (map_pages > 0) { + // Check if we're at the end of the physical block. + if (pg_pages == 0) { + // Ensure there are more pages to map. + ASSERT(pg_it != pg.Nodes().end()); + + // Advance our physical block. + ++pg_it; + pg_phys_addr = pg_it->GetAddress(); + pg_pages = pg_it->GetNumPages(); + } + + // Map whatever we can. + const size_t cur_pages = std::min(pg_pages, map_pages); + R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, + OperationType::Map, pg_phys_addr)); + + // Advance. 
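In the re-verification pass above, num_allocator_blocks counts free blocks that straddle either end of the requested range: each straddling side will have to be split when the middle becomes Normal memory, costing one extra block-manager node. That bookkeeping, reduced to a sketch:

#include <cstddef>

struct Range {
    std::size_t first; // First address covered by the block.
    std::size_t last;  // Last address covered by the block.
};

// Count the splits a free block needs to fit within [address, last_address].
std::size_t CountSplits(const Range& block, std::size_t address, std::size_t last_address) {
    std::size_t splits = 0;
    if (block.first < address) {
        ++splits; // Block begins before the region: split off the front.
    }
    if (last_address < block.last) {
        ++splits; // Block ends after the region: split off the tail.
    }
    return splits;
}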
+ cur_address += cur_pages * PageSize; + map_pages -= cur_pages; + + pg_phys_addr += cur_pages * PageSize; + pg_pages -= cur_pages; + } + } + + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + break; + } + + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + + // We succeeded, so commit the memory reservation. + memory_reservation.Commit(); + + // Increase our tracked mapped size. + mapped_physical_memory_size += (size - mapped_size); + + // Update the relevant memory blocks. + block_manager->Update(address, size / PageSize, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryState::Normal, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None); + + // Cancel our guard. + unmap_guard.Cancel(); + + return ResultSuccess; + } + } } +} - KPageLinkedList page_linked_list; +ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { + // Lock the physical memory lock. + KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); - CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, - memory_pool, allocation_option)); + // Lock the table. + KScopedLightLock lk(general_lock); - // We succeeded, so commit the memory reservation. - memory_reservation.Commit(); + // Calculate the last address for convenience. + const VAddr last_address = address + size - 1; - // Map the memory. - auto node{page_linked_list.Nodes().begin()}; - PAddr map_addr{node->GetAddress()}; - std::size_t src_num_pages{node->GetNumPages()}; - block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { - if (info.state != KMemoryState::Free) { - return; - } + // Define iteration variables. + VAddr cur_address = 0; + std::size_t mapped_size = 0; + std::size_t num_allocator_blocks = 0; - std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize}; - VAddr dst_addr{GetAddressInRange(info, addr)}; + // Check if the memory is mapped. + { + // Iterate over the memory. + cur_address = address; + mapped_size = 0; + + auto it = block_manager->FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != block_manager->end()); + + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); + + // Verify the memory's state. + const bool is_normal = info.GetState() == KMemoryState::Normal && + info.GetAttribute() == KMemoryAttribute::None; + const bool is_free = info.GetState() == KMemoryState::Free; + R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); + + if (is_normal) { + R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); + + if (info.GetAddress() < address) { + ++num_allocator_blocks; + } + if (last_address < info.GetLastAddress()) { + ++num_allocator_blocks; + } + } - while (dst_num_pages) { - if (!src_num_pages) { - node = std::next(node); - map_addr = node->GetAddress(); - src_num_pages = node->GetNumPages(); + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + if (is_normal) { + mapped_size += (last_address + 1 - cur_address); + } + break; } - const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)}; - Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map, - map_addr); + // Track the memory if it's mapped. + if (is_normal) { + mapped_size += VAddr(info.GetEndAddress()) - cur_address; + } - dst_addr += num_pages * PageSize; - map_addr += num_pages * PageSize; - src_num_pages -= num_pages; - dst_num_pages -= num_pages; + // Advance. 
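The mapping loop that just completed advances two cursors at once: the virtual cursor walks free ranges in the block manager while the physical cursor consumes page-group nodes, and each step maps the smaller of the two remainders. The chunking pattern on its own (assumes the group supplies at least map_pages pages):

#include <algorithm>
#include <cstddef>
#include <vector>

struct PhysNode {
    std::size_t pages; // Contiguous physical pages in this node.
};

void MapInterleaved(std::size_t map_pages, const std::vector<PhysNode>& group) {
    auto it = group.begin();
    std::size_t pg_pages = it->pages;
    while (map_pages > 0) {
        if (pg_pages == 0) {
            ++it; // Exhausted this physical block; advance to the next.
            pg_pages = it->pages;
        }
        const std::size_t cur = std::min(pg_pages, map_pages);
        // Operate(cur_address, cur, ..., Map, phys_addr) would go here.
        map_pages -= cur;
        pg_pages -= cur;
    }
}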
+ cur_address = info.GetEndAddress(); + ++it; } - }); - - mapped_physical_memory_size += remaining_size; - const std::size_t num_pages{size / PageSize}; - block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None, KMemoryState::Normal, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None); + // If there's nothing mapped, we've nothing to do. + R_SUCCEED_IF(mapped_size == 0); + } - return ResultSuccess; -} + // Make a page group for the unmap region. + KPageLinkedList pg; + { + auto& impl = this->PageTableImpl(); + + // Begin traversal. + Common::PageTable::TraversalContext context; + Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; + bool cur_valid = false; + Common::PageTable::TraversalEntry next_entry; + bool next_valid = false; + size_t tot_size = 0; + + cur_address = address; + next_valid = impl.BeginTraversal(next_entry, context, cur_address); + next_entry.block_size = + (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); + + // Iterate, building the group. + while (true) { + if ((!next_valid && !cur_valid) || + (next_valid && cur_valid && + next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { + cur_entry.block_size += next_entry.block_size; + } else { + if (cur_valid) { + // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); + R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); + } + + // Update tracking variables. + tot_size += cur_entry.block_size; + cur_entry = next_entry; + cur_valid = next_valid; + } -ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + if (cur_entry.block_size + tot_size >= size) { + break; + } - const VAddr end_addr{addr + size}; - ResultCode result{ResultSuccess}; - std::size_t mapped_size{}; + next_valid = impl.ContinueTraversal(next_entry, context); + } - // Verify that the region can be unmapped - block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { - if (info.state == KMemoryState::Normal) { - if (info.attribute != KMemoryAttribute::None) { - result = ResultInvalidCurrentMemory; - return; + // Add the last block. + if (cur_valid) { + // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); + R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); + } + } + ASSERT(pg.GetNumPages() == mapped_size / PageSize); + + // Reset the current tracking address, and make sure we clean up on failure. + cur_address = address; + auto remap_guard = detail::ScopeExit([&] { + if (cur_address > address) { + const VAddr last_map_address = cur_address - 1; + cur_address = address; + + // Iterate over the memory we unmapped. + auto it = block_manager->FindIterator(cur_address); + auto pg_it = pg.Nodes().begin(); + PAddr pg_phys_addr = pg_it->GetAddress(); + size_t pg_pages = pg_it->GetNumPages(); + + while (true) { + // Get the memory info for the pages we unmapped, convert to property. + const KMemoryInfo info = it->GetMemoryInfo(); + + // If the memory is normal, we unmapped it and need to re-map it. + if (info.GetState() == KMemoryState::Normal) { + // Determine the range to map. + size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, + last_map_address + 1 - cur_address) / + PageSize; + + // While we have pages to map, map them. + while (map_pages > 0) { + // Check if we're at the end of the physical block. + if (pg_pages == 0) { + // Ensure there are more pages to map. 
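The traversal above that builds the unmap page group coalesces entries whose physical ranges abut, so a single block ends up describing a whole contiguous run. The same merge, reduced to its core:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Entry {
    std::uintptr_t phys;
    std::size_t size;
};

// Coalesce physically contiguous entries, mirroring the traversal loop above.
std::vector<Entry> Merge(const std::vector<Entry>& in) {
    std::vector<Entry> out;
    for (const Entry& e : in) {
        if (!out.empty() && e.phys == out.back().phys + out.back().size) {
            out.back().size += e.size; // Extends the current run.
        } else {
            out.push_back(e);          // Starts a new run.
        }
    }
    return out;
}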
+ ASSERT(pg_it != pg.Nodes().end()); + + // Advance our physical block. + ++pg_it; + pg_phys_addr = pg_it->GetAddress(); + pg_pages = pg_it->GetNumPages(); + } + + // Map whatever we can. + const size_t cur_pages = std::min(pg_pages, map_pages); + ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), + OperationType::Map, pg_phys_addr) == ResultSuccess); + + // Advance. + cur_address += cur_pages * PageSize; + map_pages -= cur_pages; + + pg_phys_addr += cur_pages * PageSize; + pg_pages -= cur_pages; + } + } + + // Check if we're done. + if (last_map_address <= info.GetLastAddress()) { + break; + } + + // Advance. + ++it; } - mapped_size += GetSizeInRange(info, addr, end_addr); - } else if (info.state != KMemoryState::Free) { - result = ResultInvalidCurrentMemory; } }); - if (result.IsError()) { - return result; - } + // Iterate over the memory, unmapping as we go. + auto it = block_manager->FindIterator(cur_address); + while (true) { + // Check that the iterator is valid. + ASSERT(it != block_manager->end()); - if (!mapped_size) { - return ResultSuccess; - } + // Get the memory info. + const KMemoryInfo info = it->GetMemoryInfo(); - // Unmap each region within the range - KPageLinkedList page_linked_list; - block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { - if (info.state == KMemoryState::Normal) { - const std::size_t block_size{GetSizeInRange(info, addr, end_addr)}; - const std::size_t block_num_pages{block_size / PageSize}; - const VAddr block_addr{GetAddressInRange(info, addr)}; - - AddRegionToPages(block_addr, block_size / PageSize, page_linked_list); - - if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None, - OperationType::Unmap); - result.IsError()) { - return; - } + // If the memory state is normal, we need to unmap it. + if (info.GetState() == KMemoryState::Normal) { + // Determine the range to unmap. + const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, + last_address + 1 - cur_address) / + PageSize; + + // Unmap. + R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); } - }); - if (result.IsError()) { - return result; - } - const std::size_t num_pages{size / PageSize}; - system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool, - allocation_option); + // Check if we're done. + if (last_address <= info.GetLastAddress()) { + break; + } - block_manager->Update(addr, num_pages, KMemoryState::Free); + // Advance. + cur_address = info.GetEndAddress(); + ++it; + } + // Release the memory resource. + mapped_physical_memory_size -= mapped_size; auto process{system.Kernel().CurrentProcess()}; process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); - mapped_physical_memory_size -= mapped_size; + + // Update memory blocks. + block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None); + + // TODO(bunnei): This is a workaround until the next set of changes, where we add reference + // counting for mapped pages. Until then, we must manually close the reference to the page + // group. + system.Kernel().MemoryManager().Close(pg); + + // We succeeded. 
+ remap_guard.Cancel(); return ResultSuccess; } ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryState src_state{}; CASCADE_CODE(CheckMemoryState( @@ -579,7 +1028,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz } ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryState src_state{}; CASCADE_CODE(CheckMemoryState( @@ -622,6 +1071,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list, KMemoryPermission perm) { + ASSERT(this->IsLockedByCurrentThread()); + VAddr cur_addr{addr}; for (const auto& node : page_linked_list.Nodes()) { @@ -650,7 +1101,7 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Check the memory state. R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, @@ -666,13 +1117,54 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list return ResultSuccess; } +ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, + PAddr phys_addr, bool is_pa_valid, VAddr region_start, + std::size_t region_num_pages, KMemoryState state, + KMemoryPermission perm) { + ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); + + // Ensure this is a valid map request. + R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), + ResultInvalidCurrentMemory); + R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); + + // Lock the table. + KScopedLightLock lk(general_lock); + + // Find a random address to map at. + VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, + this->GetNumGuardPages()); + R_UNLESS(addr != 0, ResultOutOfMemory); + ASSERT(Common::IsAligned(addr, alignment)); + ASSERT(this->CanContain(addr, num_pages * PageSize, state)); + ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryAttribute::None) + .IsSuccess()); + + // Perform mapping operation. + if (is_pa_valid) { + R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); + } else { + UNIMPLEMENTED(); + } + + // Update the blocks. + block_manager->Update(addr, num_pages, state, perm); + + // We successfully mapped the pages. 
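The recurring change across these hunks swaps std::lock_guard over a std::recursive_mutex for KScopedLightLock over a KLightLock. The RAII shape is unchanged; a simplified sketch (yuzu's real KLightLock takes a compare-and-swap fast path and parks waiters through the scheduler on contention):

class LightLock {
public:
    void Lock() {
        // Real implementation: CAS fast path, sleep the thread on contention.
    }
    void Unlock() {
        // Real implementation: wake the next waiting thread, if any.
    }
};

class ScopedLightLock {
public:
    explicit ScopedLightLock(LightLock& lock) : m_lock{lock} {
        m_lock.Lock();
    }
    ~ScopedLightLock() {
        m_lock.Unlock();
    }
    ScopedLightLock(const ScopedLightLock&) = delete;
    ScopedLightLock& operator=(const ScopedLightLock&) = delete;

private:
    LightLock& m_lock;
};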
+ *out_addr = addr; + return ResultSuccess; +} + ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) { + ASSERT(this->IsLockedByCurrentThread()); + VAddr cur_addr{addr}; for (const auto& node : page_linked_list.Nodes()) { - const std::size_t num_pages{(addr - cur_addr) / PageSize}; - if (const auto result{ - Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)}; + if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, + OperationType::Unmap)}; result.IsError()) { return result; } @@ -691,7 +1183,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Check the memory state. R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None, @@ -707,12 +1199,36 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, return ResultSuccess; } +ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { + // Check that the unmap is in range. + const std::size_t size = num_pages * PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(general_lock); + + // Check the memory state. + std::size_t num_allocator_blocks{}; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, state, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Perform the unmap. + R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); + + // Update the blocks. + block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); + + return ResultSuccess; +} + ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Verify we can change the memory permission. KMemoryState old_state; @@ -766,7 +1282,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, } KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); return block_manager->FindBlock(addr).GetMemoryInfo(); } @@ -781,7 +1297,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) { } ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryState state{}; KMemoryAttribute attribute{}; @@ -799,7 +1315,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo } ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryState state{}; @@ -818,7 +1334,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, const size_t num_pages = size / PageSize; // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Verify we can change the memory permission. 
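The CheckMemoryState calls sprinkled through these paths all test a mask/value pair per field: a block passes when its masked state, permission, and attribute each equal the expected value. Reduced to a single field:

#include <cstdint>

// A block passes when the bits selected by `mask` equal `expected`.
bool FieldMatches(std::uint32_t actual, std::uint32_t mask, std::uint32_t expected) {
    return (actual & mask) == expected;
}

// Example: require KMemoryState::Normal exactly (mask = All), or merely the
// FlagCanCodeAlias bit (mask = value = FlagCanCodeAlias), as the calls above do.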
KMemoryState old_state; @@ -847,7 +1363,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask KMemoryAttribute::SetMask); // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Verify we can change the memory attribute. KMemoryState old_state; @@ -878,7 +1394,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask ResultCode KPageTable::SetMaxHeapSize(std::size_t size) { // Lock the table. - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); // Only process page tables are allowed to set heap size. ASSERT(!this->IsKernel()); @@ -889,15 +1405,15 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) { } ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { - // Lock the physical memory lock. - std::lock_guard phys_lk(map_physical_memory_lock); + // Lock the physical memory mutex. + KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); // Try to perform a reduction in heap, instead of an extension. VAddr cur_address{}; std::size_t allocation_size{}; { // Lock the table. - std::lock_guard lk(page_table_lock); + KScopedLightLock lk(general_lock); // Validate that setting heap size is possible at all. R_UNLESS(!is_kernel, ResultOutOfMemory); @@ -955,14 +1471,21 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the heap extension. - KPageLinkedList page_linked_list; - R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize, - memory_pool, allocation_option)); + KPageLinkedList pg; + R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + &pg, allocation_size / PageSize, + KMemoryManager::EncodeOption(memory_pool, allocation_option))); + + // Clear all the newly allocated pages. + for (const auto& it : pg.Nodes()) { + std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, + it.GetSize()); + } // Map the pages. { // Lock the table. - std::lock_guard lk(page_table_lock); + KScopedLightLock lk(general_lock); // Ensure that the heap hasn't changed since we began executing. ASSERT(cur_address == current_heap_end); @@ -976,7 +1499,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { // Map the pages. const auto num_pages = allocation_size / PageSize; - R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup)); + R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); // Clear all the newly allocated pages. 
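SetHeapSize above now scrubs the fresh allocation to heap_fill_value before the pages become guest-visible, walking the page group node by node. A sketch of that fill pass with stand-in types:

#include <cstddef>
#include <cstring>
#include <vector>

struct GroupNode {
    void* host_ptr;    // Host pointer backing the guest pages.
    std::size_t bytes; // Size of this contiguous node.
};

void FillPageGroup(const std::vector<GroupNode>& nodes, unsigned char fill) {
    for (const GroupNode& node : nodes) {
        std::memset(node.host_ptr, fill, node.bytes);
    }
}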
for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { @@ -1004,7 +1527,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, bool is_map_only, VAddr region_start, std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm, PAddr map_addr) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); if (!CanContain(region_start, region_num_pages * PageSize, state)) { return ResultInvalidCurrentMemory; @@ -1024,8 +1547,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); } else { KPageLinkedList page_group; - R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool, - allocation_option)); + R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + &page_group, needed_num_pages, + KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); } @@ -1035,7 +1559,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, } ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryPermission perm{}; if (const ResultCode result{CheckMemoryState( @@ -1058,7 +1582,7 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { } ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryPermission perm{}; if (const ResultCode result{CheckMemoryState( @@ -1081,7 +1605,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) } ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite; @@ -1108,7 +1632,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) { } ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) { - std::lock_guard lock{page_table_lock}; + KScopedLightLock lk(general_lock); KMemoryPermission new_perm = KMemoryPermission::UserReadWrite; @@ -1232,7 +1756,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss return ResultSuccess; } -constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const { +VAddr KPageTable::GetRegionAddress(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: @@ -1268,7 +1792,7 @@ constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const { } } -constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const { +std::size_t KPageTable::GetRegionSize(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 60ae9b9e8..54c6adf8d 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -5,11 +5,12 @@ #pragma once #include <memory> -#include <mutex> +#include "common/common_funcs.h" #include "common/common_types.h" #include "common/page_table.h" #include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_memory_block.h" #include 
"core/hle/kernel/k_memory_manager.h" #include "core/hle/result.h" @@ -22,17 +23,21 @@ namespace Kernel { class KMemoryBlockManager; -class KPageTable final : NonCopyable { +class KPageTable final { public: + YUZU_NON_COPYABLE(KPageTable); + YUZU_NON_MOVEABLE(KPageTable); + explicit KPageTable(Core::System& system_); + ~KPageTable(); ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, KMemoryPermission perm); - ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); - ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); + ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, VAddr src_addr); ResultCode MapPhysicalMemory(VAddr addr, std::size_t size); @@ -41,7 +46,14 @@ public: ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, KMemoryPermission perm); + ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, + PAddr phys_addr, KMemoryState state, KMemoryPermission perm) { + return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, + this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, + state, perm); + } ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state); + ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); KMemoryInfo QueryInfo(VAddr addr); @@ -86,6 +98,9 @@ private: ResultCode InitializeMemoryLayout(VAddr start, VAddr end); ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, KMemoryPermission perm); + ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, + PAddr phys_addr, bool is_pa_valid, VAddr region_start, + std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm); ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list); bool IsRegionMapped(VAddr address, u64 size); bool IsRegionContiguous(VAddr addr, u64 size) const; @@ -97,8 +112,11 @@ private: OperationType operation); ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, OperationType operation, PAddr map_addr = 0); - constexpr VAddr GetRegionAddress(KMemoryState state) const; - constexpr std::size_t GetRegionSize(KMemoryState state) const; + VAddr GetRegionAddress(KMemoryState state) const; + std::size_t GetRegionSize(KMemoryState state) const; + + VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, + std::size_t alignment, std::size_t offset, std::size_t guard_pages); ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, KMemoryState state_mask, @@ -132,7 +150,7 @@ private: return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); } - ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, + ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState 
state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, @@ -142,11 +160,12 @@ private: } bool IsLockedByCurrentThread() const { - return true; + return general_lock.IsLockedByCurrentThread(); } - std::recursive_mutex page_table_lock; - std::mutex map_physical_memory_lock; + mutable KLightLock general_lock; + mutable KLightLock map_physical_memory_lock; + std::unique_ptr<KMemoryBlockManager> block_manager; public: @@ -204,8 +223,8 @@ public: constexpr VAddr GetAliasCodeRegionSize() const { return alias_code_region_end - alias_code_region_start; } - size_t GetNormalMemorySize() { - std::lock_guard lk(page_table_lock); + std::size_t GetNormalMemorySize() { + KScopedLightLock lk(general_lock); return GetHeapSize() + mapped_physical_memory_size; } constexpr std::size_t GetAddressSpaceWidth() const { @@ -247,7 +266,10 @@ public: constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { return !IsOutsideASLRRegion(address, size); } - constexpr PAddr GetPhysicalAddr(VAddr addr) { + constexpr std::size_t GetNumGuardPages() const { + return IsKernel() ? 1 : 4; + } + PAddr GetPhysicalAddr(VAddr addr) const { const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; ASSERT(backing_addr); return backing_addr + addr; @@ -268,10 +290,6 @@ private: return is_aslr_enabled; } - constexpr std::size_t GetNumGuardPages() const { - return IsKernel() ? 1 : 4; - } - constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { return (address_space_start <= addr) && (num_pages <= (address_space_end - address_space_start) / PageSize) && @@ -303,6 +321,8 @@ private: bool is_kernel{}; bool is_aslr_enabled{}; + u32 heap_fill_value{}; + KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp index a8ba09c4a..ceb98709f 100644 --- a/src/core/hle/kernel/k_port.cpp +++ b/src/core/hle/kernel/k_port.cpp @@ -57,7 +57,12 @@ ResultCode KPort::EnqueueSession(KServerSession* session) { R_UNLESS(state == State::Normal, ResultPortClosed); server.EnqueueSession(session); - server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession()); + + if (auto session_ptr = server.GetSessionRequestHandler().lock()) { + session_ptr->ClientConnected(server.AcceptSession()); + } else { + UNREACHABLE(); + } return ResultSuccess; } diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 85c506979..b39405496 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -70,65 +70,12 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority } } // Anonymous namespace -// Represents a page used for thread-local storage. -// -// Each TLS page contains slots that may be used by processes and threads. -// Every process and thread is created with a slot in some arbitrary page -// (whichever page happens to have an available slot). 
-class TLSPage { -public: - static constexpr std::size_t num_slot_entries = - Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE; - - explicit TLSPage(VAddr address) : base_address{address} {} - - bool HasAvailableSlots() const { - return !is_slot_used.all(); - } - - VAddr GetBaseAddress() const { - return base_address; - } - - std::optional<VAddr> ReserveSlot() { - for (std::size_t i = 0; i < is_slot_used.size(); i++) { - if (is_slot_used[i]) { - continue; - } - - is_slot_used[i] = true; - return base_address + (i * Core::Memory::TLS_ENTRY_SIZE); - } - - return std::nullopt; - } - - void ReleaseSlot(VAddr address) { - // Ensure that all given addresses are consistent with how TLS pages - // are intended to be used when releasing slots. - ASSERT(IsWithinPage(address)); - ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0); - - const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE; - is_slot_used[index] = false; - } - -private: - bool IsWithinPage(VAddr address) const { - return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE; - } - - VAddr base_address; - std::bitset<num_slot_entries> is_slot_used; -}; - ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name, - ProcessType type) { + ProcessType type, KResourceLimit* res_limit) { auto& kernel = system.Kernel(); process->name = std::move(process_name); - - process->resource_limit = kernel.GetSystemResourceLimit(); + process->resource_limit = res_limit; process->status = ProcessStatus::Created; process->program_id = 0; process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() @@ -143,9 +90,6 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st kernel.AppendNewProcess(process); - // Open a reference to the resource limit. - process->resource_limit->Open(); - // Clear remaining fields. process->num_running_threads = 0; process->is_signaled = false; @@ -153,6 +97,9 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st process->is_suspended = false; process->schedule_count = 0; + // Open a reference to the resource limit. + process->resource_limit->Open(); + return ResultSuccess; } @@ -405,7 +352,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, } // Create TLS region - tls_region_address = CreateTLSRegion(); + R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); memory_reservation.Commit(); return handle_table.Initialize(capabilities.GetHandleTableSize()); @@ -445,7 +392,7 @@ void KProcess::PrepareForTermination() { stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); - FreeTLSRegion(tls_region_address); + this->DeleteThreadLocalRegion(tls_region_address); tls_region_address = 0; if (resource_limit) { @@ -457,9 +404,6 @@ void KProcess::PrepareForTermination() { } void KProcess::Finalize() { - // Finalize the handle table and close any open handles. - handle_table.Finalize(); - // Free all shared memory infos. { auto it = shared_memory_list.begin(); @@ -484,67 +428,110 @@ void KProcess::Finalize() { resource_limit = nullptr; } + // Finalize the page table. + page_table.reset(); + // Perform inherited finalization. KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); } -/** - * Attempts to find a TLS page that contains a free slot for - * use by a thread. 
- * - * @returns If a page with an available slot is found, then an iterator - * pointing to the page is returned. Otherwise the end iterator - * is returned instead. - */ -static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) { - return std::find_if(tls_pages.begin(), tls_pages.end(), - [](const auto& page) { return page.HasAvailableSlots(); }); -} +ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) { + KThreadLocalPage* tlp = nullptr; + VAddr tlr = 0; -VAddr KProcess::CreateTLSRegion() { - KScopedSchedulerLock lock(kernel); - if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; - tls_page_iter != tls_pages.cend()) { - return *tls_page_iter->ReserveSlot(); - } + // See if we can get a region from a partially used TLP. + { + KScopedSchedulerLock sl{kernel}; - Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()}; - ASSERT(tls_page_ptr); + if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) { + tlr = it->Reserve(); + ASSERT(tlr != 0); - const VAddr start{page_table->GetKernelMapRegionStart()}; - const VAddr size{page_table->GetKernelMapRegionEnd() - start}; - const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; - const VAddr tls_page_addr{page_table - ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize, - KMemoryState::ThreadLocal, - KMemoryPermission::UserReadWrite, - tls_map_addr) - .ValueOr(0)}; + if (it->IsAllUsed()) { + tlp = std::addressof(*it); + partially_used_tlp_tree.erase(it); + fully_used_tlp_tree.insert(*tlp); + } + + *out = tlr; + return ResultSuccess; + } + } - ASSERT(tls_page_addr); + // Allocate a new page. + tlp = KThreadLocalPage::Allocate(kernel); + R_UNLESS(tlp != nullptr, ResultOutOfMemory); + auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); }); - std::memset(tls_page_ptr, 0, PageSize); - tls_pages.emplace_back(tls_page_addr); + // Initialize the new page. + R_TRY(tlp->Initialize(kernel, this)); - const auto reserve_result{tls_pages.back().ReserveSlot()}; - ASSERT(reserve_result.has_value()); + // Reserve a TLR. + tlr = tlp->Reserve(); + ASSERT(tlr != 0); - return *reserve_result; + // Insert into our tree. + { + KScopedSchedulerLock sl{kernel}; + if (tlp->IsAllUsed()) { + fully_used_tlp_tree.insert(*tlp); + } else { + partially_used_tlp_tree.insert(*tlp); + } + } + + // We succeeded! + tlp_guard.Cancel(); + *out = tlr; + return ResultSuccess; } -void KProcess::FreeTLSRegion(VAddr tls_address) { - KScopedSchedulerLock lock(kernel); - const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); - auto iter = - std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { - return page.GetBaseAddress() == aligned_address; - }); +ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) { + KThreadLocalPage* page_to_free = nullptr; + + // Release the region. + { + KScopedSchedulerLock sl{kernel}; + + // Try to find the page in the partially used list. + auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); + if (it == partially_used_tlp_tree.end()) { + // If we don't find it, it has to be in the fully used list. + it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); + R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress); + + // Release the region. + it->Release(addr); + + // Move the page out of the fully used list. 
+ KThreadLocalPage* tlp = std::addressof(*it); + fully_used_tlp_tree.erase(it); + if (tlp->IsAllFree()) { + page_to_free = tlp; + } else { + partially_used_tlp_tree.insert(*tlp); + } + } else { + // Release the region. + it->Release(addr); + + // Handle the all-free case. + KThreadLocalPage* tlp = std::addressof(*it); + if (tlp->IsAllFree()) { + partially_used_tlp_tree.erase(it); + page_to_free = tlp; + } + } + } + + // If we should free the page it was in, do so. + if (page_to_free != nullptr) { + page_to_free->Finalize(); - // Something has gone very wrong if we're freeing a region - // with no actual page available. - ASSERT(iter != tls_pages.cend()); + KThreadLocalPage::Free(kernel, page_to_free); + } - iter->ReleaseSlot(tls_address); + return ResultSuccess; } void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index 38b446350..5ed0f2d83 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -15,6 +15,7 @@ #include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/kernel/k_thread_local_page.h" #include "core/hle/kernel/k_worker_task.h" #include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/slab_helpers.h" @@ -91,7 +92,7 @@ public: static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name, - ProcessType type); + ProcessType type, KResourceLimit* res_limit); /// Gets a reference to the process' page table. KPageTable& PageTable() { @@ -362,10 +363,10 @@ public: // Thread-local storage management // Marks the next available region as used and returns the address of the slot. - [[nodiscard]] VAddr CreateTLSRegion(); + [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out); // Frees a used TLS slot identified by the given address - void FreeTLSRegion(VAddr tls_address); + ResultCode DeleteThreadLocalRegion(VAddr addr); private: void PinThread(s32 core_id, KThread* thread) { @@ -413,13 +414,6 @@ private: /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; - /// The Thread Local Storage area is allocated as processes create threads, - /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part - /// holds the TLS for a specific thread. This vector contains which parts are in use for each - /// page as a bitmask. - /// This vector will grow as more pages are allocated for new threads. - std::vector<TLSPage> tls_pages; - /// Contains the parsed process capability descriptors. ProcessCapabilities capabilities; @@ -482,6 +476,12 @@ private: KThread* exception_thread{}; KLightLock state_lock; + + using TLPTree = + Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; + using TLPIterator = TLPTree::iterator; + TLPTree fully_used_tlp_tree; + TLPTree partially_used_tlp_tree; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index 0c4bba66b..a84977c68 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp @@ -3,6 +3,7 @@ // Refer to the license.txt file included. 
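CreateThreadLocalRegion and DeleteThreadLocalRegion above keep TLS pages in two intrusive red-black trees, migrating a page between them whenever it fills up or regains a free slot; a page whose slots are all free is released entirely. The bookkeeping, sketched with std::set standing in for the intrusive trees:

#include <set>

struct Page {
    int free_slots = 8; // PageSize / ThreadLocalRegionSize slots per page.
};

struct TlsPageTracker {
    std::set<Page*> partially_used;
    std::set<Page*> fully_used;

    void OnReserve(Page* p) {
        if (--p->free_slots == 0) { // Page just became full.
            partially_used.erase(p);
            fully_used.insert(p);
        }
    }

    bool OnRelease(Page* p) {       // Returns true if the page can be freed.
        if (p->free_slots++ == 0) { // Page was full; it has a slot again.
            fully_used.erase(p);
            partially_used.insert(p);
        }
        if (p->free_slots == 8) {   // Every slot free: drop the page.
            partially_used.erase(p);
            return true;
        }
        return false;
    }
};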
#include "common/assert.h" +#include "core/core.h" #include "core/core_timing.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/svc_results.h" @@ -151,4 +152,22 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) { } } +KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) { + auto* resource_limit = KResourceLimit::Create(system.Kernel()); + resource_limit->Initialize(&system.CoreTiming()); + + // Initialize default resource limit values. + // TODO(bunnei): These values are the system defaults, the limits for service processes are + // lower. These should use the correct limit values. + + ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size) + .IsSuccess()); + ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); + ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); + ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess()); + ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); + + return resource_limit; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index fab6005ff..d23d16aa4 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h @@ -67,4 +67,7 @@ private: KLightConditionVariable cond_var; const Core::Timing::CoreTiming* core_timing{}; }; + +KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size); + } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h index 6302d5e61..2185736be 100644 --- a/src/core/hle/kernel/k_server_port.h +++ b/src/core/hle/kernel/k_server_port.h @@ -30,11 +30,11 @@ public: /// Whether or not this server port has an HLE handler available. bool HasSessionRequestHandler() const { - return session_handler != nullptr; + return !session_handler.expired(); } /// Gets the HLE handler for this port. - SessionRequestHandlerPtr GetSessionRequestHandler() const { + SessionRequestHandlerWeakPtr GetSessionRequestHandler() const { return session_handler; } @@ -42,7 +42,7 @@ public: * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port * will inherit a reference to this handler. */ - void SetSessionHandler(SessionRequestHandlerPtr&& handler) { + void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) { session_handler = std::move(handler); } @@ -66,7 +66,7 @@ private: void CleanupSessions(); SessionList session_list; - SessionRequestHandlerPtr session_handler; + SessionRequestHandlerWeakPtr session_handler; KPort* parent{}; }; diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 4d94eb9cf..30c56ff29 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -27,10 +27,7 @@ namespace Kernel { KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} -KServerSession::~KServerSession() { - // Ensure that the global list tracking server sessions does not hold on to a reference. 
- kernel.UnregisterServerSession(this); -} +KServerSession::~KServerSession() = default; void KServerSession::Initialize(KSession* parent_session_, std::string&& name_, std::shared_ptr<SessionRequestManager> manager_) { @@ -49,6 +46,9 @@ void KServerSession::Destroy() { parent->OnServerClosed(); parent->Close(); + + // Release host emulation members. + manager.reset(); } void KServerSession::OnClientClosed() { @@ -98,7 +98,12 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co UNREACHABLE(); return ResultSuccess; // Ignore error if asserts are off } - return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context); + if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) { + return strong_ptr->HandleSyncRequest(*this, context); + } else { + UNREACHABLE(); + return ResultSuccess; + } case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index 0ad74b0a0..5690cc757 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h @@ -7,6 +7,7 @@ #include <atomic> #include "common/assert.h" +#include "common/common_funcs.h" #include "common/common_types.h" namespace Kernel { @@ -15,35 +16,34 @@ class KernelCore; namespace impl { -class KSlabHeapImpl final : NonCopyable { +class KSlabHeapImpl { + YUZU_NON_COPYABLE(KSlabHeapImpl); + YUZU_NON_MOVEABLE(KSlabHeapImpl); + public: struct Node { Node* next{}; }; +public: constexpr KSlabHeapImpl() = default; - void Initialize(std::size_t size) { - ASSERT(head == nullptr); - obj_size = size; - } - - constexpr std::size_t GetObjectSize() const { - return obj_size; + void Initialize() { + ASSERT(m_head == nullptr); } Node* GetHead() const { - return head; + return m_head; } void* Allocate() { - Node* ret = head.load(); + Node* ret = m_head.load(); do { if (ret == nullptr) { break; } - } while (!head.compare_exchange_weak(ret, ret->next)); + } while (!m_head.compare_exchange_weak(ret, ret->next)); return ret; } @@ -51,166 +51,157 @@ public: void Free(void* obj) { Node* node = static_cast<Node*>(obj); - Node* cur_head = head.load(); + Node* cur_head = m_head.load(); do { node->next = cur_head; - } while (!head.compare_exchange_weak(cur_head, node)); + } while (!m_head.compare_exchange_weak(cur_head, node)); } private: - std::atomic<Node*> head{}; - std::size_t obj_size{}; + std::atomic<Node*> m_head{}; }; } // namespace impl -class KSlabHeapBase : NonCopyable { +template <bool SupportDynamicExpansion> +class KSlabHeapBase : protected impl::KSlabHeapImpl { + YUZU_NON_COPYABLE(KSlabHeapBase); + YUZU_NON_MOVEABLE(KSlabHeapBase); + +private: + size_t m_obj_size{}; + uintptr_t m_peak{}; + uintptr_t m_start{}; + uintptr_t m_end{}; + +private: + void UpdatePeakImpl(uintptr_t obj) { + static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free); + std::atomic_ref<uintptr_t> peak_ref(m_peak); + + const uintptr_t alloc_peak = obj + this->GetObjectSize(); + uintptr_t cur_peak = m_peak; + do { + if (alloc_peak <= cur_peak) { + break; + } + } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak)); + } + public: constexpr KSlabHeapBase() = default; - constexpr bool Contains(uintptr_t addr) const { - return start <= addr && addr < end; + bool Contains(uintptr_t address) const { + return m_start <= address && address < m_end; } - constexpr std::size_t GetSlabHeapSize() const { - return (end - start) / GetObjectSize(); - 
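KSlabHeapImpl above is a lock-free intrusive free list, commonly called a Treiber stack: Allocate pops the head with a compare-and-swap loop and Free pushes the node back the same way. The structure on its own:

#include <atomic>

struct Node {
    Node* next = nullptr;
};

class FreeList {
public:
    void* Pop() {
        Node* head = m_head.load();
        do {
            if (head == nullptr) {
                return nullptr; // Heap exhausted.
            }
        } while (!m_head.compare_exchange_weak(head, head->next));
        return head;
    }

    void Push(void* obj) {
        Node* node = static_cast<Node*>(obj);
        Node* head = m_head.load();
        do {
            node->next = head;
        } while (!m_head.compare_exchange_weak(head, node));
    }

private:
    std::atomic<Node*> m_head{nullptr};
};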
} + void Initialize(size_t obj_size, void* memory, size_t memory_size) { + // Ensure we don't initialize a slab using null memory. + ASSERT(memory != nullptr); - constexpr std::size_t GetObjectSize() const { - return impl.GetObjectSize(); - } + // Set our object size. + m_obj_size = obj_size; - constexpr uintptr_t GetSlabHeapAddress() const { - return start; - } + // Initialize the base allocator. + KSlabHeapImpl::Initialize(); + + // Set our tracking variables. + const size_t num_obj = (memory_size / obj_size); + m_start = reinterpret_cast<uintptr_t>(memory); + m_end = m_start + num_obj * obj_size; + m_peak = m_start; - std::size_t GetObjectIndexImpl(const void* obj) const { - return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize(); + // Free the objects. + u8* cur = reinterpret_cast<u8*>(m_end); + + for (size_t i = 0; i < num_obj; i++) { + cur -= obj_size; + KSlabHeapImpl::Free(cur); + } } - std::size_t GetPeakIndex() const { - return GetObjectIndexImpl(reinterpret_cast<const void*>(peak)); + size_t GetSlabHeapSize() const { + return (m_end - m_start) / this->GetObjectSize(); } - void* AllocateImpl() { - return impl.Allocate(); + size_t GetObjectSize() const { + return m_obj_size; } - void FreeImpl(void* obj) { - // Don't allow freeing an object that wasn't allocated from this heap - ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); + void* Allocate() { + void* obj = KSlabHeapImpl::Allocate(); - impl.Free(obj); + return obj; } - void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) { - // Ensure we don't initialize a slab using null memory - ASSERT(memory != nullptr); - - // Initialize the base allocator - impl.Initialize(obj_size); + void Free(void* obj) { + // Don't allow freeing an object that wasn't allocated from this heap. + const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj)); + ASSERT(contained); + KSlabHeapImpl::Free(obj); + } - // Set our tracking variables - const std::size_t num_obj = (memory_size / obj_size); - start = reinterpret_cast<uintptr_t>(memory); - end = start + num_obj * obj_size; - peak = start; + size_t GetObjectIndex(const void* obj) const { + if constexpr (SupportDynamicExpansion) { + if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) { + return std::numeric_limits<size_t>::max(); + } + } - // Free the objects - u8* cur = reinterpret_cast<u8*>(end); + return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize(); + } - for (std::size_t i{}; i < num_obj; i++) { - cur -= obj_size; - impl.Free(cur); - } + size_t GetPeakIndex() const { + return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak)); } -private: - using Impl = impl::KSlabHeapImpl; + uintptr_t GetSlabHeapAddress() const { + return m_start; + } - Impl impl; - uintptr_t peak{}; - uintptr_t start{}; - uintptr_t end{}; + size_t GetNumRemaining() const { + // Only calculate the number of remaining objects under debug configuration. 
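UpdatePeakImpl above uses C++20 std::atomic_ref to CAS a plain uintptr_t member, letting the heap advance a high-water mark without declaring the field std::atomic. Sketched:

#include <atomic>
#include <cstdint>

// Advance `peak` to at least `alloc_end`, racing safely with other threads.
void UpdatePeak(std::uintptr_t& peak, std::uintptr_t alloc_end) {
    static_assert(std::atomic_ref<std::uintptr_t>::is_always_lock_free);
    std::atomic_ref<std::uintptr_t> ref{peak};
    std::uintptr_t cur = ref.load();
    while (alloc_end > cur) {
        if (ref.compare_exchange_strong(cur, alloc_end)) {
            break; // We published the new peak.
        }
        // On failure, `cur` reloaded the newer value; re-test the condition.
    }
}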
+ return 0; + } }; template <typename T> -class KSlabHeap final : public KSlabHeapBase { -public: - enum class AllocationType { - Host, - Guest, - }; +class KSlabHeap final : public KSlabHeapBase<false> { +private: + using BaseHeap = KSlabHeapBase<false>; - explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host) - : KSlabHeapBase(), allocation_type{allocation_type_} {} +public: + constexpr KSlabHeap() = default; - void Initialize(void* memory, std::size_t memory_size) { - if (allocation_type == AllocationType::Guest) { - InitializeImpl(sizeof(T), memory, memory_size); - } + void Initialize(void* memory, size_t memory_size) { + BaseHeap::Initialize(sizeof(T), memory, memory_size); } T* Allocate() { - switch (allocation_type) { - case AllocationType::Host: - // Fallback for cases where we do not yet support allocating guest memory from the slab - // heap, such as for kernel memory regions. - return new T; - - case AllocationType::Guest: - T* obj = static_cast<T*>(AllocateImpl()); - if (obj != nullptr) { - new (obj) T(); - } - return obj; - } + T* obj = static_cast<T*>(BaseHeap::Allocate()); - UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type); - return nullptr; + if (obj != nullptr) [[likely]] { + std::construct_at(obj); + } + return obj; } - T* AllocateWithKernel(KernelCore& kernel) { - switch (allocation_type) { - case AllocationType::Host: - // Fallback for cases where we do not yet support allocating guest memory from the slab - // heap, such as for kernel memory regions. - return new T(kernel); + T* Allocate(KernelCore& kernel) { + T* obj = static_cast<T*>(BaseHeap::Allocate()); - case AllocationType::Guest: - T* obj = static_cast<T*>(AllocateImpl()); - if (obj != nullptr) { - new (obj) T(kernel); - } - return obj; + if (obj != nullptr) [[likely]] { + std::construct_at(obj, kernel); } - - UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type); - return nullptr; + return obj; } void Free(T* obj) { - switch (allocation_type) { - case AllocationType::Host: - // Fallback for cases where we do not yet support allocating guest memory from the slab - // heap, such as for kernel memory regions. - delete obj; - return; - - case AllocationType::Guest: - FreeImpl(obj); - return; - } - - UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type); + BaseHeap::Free(obj); } - constexpr std::size_t GetObjectIndex(const T* obj) const { - return GetObjectIndexImpl(obj); + size_t GetObjectIndex(const T* obj) const { + return BaseHeap::GetObjectIndex(obj); } - -private: - const AllocationType allocation_type; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index de3ffe0c7..ba7f72c6b 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -210,7 +210,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s if (owner != nullptr) { // Setup the TLS, if needed. if (type == ThreadType::User) { - tls_address = owner->CreateTLSRegion(); + R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address))); } parent = owner; @@ -305,7 +305,7 @@ void KThread::Finalize() { // If the thread has a local region, delete it. if (tls_address != 0) { - parent->FreeTLSRegion(tls_address); + ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess()); } // Release any waiters. @@ -326,6 +326,9 @@ void KThread::Finalize() { } } + // Release host emulation members. + host_context.reset(); + // Perform inherited finalization. 
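The typed KSlabHeap<T>::Allocate above no longer special-cases host allocation: raw slab storage is turned into a live object with C++20 std::construct_at. The general shape of that step:

#include <memory>
#include <utility>

// Turn raw slab storage into a live T, forwarding constructor arguments.
template <typename T, typename... Args>
T* ConstructFromSlab(void* raw, Args&&... args) {
    if (raw == nullptr) {
        return nullptr; // Slab exhausted; the caller handles failure.
    }
    return std::construct_at(static_cast<T*>(raw), std::forward<Args>(args)...);
}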
KSynchronizationObject::Finalize(); } diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index d058db62c..f46db7298 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -656,7 +656,7 @@ private: static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); struct ConditionVariableComparator { - struct LightCompareType { + struct RedBlackKeyType { u64 cv_key{}; s32 priority{}; @@ -672,8 +672,8 @@ private: template <typename T> requires( std::same_as<T, KThread> || - std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs, - const KThread& rhs) { + std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs, + const KThread& rhs) { const u64 l_key = lhs.GetConditionVariableKey(); const u64 r_key = rhs.GetConditionVariableKey(); diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp new file mode 100644 index 000000000..17b233fca --- /dev/null +++ b/src/core/hle/kernel/k_thread_local_page.cpp @@ -0,0 +1,66 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/scope_exit.h" +#include "core/core.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_thread_local_page.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { + // Set that this process owns us. + m_owner = process; + m_kernel = &kernel; + + // Allocate a new page. + KPageBuffer* page_buf = KPageBuffer::Allocate(kernel); + R_UNLESS(page_buf != nullptr, ResultOutOfMemory); + auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); }); + + // Map the address in. + const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf); + R_TRY(m_owner->PageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, phys_addr, + KMemoryState::ThreadLocal, + KMemoryPermission::UserReadWrite)); + + // We succeeded. + page_buf_guard.Cancel(); + + return ResultSuccess; +} + +ResultCode KThreadLocalPage::Finalize() { + // Get the physical address of the page. + const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr); + ASSERT(phys_addr); + + // Unmap the page. + R_TRY(m_owner->PageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); + + // Free the page. + KPageBuffer::Free(*m_kernel, KPageBuffer::FromPhysicalAddress(m_kernel->System(), phys_addr)); + + return ResultSuccess; +} + +VAddr KThreadLocalPage::Reserve() { + for (size_t i = 0; i < m_is_region_free.size(); i++) { + if (m_is_region_free[i]) { + m_is_region_free[i] = false; + return this->GetRegionAddress(i); + } + } + + return 0; +} + +void KThreadLocalPage::Release(VAddr addr) { + m_is_region_free[this->GetRegionIndex(addr)] = true; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h new file mode 100644 index 000000000..658c67e94 --- /dev/null +++ b/src/core/hle/kernel/k_thread_local_page.h @@ -0,0 +1,112 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
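// A KThreadLocalPage owns one guest page carved into PageSize /
// Svc::ThreadLocalRegionSize (0x200-byte) thread-local regions, with a bool-per-slot
// free map behind Reserve()/Release(); the red-black-tree base node lets the owning
// process keep its pages ordered by virtual address. A rough usage sketch
// (illustrative only, caller context assumed; not part of this changeset):
//   KThreadLocalPage* tlp = KThreadLocalPage::Allocate(kernel);   // slab-allocated
//   R_TRY(tlp->Initialize(kernel, process)); // maps a fresh page as ThreadLocal memory
//   const VAddr tlr = tlp->Reserve();        // hands out one free region, or 0 if full
//   tlp->Release(tlr);                       // marks the region free again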
+ +#pragma once + +#include <algorithm> +#include <array> + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" +#include "core/hle/kernel/k_page_buffer.h" +#include "core/hle/kernel/memory_types.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/result.h" + +namespace Kernel { + +class KernelCore; +class KProcess; + +class KThreadLocalPage final : public Common::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>, + public KSlabAllocated<KThreadLocalPage> { +public: + static constexpr size_t RegionsPerPage = PageSize / Svc::ThreadLocalRegionSize; + static_assert(RegionsPerPage > 0); + +public: + constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { + m_is_region_free.fill(true); + } + + constexpr VAddr GetAddress() const { + return m_virt_addr; + } + + ResultCode Initialize(KernelCore& kernel, KProcess* process); + ResultCode Finalize(); + + VAddr Reserve(); + void Release(VAddr addr); + + bool IsAllUsed() const { + return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(), + [](bool is_free) { return !is_free; }); + } + + bool IsAllFree() const { + return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(), + [](bool is_free) { return is_free; }); + } + + bool IsAnyUsed() const { + return !this->IsAllFree(); + } + + bool IsAnyFree() const { + return !this->IsAllUsed(); + } + +public: + using RedBlackKeyType = VAddr; + + static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) { + return v; + } + static constexpr RedBlackKeyType GetRedBlackKey(const KThreadLocalPage& v) { + return v.GetAddress(); + } + + template <typename T> + requires(std::same_as<T, KThreadLocalPage> || + std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs, + const KThreadLocalPage& + rhs) { + const VAddr lval = GetRedBlackKey(lhs); + const VAddr rval = GetRedBlackKey(rhs); + + if (lval < rval) { + return -1; + } else if (lval == rval) { + return 0; + } else { + return 1; + } + } + +private: + constexpr VAddr GetRegionAddress(size_t i) const { + return this->GetAddress() + i * Svc::ThreadLocalRegionSize; + } + + constexpr bool Contains(VAddr addr) const { + return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize; + } + + constexpr size_t GetRegionIndex(VAddr addr) const { + ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize)); + ASSERT(this->Contains(addr)); + return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize; + } + +private: + VAddr m_virt_addr{}; + KProcess* m_owner{}; + KernelCore* m_kernel{}; + std::array<bool, RegionsPerPage> m_is_region_free{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 49c0714ed..f9828bc43 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -52,7 +52,7 @@ namespace Kernel { struct KernelCore::Impl { explicit Impl(Core::System& system_, KernelCore& kernel_) - : time_manager{system_}, object_list_container{kernel_}, + : time_manager{system_}, service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {} void SetMulticore(bool is_multi) { @@ -60,6 +60,7 @@ struct KernelCore::Impl { } void Initialize(KernelCore& kernel) { + global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel); global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); global_handle_table = 
std::make_unique<Kernel::KHandleTable>(kernel); global_handle_table->Initialize(KHandleTable::MaxTableSize); @@ -70,14 +71,13 @@ struct KernelCore::Impl { // Derive the initial memory layout from the emulated board Init::InitializeSlabResourceCounts(kernel); - KMemoryLayout memory_layout; - DeriveInitialMemoryLayout(memory_layout); - Init::InitializeSlabHeaps(system, memory_layout); + DeriveInitialMemoryLayout(); + Init::InitializeSlabHeaps(system, *memory_layout); // Initialize kernel memory and resources. - InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); - InitializeMemoryLayout(memory_layout); - InitializePageSlab(); + InitializeSystemResourceLimit(kernel, system.CoreTiming()); + InitializeMemoryLayout(); + Init::InitializeKPageBufferSlabHeap(system); InitializeSchedulers(); InitializeSuspendThreads(); InitializePreemption(kernel); @@ -108,19 +108,6 @@ struct KernelCore::Impl { for (auto* server_port : server_ports_) { server_port->Close(); } - // Close all open server sessions. - std::unordered_set<KServerSession*> server_sessions_; - { - std::lock_guard lk(server_sessions_lock); - server_sessions_ = server_sessions; - server_sessions.clear(); - } - for (auto* server_session : server_sessions_) { - server_session->Close(); - } - - // Ensure that the object list container is finalized and properly shutdown. - object_list_container.Finalize(); // Ensures all service threads gracefully shutdown. ClearServiceThreads(); @@ -195,11 +182,15 @@ struct KernelCore::Impl { { std::lock_guard lk(registered_objects_lock); if (registered_objects.size()) { - LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!", - registered_objects.size()); + LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!", + registered_objects.size()); registered_objects.clear(); } } + + // Ensure that the object list container is finalized and properly shutdown. + global_object_list_container->Finalize(); + global_object_list_container.reset(); } void InitializePhysicalCores() { @@ -219,12 +210,11 @@ struct KernelCore::Impl { // Creates the default system resource limit void InitializeSystemResourceLimit(KernelCore& kernel, - const Core::Timing::CoreTiming& core_timing, - const KMemoryLayout& memory_layout) { + const Core::Timing::CoreTiming& core_timing) { system_resource_limit = KResourceLimit::Create(system.Kernel()); system_resource_limit->Initialize(&core_timing); - const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes(); + const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes(); // If setting the default system values fails, then something seriously wrong has occurred. ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size) @@ -240,13 +230,6 @@ struct KernelCore::Impl { constexpr u64 secure_applet_memory_size{4_MiB}; ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory, secure_applet_memory_size)); - - // This memory seems to be reserved on hardware, but is not reserved/used by yuzu. - // Likely Horizon OS reserved memory - // TODO(ameerj): Derive the memory rather than hardcode it. 
- constexpr u64 unknown_reserved_memory{0x2f896000}; - ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory, - unknown_reserved_memory)); } void InitializePreemption(KernelCore& kernel) { @@ -300,15 +283,16 @@ struct KernelCore::Impl { // Gets the dummy KThread for the caller, allocating a new one if this is the first time KThread* GetHostDummyThread() { - auto make_thread = [this]() { - KThread* thread = KThread::Create(system.Kernel()); + auto initialize = [this](KThread* thread) { ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); return thread; }; - thread_local KThread* saved_thread = make_thread(); - return saved_thread; + thread_local auto raw_thread = KThread(system.Kernel()); + thread_local auto thread = initialize(&raw_thread); + + return thread; } /// Registers a CPU core thread by allocating a host thread ID for it @@ -360,16 +344,18 @@ struct KernelCore::Impl { return schedulers[thread_id]->GetCurrentThread(); } - void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) { + void DeriveInitialMemoryLayout() { + memory_layout = std::make_unique<KMemoryLayout>(); + // Insert the root region for the virtual memory tree, from which all other regions will // derive. - memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( + memory_layout->GetVirtualMemoryRegionTree().InsertDirectly( KernelVirtualAddressSpaceBase, KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1); // Insert the root region for the physical memory tree, from which all other regions will // derive. - memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly( + memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly( KernelPhysicalAddressSpaceBase, KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); @@ -386,7 +372,7 @@ struct KernelCore::Impl { if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; } - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); // Setup the code region. @@ -395,11 +381,11 @@ struct KernelCore::Impl { Common::AlignDown(code_start_virt_addr, CodeRegionAlign); constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); constexpr size_t code_region_size = code_region_end - code_region_start; - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( code_region_start, code_region_size, KMemoryRegionType_KernelCode)); // Setup board-specific device physical regions. - Init::SetupDevicePhysicalMemoryRegions(memory_layout); + Init::SetupDevicePhysicalMemoryRegions(*memory_layout); // Determine the amount of space needed for the misc region. size_t misc_region_needed_size; @@ -408,7 +394,7 @@ struct KernelCore::Impl { misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize)); // Account for each auto-map device. - for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { // Check that the region is valid. ASSERT(region.GetEndAddress() != 0); @@ -433,22 +419,22 @@ struct KernelCore::Impl { // Setup the misc region. 
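            // GetRandomAlignedRegion below picks a randomized (kernel ASLR) slot of
            // misc_region_size within the kernel region, and the Insert that follows
            // tags it as KernelMisc so later derivations see a distinct region.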
const VAddr misc_region_start = - memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); // Setup the stack region. constexpr size_t StackRegionSize = 14_MiB; constexpr size_t StackRegionAlign = KernelAslrAlignment; const VAddr stack_region_start = - memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); // Determine the size of the resource region. - const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); + const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); // Determine the size of the slab region. const size_t slab_region_size = @@ -465,23 +451,23 @@ struct KernelCore::Impl { Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - Common::AlignDown(code_end_phys_addr, SlabRegionAlign); const VAddr slab_region_start = - memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + (code_end_phys_addr % SlabRegionAlign); - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); // Setup the temp region. constexpr size_t TempRegionSize = 128_MiB; constexpr size_t TempRegionAlign = KernelAslrAlignment; const VAddr temp_region_start = - memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( + memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, - KMemoryRegionType_KernelTemp)); + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, + KMemoryRegionType_KernelTemp)); // Automatically map in devices that have auto-map attributes. - for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { // We only care about kernel regions. 
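            // Each device region flagged ShouldKernelMap receives a page-aligned window
            // in KernelMiscMappedDevice space, and SetPairAddress records on the
            // physical region where that window landed in virtual space.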
if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) { continue; @@ -508,21 +494,21 @@ struct KernelCore::Impl { const size_t map_size = Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; const VAddr map_virt_addr = - memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( + memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); } - Init::SetupDramPhysicalMemoryRegions(memory_layout); + Init::SetupDramPhysicalMemoryRegions(*memory_layout); // Insert a physical region for the kernel code region. - ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); // Insert a physical region for the kernel slab region. - ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); // Determine size available for kernel page table heaps, requiring > 8 MB. @@ -531,12 +517,12 @@ struct KernelCore::Impl { ASSERT(page_table_heap_size / 4_MiB > 2); // Insert a physical region for the kernel page table heap region - ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); // All DRAM regions that we haven't tagged by this point will be mapped under the linear // mapping. Tag them. - for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { if (region.GetType() == KMemoryRegionType_Dram) { // Check that the region is valid. ASSERT(region.GetEndAddress() != 0); @@ -548,7 +534,7 @@ struct KernelCore::Impl { // Get the linear region extents. 
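            // The linear map gives the kernel one contiguous window over all
            // LinearMapped DRAM: a single physical-to-virtual offset
            // (linear_region_phys_to_virt_diff, computed below) converts between the
            // two address spaces with plain arithmetic.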
const auto linear_extents = - memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( + memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( KMemoryRegionAttr_LinearMapped); ASSERT(linear_extents.GetEndAddress() != 0); @@ -560,7 +546,7 @@ struct KernelCore::Impl { Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - aligned_linear_phys_start; const VAddr linear_region_start = - memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( + memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; @@ -569,7 +555,7 @@ struct KernelCore::Impl { { PAddr cur_phys_addr = 0; u64 cur_size = 0; - for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { continue; } @@ -588,55 +574,49 @@ struct KernelCore::Impl { const VAddr region_virt_addr = region.GetAddress() + linear_region_phys_to_virt_diff; - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( region_virt_addr, region.GetSize(), GetTypeForVirtualLinearMapping(region.GetType()))); region.SetPairAddress(region_virt_addr); KMemoryRegion* virt_region = - memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); + memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); ASSERT(virt_region != nullptr); virt_region->SetPairAddress(region.GetAddress()); } } // Insert regions for the initial page table region. - ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( + ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); - ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( + ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, KMemoryRegionType_VirtualDramKernelInitPt)); // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to // some pool partition. Tag them. - for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { + for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) { region.SetType(KMemoryRegionType_DramPoolPartition); } } // Setup all other memory regions needed to arrange the pool partitions. - Init::SetupPoolPartitionMemoryRegions(memory_layout); + Init::SetupPoolPartitionMemoryRegions(*memory_layout); // Cache all linear regions in their own trees for faster access, later. 
- memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, - linear_region_start); + memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, + linear_region_start); } - void InitializeMemoryLayout(const KMemoryLayout& memory_layout) { - const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); - const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); - const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); + void InitializeMemoryLayout() { + const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents(); - // Initialize memory managers + // Initialize the memory manager. memory_manager = std::make_unique<KMemoryManager>(system); - memory_manager->InitializeManager(KMemoryManager::Pool::Application, - application_pool.GetAddress(), - application_pool.GetEndAddress()); - memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(), - applet_pool.GetEndAddress()); - memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(), - system_pool.GetEndAddress()); + const auto& management_region = memory_layout->GetPoolManagementRegion(); + ASSERT(management_region.GetEndAddress() != 0); + memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize()); // Setup memory regions for emulated processes // TODO(bunnei): These should not be hardcoded regions initialized within the kernel @@ -673,22 +653,6 @@ struct KernelCore::Impl { time_phys_addr, time_size, "Time:SharedMemory"); } - void InitializePageSlab() { - // Allocate slab heaps - user_slab_heap_pages = - std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest); - - // TODO(ameerj): This should be derived, not hardcoded within the kernel - constexpr u64 user_slab_heap_size{0x3de000}; - // Reserve slab heaps - ASSERT( - system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size)); - // Initialize slab heap - user_slab_heap_pages->Initialize( - system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), - user_slab_heap_size); - } - KClientPort* CreateNamedServicePort(std::string name) { auto search = service_interface_factory.find(name); if (search == service_interface_factory.end()) { @@ -726,7 +690,6 @@ struct KernelCore::Impl { } std::mutex server_ports_lock; - std::mutex server_sessions_lock; std::mutex registered_objects_lock; std::mutex registered_in_use_objects_lock; @@ -750,14 +713,13 @@ struct KernelCore::Impl { // stores all the objects in place. std::unique_ptr<KHandleTable> global_handle_table; - KAutoObjectWithListContainer object_list_container; + std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container; /// Map of named ports managed by the kernel, which can be retrieved using /// the ConnectToPort SVC. 
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory; NamedPortTable named_ports; std::unordered_set<KServerPort*> server_ports; - std::unordered_set<KServerSession*> server_sessions; std::unordered_set<KAutoObject*> registered_objects; std::unordered_set<KAutoObject*> registered_in_use_objects; @@ -769,7 +731,6 @@ struct KernelCore::Impl { // Kernel memory management std::unique_ptr<KMemoryManager> memory_manager; - std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages; // Shared memory for services Kernel::KSharedMemory* hid_shared_mem{}; @@ -777,6 +738,9 @@ struct KernelCore::Impl { Kernel::KSharedMemory* irs_shared_mem{}; Kernel::KSharedMemory* time_shared_mem{}; + // Memory layout + std::unique_ptr<KMemoryLayout> memory_layout; + // Threads used for services std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; Common::ThreadWorker service_threads_manager; @@ -925,11 +889,11 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { } KAutoObjectWithListContainer& KernelCore::ObjectListContainer() { - return impl->object_list_container; + return *impl->global_object_list_container; } const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const { - return impl->object_list_container; + return *impl->global_object_list_container; } void KernelCore::InvalidateAllInstructionCaches() { @@ -959,16 +923,6 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) { return impl->CreateNamedServicePort(std::move(name)); } -void KernelCore::RegisterServerSession(KServerSession* server_session) { - std::lock_guard lk(impl->server_sessions_lock); - impl->server_sessions.insert(server_session); -} - -void KernelCore::UnregisterServerSession(KServerSession* server_session) { - std::lock_guard lk(impl->server_sessions_lock); - impl->server_sessions.erase(server_session); -} - void KernelCore::RegisterKernelObject(KAutoObject* object) { std::lock_guard lk(impl->registered_objects_lock); impl->registered_objects.insert(object); @@ -1041,14 +995,6 @@ const KMemoryManager& KernelCore::MemoryManager() const { return *impl->memory_manager; } -KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() { - return *impl->user_slab_heap_pages; -} - -const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const { - return *impl->user_slab_heap_pages; -} - Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { return *impl->hid_shared_mem; } @@ -1142,6 +1088,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const { return impl->worker_task_manager; } +const KMemoryLayout& KernelCore::MemoryLayout() const { + return *impl->memory_layout; +} + bool KernelCore::IsPhantomModeForSingleCore() const { return impl->IsPhantomModeForSingleCore(); } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 0e04fc3bb..7087bbda6 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -41,7 +41,9 @@ class KClientSession; class KEvent; class KHandleTable; class KLinkedListNode; +class KMemoryLayout; class KMemoryManager; +class KPageBuffer; class KPort; class KProcess; class KResourceLimit; @@ -51,6 +53,7 @@ class KSession; class KSharedMemory; class KSharedMemoryInfo; class KThread; +class KThreadLocalPage; class KTransferMemory; class KWorkerTaskManager; class KWritableEvent; @@ -193,14 +196,6 @@ public: /// Opens a port to a service previously registered with RegisterNamedService. 
KClientPort* CreateNamedServicePort(std::string name); - /// Registers a server session with the gobal emulation state, to be freed on shutdown. This is - /// necessary because we do not emulate processes for HLE sessions. - void RegisterServerSession(KServerSession* server_session); - - /// Unregisters a server session previously registered with RegisterServerSession when it was - /// destroyed during the current emulation session. - void UnregisterServerSession(KServerSession* server_session); - /// Registers all kernel objects with the global emulation state, this is purely for tracking /// leaks after emulation has been shutdown. void RegisterKernelObject(KAutoObject* object); @@ -238,12 +233,6 @@ public: /// Gets the virtual memory manager for the kernel. const KMemoryManager& MemoryManager() const; - /// Gets the slab heap allocated for user space pages. - KSlabHeap<Page>& GetUserSlabHeapPages(); - - /// Gets the slab heap allocated for user space pages. - const KSlabHeap<Page>& GetUserSlabHeapPages() const; - /// Gets the shared memory object for HID services. Kernel::KSharedMemory& GetHidSharedMem(); @@ -335,6 +324,10 @@ public: return slab_heap_container->writeable_event; } else if constexpr (std::is_same_v<T, KCodeMemory>) { return slab_heap_container->code_memory; + } else if constexpr (std::is_same_v<T, KPageBuffer>) { + return slab_heap_container->page_buffer; + } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { + return slab_heap_container->thread_local_page; } } @@ -350,6 +343,9 @@ public: /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks. const KWorkerTaskManager& WorkerTaskManager() const; + /// Gets the memory layout. + const KMemoryLayout& MemoryLayout() const; + private: friend class KProcess; friend class KThread; @@ -393,6 +389,8 @@ private: KSlabHeap<KTransferMemory> transfer_memory; KSlabHeap<KWritableEvent> writeable_event; KSlabHeap<KCodeMemory> code_memory; + KSlabHeap<KPageBuffer> page_buffer; + KSlabHeap<KThreadLocalPage> thread_local_page; }; std::unique_ptr<SlabHeapContainer> slab_heap_container; diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 4eb3a5988..52d25b837 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -49,12 +49,9 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std return; } + // Allocate a dummy guest thread for this host thread. kernel.RegisterHostThread(); - // Ensure the dummy thread allocated for this host thread is closed on exit. - auto* dummy_thread = kernel.GetCurrentEmuThread(); - SCOPE_EXIT({ dummy_thread->Close(); }); - while (true) { std::function<void()> task; diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index f1c11256e..dc1e48fc9 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h @@ -59,7 +59,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base { private: static Derived* Allocate(KernelCore& kernel) { - return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel); + return kernel.SlabHeap<Derived>().Allocate(kernel); } static void Free(KernelCore& kernel, Derived* obj) { diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 40bb893ac..839171e85 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -396,7 +396,7 @@ static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle // Get the process id. 
*out_process_id = process->GetId(); - return ResultInvalidHandle; + return ResultSuccess; } static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low, @@ -645,6 +645,10 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) { LOG_DEBUG(Debug_Emulated, "{}", str); } +static void OutputDebugString32(Core::System& system, u32 address, u32 len) { + OutputDebugString(system, address, len); +} + /// Gets system/memory information for the current process static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id) { @@ -1404,7 +1408,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha } static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) { - LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}", + LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}", static_cast<void*>(out), address, size); // Get kernel instance. auto& kernel = system.Kernel(); @@ -1438,6 +1442,10 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr return ResultSuccess; } +static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) { + return CreateCodeMemory(system, out, address, size); +} + static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation, VAddr address, size_t size, Svc::MemoryPermission perm) { @@ -1517,6 +1525,12 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han return ResultSuccess; } +static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle, + u32 operation, u64 address, u64 size, + Svc::MemoryPermission perm) { + return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm); +} + static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address, Handle process_handle, VAddr address) { @@ -2318,7 +2332,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o R_UNLESS(event != nullptr, ResultOutOfResource); // Initialize the event. - event->Initialize("CreateEvent"); + event->Initialize("CreateEvent", kernel.CurrentProcess()); // Commit the thread reservation. 
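    // (Reserve-then-commit: the scoped reservation releases its slot on destruction
    //  unless Commit() is called, so the failure paths above roll back automatically.)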
event_reservation.Commit(); @@ -2559,9 +2573,9 @@ struct FunctionDef { } // namespace static const FunctionDef SVC_Table_32[] = { - {0x00, nullptr, "Unknown"}, + {0x00, nullptr, "Unknown0"}, {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"}, - {0x02, nullptr, "Unknown"}, + {0x02, nullptr, "SetMemoryPermission32"}, {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"}, {0x04, SvcWrap32<MapMemory32>, "MapMemory32"}, {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"}, @@ -2591,97 +2605,97 @@ static const FunctionDef SVC_Table_32[] = { {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"}, {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"}, {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"}, - {0x20, nullptr, "Unknown"}, + {0x20, nullptr, "SendSyncRequestLight32"}, {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"}, {0x22, nullptr, "SendSyncRequestWithUserBuffer32"}, - {0x23, nullptr, "Unknown"}, + {0x23, nullptr, "SendAsyncRequestWithUserBuffer32"}, {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"}, {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"}, {0x26, SvcWrap32<Break32>, "Break32"}, - {0x27, nullptr, "OutputDebugString32"}, - {0x28, nullptr, "Unknown"}, + {0x27, SvcWrap32<OutputDebugString32>, "OutputDebugString32"}, + {0x28, nullptr, "ReturnFromException32"}, {0x29, SvcWrap32<GetInfo32>, "GetInfo32"}, - {0x2a, nullptr, "Unknown"}, - {0x2b, nullptr, "Unknown"}, + {0x2a, nullptr, "FlushEntireDataCache32"}, + {0x2b, nullptr, "FlushDataCache32"}, {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"}, {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"}, - {0x2e, nullptr, "Unknown"}, - {0x2f, nullptr, "Unknown"}, - {0x30, nullptr, "Unknown"}, - {0x31, nullptr, "Unknown"}, + {0x2e, nullptr, "GetDebugFutureThreadInfo32"}, + {0x2f, nullptr, "GetLastThreadInfo32"}, + {0x30, nullptr, "GetResourceLimitLimitValue32"}, + {0x31, nullptr, "GetResourceLimitCurrentValue32"}, {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"}, {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"}, {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"}, {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"}, - {0x36, nullptr, "Unknown"}, - {0x37, nullptr, "Unknown"}, - {0x38, nullptr, "Unknown"}, - {0x39, nullptr, "Unknown"}, - {0x3a, nullptr, "Unknown"}, - {0x3b, nullptr, "Unknown"}, - {0x3c, nullptr, "Unknown"}, - {0x3d, nullptr, "Unknown"}, - {0x3e, nullptr, "Unknown"}, - {0x3f, nullptr, "Unknown"}, + {0x36, SvcWrap32<SynchronizePreemptionState>, "SynchronizePreemptionState32"}, + {0x37, nullptr, "GetResourceLimitPeakValue32"}, + {0x38, nullptr, "Unknown38"}, + {0x39, nullptr, "CreateIoPool32"}, + {0x3a, nullptr, "CreateIoRegion32"}, + {0x3b, nullptr, "Unknown3b"}, + {0x3c, nullptr, "KernelDebug32"}, + {0x3d, nullptr, "ChangeKernelTraceState32"}, + {0x3e, nullptr, "Unknown3e"}, + {0x3f, nullptr, "Unknown3f"}, {0x40, nullptr, "CreateSession32"}, {0x41, nullptr, "AcceptSession32"}, - {0x42, nullptr, "Unknown"}, + {0x42, nullptr, "ReplyAndReceiveLight32"}, {0x43, nullptr, "ReplyAndReceive32"}, - {0x44, nullptr, "Unknown"}, + {0x44, nullptr, "ReplyAndReceiveWithUserBuffer32"}, {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"}, - {0x46, nullptr, "Unknown"}, - {0x47, nullptr, "Unknown"}, - {0x48, nullptr, "Unknown"}, - {0x49, nullptr, "Unknown"}, - {0x4a, nullptr, "Unknown"}, - {0x4b, nullptr, "Unknown"}, - {0x4c, nullptr, "Unknown"}, - {0x4d, nullptr, "Unknown"}, - {0x4e, nullptr, "Unknown"}, - {0x4f, nullptr, "Unknown"}, - {0x50, 
nullptr, "Unknown"}, - {0x51, nullptr, "Unknown"}, - {0x52, nullptr, "Unknown"}, - {0x53, nullptr, "Unknown"}, - {0x54, nullptr, "Unknown"}, - {0x55, nullptr, "Unknown"}, - {0x56, nullptr, "Unknown"}, - {0x57, nullptr, "Unknown"}, - {0x58, nullptr, "Unknown"}, - {0x59, nullptr, "Unknown"}, - {0x5a, nullptr, "Unknown"}, - {0x5b, nullptr, "Unknown"}, - {0x5c, nullptr, "Unknown"}, - {0x5d, nullptr, "Unknown"}, - {0x5e, nullptr, "Unknown"}, + {0x46, nullptr, "MapIoRegion32"}, + {0x47, nullptr, "UnmapIoRegion32"}, + {0x48, nullptr, "MapPhysicalMemoryUnsafe32"}, + {0x49, nullptr, "UnmapPhysicalMemoryUnsafe32"}, + {0x4a, nullptr, "SetUnsafeLimit32"}, + {0x4b, SvcWrap32<CreateCodeMemory32>, "CreateCodeMemory32"}, + {0x4c, SvcWrap32<ControlCodeMemory32>, "ControlCodeMemory32"}, + {0x4d, nullptr, "SleepSystem32"}, + {0x4e, nullptr, "ReadWriteRegister32"}, + {0x4f, nullptr, "SetProcessActivity32"}, + {0x50, nullptr, "CreateSharedMemory32"}, + {0x51, nullptr, "MapTransferMemory32"}, + {0x52, nullptr, "UnmapTransferMemory32"}, + {0x53, nullptr, "CreateInterruptEvent32"}, + {0x54, nullptr, "QueryPhysicalAddress32"}, + {0x55, nullptr, "QueryIoMapping32"}, + {0x56, nullptr, "CreateDeviceAddressSpace32"}, + {0x57, nullptr, "AttachDeviceAddressSpace32"}, + {0x58, nullptr, "DetachDeviceAddressSpace32"}, + {0x59, nullptr, "MapDeviceAddressSpaceByForce32"}, + {0x5a, nullptr, "MapDeviceAddressSpaceAligned32"}, + {0x5b, nullptr, "MapDeviceAddressSpace32"}, + {0x5c, nullptr, "UnmapDeviceAddressSpace32"}, + {0x5d, nullptr, "InvalidateProcessDataCache32"}, + {0x5e, nullptr, "StoreProcessDataCache32"}, {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"}, - {0x60, nullptr, "Unknown"}, - {0x61, nullptr, "Unknown"}, - {0x62, nullptr, "Unknown"}, - {0x63, nullptr, "Unknown"}, - {0x64, nullptr, "Unknown"}, + {0x60, nullptr, "StoreProcessDataCache32"}, + {0x61, nullptr, "BreakDebugProcess32"}, + {0x62, nullptr, "TerminateDebugProcess32"}, + {0x63, nullptr, "GetDebugEvent32"}, + {0x64, nullptr, "ContinueDebugEvent32"}, {0x65, nullptr, "GetProcessList32"}, - {0x66, nullptr, "Unknown"}, - {0x67, nullptr, "Unknown"}, - {0x68, nullptr, "Unknown"}, - {0x69, nullptr, "Unknown"}, - {0x6A, nullptr, "Unknown"}, - {0x6B, nullptr, "Unknown"}, - {0x6C, nullptr, "Unknown"}, - {0x6D, nullptr, "Unknown"}, - {0x6E, nullptr, "Unknown"}, + {0x66, nullptr, "GetThreadList"}, + {0x67, nullptr, "GetDebugThreadContext32"}, + {0x68, nullptr, "SetDebugThreadContext32"}, + {0x69, nullptr, "QueryDebugProcessMemory32"}, + {0x6A, nullptr, "ReadDebugProcessMemory32"}, + {0x6B, nullptr, "WriteDebugProcessMemory32"}, + {0x6C, nullptr, "SetHardwareBreakPoint32"}, + {0x6D, nullptr, "GetDebugThreadParam32"}, + {0x6E, nullptr, "Unknown6E"}, {0x6f, nullptr, "GetSystemInfo32"}, {0x70, nullptr, "CreatePort32"}, {0x71, nullptr, "ManageNamedPort32"}, {0x72, nullptr, "ConnectToPort32"}, {0x73, nullptr, "SetProcessMemoryPermission32"}, - {0x74, nullptr, "Unknown"}, - {0x75, nullptr, "Unknown"}, - {0x76, nullptr, "Unknown"}, + {0x74, nullptr, "MapProcessMemory32"}, + {0x75, nullptr, "UnmapProcessMemory32"}, + {0x76, nullptr, "QueryProcessMemory32"}, {0x77, nullptr, "MapProcessCodeMemory32"}, {0x78, nullptr, "UnmapProcessCodeMemory32"}, - {0x79, nullptr, "Unknown"}, - {0x7A, nullptr, "Unknown"}, + {0x79, nullptr, "CreateProcess32"}, + {0x7A, nullptr, "StartProcess32"}, {0x7B, nullptr, "TerminateProcess32"}, {0x7C, nullptr, "GetProcessInfo32"}, {0x7D, nullptr, "CreateResourceLimit32"}, @@ -2754,7 +2768,7 @@ static const FunctionDef SVC_Table_32[] = { 
}; static const FunctionDef SVC_Table_64[] = { - {0x00, nullptr, "Unknown"}, + {0x00, nullptr, "Unknown0"}, {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"}, {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"}, {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"}, @@ -2809,23 +2823,23 @@ static const FunctionDef SVC_Table_64[] = { {0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"}, {0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"}, {0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"}, - {0x37, nullptr, "Unknown"}, - {0x38, nullptr, "Unknown"}, - {0x39, nullptr, "Unknown"}, - {0x3A, nullptr, "Unknown"}, - {0x3B, nullptr, "Unknown"}, + {0x37, nullptr, "GetResourceLimitPeakValue"}, + {0x38, nullptr, "Unknown38"}, + {0x39, nullptr, "CreateIoPool"}, + {0x3A, nullptr, "CreateIoRegion"}, + {0x3B, nullptr, "Unknown3B"}, {0x3C, SvcWrap64<KernelDebug>, "KernelDebug"}, {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"}, - {0x3E, nullptr, "Unknown"}, - {0x3F, nullptr, "Unknown"}, + {0x3E, nullptr, "Unknown3e"}, + {0x3F, nullptr, "Unknown3f"}, {0x40, nullptr, "CreateSession"}, {0x41, nullptr, "AcceptSession"}, {0x42, nullptr, "ReplyAndReceiveLight"}, {0x43, nullptr, "ReplyAndReceive"}, {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"}, {0x45, SvcWrap64<CreateEvent>, "CreateEvent"}, - {0x46, nullptr, "Unknown"}, - {0x47, nullptr, "Unknown"}, + {0x46, nullptr, "MapIoRegion"}, + {0x47, nullptr, "UnmapIoRegion"}, {0x48, nullptr, "MapPhysicalMemoryUnsafe"}, {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"}, {0x4A, nullptr, "SetUnsafeLimit"}, @@ -2864,7 +2878,7 @@ static const FunctionDef SVC_Table_64[] = { {0x6B, nullptr, "WriteDebugProcessMemory"}, {0x6C, nullptr, "SetHardwareBreakPoint"}, {0x6D, nullptr, "GetDebugThreadParam"}, - {0x6E, nullptr, "Unknown"}, + {0x6E, nullptr, "Unknown6E"}, {0x6F, nullptr, "GetSystemInfo"}, {0x70, nullptr, "CreatePort"}, {0x71, nullptr, "ManageNamedPort"}, diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 365e22e4e..b2e9ec092 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -96,4 +96,6 @@ constexpr inline s32 IdealCoreNoUpdate = -3; constexpr inline s32 LowestThreadPriority = 63; constexpr inline s32 HighestThreadPriority = 0; +constexpr inline size_t ThreadLocalRegionSize = 0x200; + } // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index a60adfcab..d309f166c 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -669,4 +669,26 @@ void SvcWrap32(Core::System& system) { FuncReturn(system, retval); } +// Used by CreateCodeMemory32 +template <ResultCode func(Core::System&, Handle*, u32, u32)> +void SvcWrap32(Core::System& system) { + Handle handle = 0; + + const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2)).raw; + + system.CurrentArmInterface().SetReg(1, handle); + FuncReturn(system, retval); +} + +// Used by ControlCodeMemory32 +template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)> +void SvcWrap32(Core::System& system) { + const u32 retval = + func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4), + static_cast<Svc::MemoryPermission>(Param32(system, 6))) + .raw; + + FuncReturn(system, retval); +} + } // namespace Kernel diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp index 6e63e057e..e34ef5a78 100644 --- a/src/core/hle/service/acc/acc.cpp +++ 
b/src/core/hle/service/acc/acc.cpp @@ -39,9 +39,9 @@ constexpr ResultCode ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100}; // Thumbnails are hard coded to be at least this size constexpr std::size_t THUMBNAIL_SIZE = 0x24000; -static std::filesystem::path GetImagePath(Common::UUID uuid) { +static std::filesystem::path GetImagePath(const Common::UUID& uuid) { return Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir) / - fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormatSwitch()); + fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormattedString()); } static constexpr u32 SanitizeJPEGSize(std::size_t size) { @@ -290,7 +290,7 @@ public: protected: void Get(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.Format()); + LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.RawString()); ProfileBase profile_base{}; ProfileData data{}; if (profile_manager.GetProfileBaseAndData(user_id, profile_base, data)) { @@ -300,21 +300,21 @@ protected: rb.PushRaw(profile_base); } else { LOG_ERROR(Service_ACC, "Failed to get profile base and data for user=0x{}", - user_id.Format()); + user_id.RawString()); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultUnknown); // TODO(ogniK): Get actual error code } } void GetBase(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.Format()); + LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.RawString()); ProfileBase profile_base{}; if (profile_manager.GetProfileBase(user_id, profile_base)) { IPC::ResponseBuilder rb{ctx, 16}; rb.Push(ResultSuccess); rb.PushRaw(profile_base); } else { - LOG_ERROR(Service_ACC, "Failed to get profile base for user=0x{}", user_id.Format()); + LOG_ERROR(Service_ACC, "Failed to get profile base for user=0x{}", user_id.RawString()); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultUnknown); // TODO(ogniK): Get actual error code } @@ -373,7 +373,7 @@ protected: LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}", Common::StringFromFixedZeroTerminatedBuffer( reinterpret_cast<const char*>(base.username.data()), base.username.size()), - base.timestamp, base.user_uuid.Format()); + base.timestamp, base.user_uuid.RawString()); if (user_data.size() < sizeof(ProfileData)) { LOG_ERROR(Service_ACC, "ProfileData buffer too small!"); @@ -406,7 +406,7 @@ protected: LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}", Common::StringFromFixedZeroTerminatedBuffer( reinterpret_cast<const char*>(base.username.data()), base.username.size()), - base.timestamp, base.user_uuid.Format()); + base.timestamp, base.user_uuid.RawString()); if (user_data.size() < sizeof(ProfileData)) { LOG_ERROR(Service_ACC, "ProfileData buffer too small!"); @@ -435,7 +435,7 @@ protected: } ProfileManager& profile_manager; - Common::UUID user_id{Common::INVALID_UUID}; ///< The user id this profile refers to. + Common::UUID user_id{}; ///< The user id this profile refers to. 
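    // A default-constructed Common::UUID is now the invalid all-zero value, replacing
    // the old explicit Common::INVALID_UUID u128 sentinel; validity checks go through
    // IsValid()/IsInvalid() and formatting through RawString()/FormattedString().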
}; class IProfile final : public IProfileCommon { @@ -547,7 +547,7 @@ private: IPC::ResponseBuilder rb{ctx, 4}; rb.Push(ResultSuccess); - rb.PushRaw<u64>(user_id.GetNintendoID()); + rb.PushRaw<u64>(user_id.Hash()); } void EnsureIdTokenCacheAsync(Kernel::HLERequestContext& ctx) { @@ -577,7 +577,7 @@ private: IPC::ResponseBuilder rb{ctx, 4}; rb.Push(ResultSuccess); - rb.PushRaw<u64>(user_id.GetNintendoID()); + rb.PushRaw<u64>(user_id.Hash()); } void StoreOpenContext(Kernel::HLERequestContext& ctx) { @@ -587,7 +587,7 @@ private: } std::shared_ptr<EnsureTokenIdCacheAsyncInterface> ensure_token_id{}; - Common::UUID user_id{Common::INVALID_UUID}; + Common::UUID user_id{}; }; // 6.0.0+ @@ -687,7 +687,7 @@ void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) { void Module::Interface::GetUserExistence(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; Common::UUID user_id = rp.PopRaw<Common::UUID>(); - LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.Format()); + LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.RawString()); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); @@ -718,7 +718,7 @@ void Module::Interface::GetLastOpenedUser(Kernel::HLERequestContext& ctx) { void Module::Interface::GetProfile(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; Common::UUID user_id = rp.PopRaw<Common::UUID>(); - LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.Format()); + LOG_DEBUG(Service_ACC, "called user_id=0x{}", user_id.RawString()); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(ResultSuccess); @@ -833,7 +833,7 @@ void Module::Interface::GetProfileEditor(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; Common::UUID user_id = rp.PopRaw<Common::UUID>(); - LOG_DEBUG(Service_ACC, "called, user_id=0x{}", user_id.Format()); + LOG_DEBUG(Service_ACC, "called, user_id=0x{}", user_id.RawString()); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(ResultSuccess); @@ -875,7 +875,7 @@ void Module::Interface::StoreSaveDataThumbnailApplication(Kernel::HLERequestCont IPC::RequestParser rp{ctx}; const auto uuid = rp.PopRaw<Common::UUID>(); - LOG_WARNING(Service_ACC, "(STUBBED) called, uuid=0x{}", uuid.Format()); + LOG_WARNING(Service_ACC, "(STUBBED) called, uuid=0x{}", uuid.RawString()); // TODO(ogniK): Check if application ID is zero on acc initialize. 
As we don't have a reliable // way of confirming things like the TID, we're going to assume a non zero value for the time @@ -889,7 +889,7 @@ void Module::Interface::StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& const auto uuid = rp.PopRaw<Common::UUID>(); const auto tid = rp.Pop<u64_le>(); - LOG_WARNING(Service_ACC, "(STUBBED) called, uuid=0x{}, tid={:016X}", uuid.Format(), tid); + LOG_WARNING(Service_ACC, "(STUBBED) called, uuid=0x{}, tid={:016X}", uuid.RawString(), tid); StoreSaveDataThumbnail(ctx, uuid, tid); } @@ -903,7 +903,7 @@ void Module::Interface::StoreSaveDataThumbnail(Kernel::HLERequestContext& ctx, return; } - if (!uuid) { + if (uuid.IsInvalid()) { LOG_ERROR(Service_ACC, "User ID is not valid!"); rb.Push(ERR_INVALID_USER_ID); return; @@ -927,20 +927,20 @@ void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContex IPC::ResponseBuilder rb{ctx, 6}; if (profile_manager->GetUserCount() != 1) { rb.Push(ResultSuccess); - rb.PushRaw<u128>(Common::INVALID_UUID); + rb.PushRaw(Common::InvalidUUID); return; } const auto user_list = profile_manager->GetAllUsers(); if (std::ranges::all_of(user_list, [](const auto& user) { return user.IsInvalid(); })) { rb.Push(ResultUnknown); // TODO(ogniK): Find the correct error code - rb.PushRaw<u128>(Common::INVALID_UUID); + rb.PushRaw(Common::InvalidUUID); return; } // Select the first user we have rb.Push(ResultSuccess); - rb.PushRaw<u128>(profile_manager->GetUser(0)->uuid); + rb.PushRaw(profile_manager->GetUser(0)->uuid); } Module::Interface::Interface(std::shared_ptr<Module> module_, diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp index 568303ced..fba847142 100644 --- a/src/core/hle/service/acc/profile_manager.cpp +++ b/src/core/hle/service/acc/profile_manager.cpp @@ -19,8 +19,8 @@ namespace FS = Common::FS; using Common::UUID; struct UserRaw { - UUID uuid{Common::INVALID_UUID}; - UUID uuid2{Common::INVALID_UUID}; + UUID uuid{}; + UUID uuid2{}; u64 timestamp{}; ProfileUsername username{}; ProfileData extra_data{}; @@ -45,7 +45,7 @@ ProfileManager::ProfileManager() { // Create an user if none are present if (user_count == 0) { - CreateNewUser(UUID::Generate(), "yuzu"); + CreateNewUser(UUID::MakeRandom(), "yuzu"); } auto current = @@ -101,7 +101,7 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& usern if (user_count == MAX_USERS) { return ERROR_TOO_MANY_USERS; } - if (!uuid) { + if (uuid.IsInvalid()) { return ERROR_ARGUMENT_IS_NULL; } if (username[0] == 0x0) { @@ -145,7 +145,7 @@ std::optional<UUID> ProfileManager::GetUser(std::size_t index) const { /// Returns a users profile index based on their user id. 
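/// A hypothetical caller, since the result is an optional index:
///   if (const auto index = manager.GetUserIndex(uuid)) { /* profile slot at *index */ }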
std::optional<std::size_t> ProfileManager::GetUserIndex(const UUID& uuid) const { - if (!uuid) { + if (uuid.IsInvalid()) { return std::nullopt; } @@ -250,9 +250,10 @@ UserIDArray ProfileManager::GetOpenUsers() const { std::ranges::transform(profiles, output.begin(), [](const ProfileInfo& p) { if (p.is_open) return p.user_uuid; - return UUID{Common::INVALID_UUID}; + return Common::InvalidUUID; }); - std::stable_partition(output.begin(), output.end(), [](const UUID& uuid) { return uuid; }); + std::stable_partition(output.begin(), output.end(), + [](const UUID& uuid) { return uuid.IsValid(); }); return output; } @@ -299,7 +300,7 @@ bool ProfileManager::RemoveUser(UUID uuid) { profiles[*index] = ProfileInfo{}; std::stable_partition(profiles.begin(), profiles.end(), - [](const ProfileInfo& profile) { return profile.user_uuid; }); + [](const ProfileInfo& profile) { return profile.user_uuid.IsValid(); }); return true; } @@ -361,7 +362,7 @@ void ProfileManager::ParseUserSaveFile() { } std::stable_partition(profiles.begin(), profiles.end(), - [](const ProfileInfo& profile) { return profile.user_uuid; }); + [](const ProfileInfo& profile) { return profile.user_uuid.IsValid(); }); } void ProfileManager::WriteUserSaveFile() { diff --git a/src/core/hle/service/acc/profile_manager.h b/src/core/hle/service/acc/profile_manager.h index 71b9d5518..17347f7ef 100644 --- a/src/core/hle/service/acc/profile_manager.h +++ b/src/core/hle/service/acc/profile_manager.h @@ -35,7 +35,7 @@ static_assert(sizeof(ProfileData) == 0x80, "ProfileData structure has incorrect /// This holds general information about a users profile. This is where we store all the information /// based on a specific user struct ProfileInfo { - Common::UUID user_uuid{Common::INVALID_UUID}; + Common::UUID user_uuid{}; ProfileUsername username{}; u64 creation_time{}; ProfileData data{}; // TODO(ognik): Work out what this is @@ -49,7 +49,7 @@ struct ProfileBase { // Zero out all the fields to make the profile slot considered "Empty" void Invalidate() { - user_uuid.Invalidate(); + user_uuid = {}; timestamp = 0; username.fill(0); } @@ -103,7 +103,7 @@ private: std::array<ProfileInfo, MAX_USERS> profiles{}; std::size_t user_count{}; - Common::UUID last_opened_user{Common::INVALID_UUID}; + Common::UUID last_opened_user{}; }; }; // namespace Service::Account diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index e60661fe1..420de3c54 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -55,7 +55,7 @@ constexpr u32 LAUNCH_PARAMETER_ACCOUNT_PRESELECTED_USER_MAGIC = 0xC79497CA; struct LaunchParameterAccountPreselectedUser { u32_le magic; u32_le is_account_selected; - u128 current_user; + Common::UUID current_user; INSERT_PADDING_BYTES(0x70); }; static_assert(sizeof(LaunchParameterAccountPreselectedUser) == 0x88); @@ -618,7 +618,7 @@ void AppletMessageQueue::PushMessage(AppletMessage msg) { AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() { if (messages.empty()) { on_new_message->GetWritableEvent().Clear(); - return AppletMessage::NoMessage; + return AppletMessage::None; } auto msg = messages.front(); messages.pop(); @@ -633,7 +633,7 @@ std::size_t AppletMessageQueue::GetMessageCount() const { } void AppletMessageQueue::RequestExit() { - PushMessage(AppletMessage::ExitRequested); + PushMessage(AppletMessage::Exit); } void AppletMessageQueue::FocusStateChanged() { @@ -732,7 +732,7 @@ void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) { const auto message = 
msg_queue->PopMessage(); IPC::ResponseBuilder rb{ctx, 3}; - if (message == AppletMessageQueue::AppletMessage::NoMessage) { + if (message == AppletMessageQueue::AppletMessage::None) { LOG_ERROR(Service_AM, "Message queue is empty"); rb.Push(ERR_NO_MESSAGES); rb.PushEnum<AppletMessageQueue::AppletMessage>(message); @@ -980,7 +980,7 @@ private: LOG_DEBUG(Service_AM, "called"); IPC::RequestParser rp{ctx}; - applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>()); + applet->GetBroker().PushNormalDataFromGame(rp.PopIpcInterface<IStorage>().lock()); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); @@ -1007,7 +1007,7 @@ private: LOG_DEBUG(Service_AM, "called"); IPC::RequestParser rp{ctx}; - applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>()); + applet->GetBroker().PushInteractiveDataFromGame(rp.PopIpcInterface<IStorage>().lock()); ASSERT(applet->IsInitialized()); applet->ExecuteInteractive(); @@ -1453,8 +1453,8 @@ void IApplicationFunctions::PopLaunchParameter(Kernel::HLERequestContext& ctx) { Account::ProfileManager profile_manager{}; const auto uuid = profile_manager.GetUser(static_cast<s32>(Settings::values.current_user)); - ASSERT(uuid); - params.current_user = uuid->uuid; + ASSERT(uuid.has_value() && uuid->IsValid()); + params.current_user = *uuid; IPC::ResponseBuilder rb{ctx, 2, 0, 1}; diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index 2a578aea5..fdd937b82 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h @@ -22,6 +22,7 @@ class NVFlinger; namespace Service::AM { +// This is nn::settings::Language enum SystemLanguage { Japanese = 0, English = 1, // en-US @@ -41,16 +42,44 @@ enum SystemLanguage { // 4.0.0+ SimplifiedChinese = 15, TraditionalChinese = 16, + // 10.1.0+ + BrazilianPortuguese = 17, }; class AppletMessageQueue { public: + // This is nn::am::AppletMessage enum class AppletMessage : u32 { - NoMessage = 0, - ExitRequested = 4, + None = 0, + ChangeIntoForeground = 1, + ChangeIntoBackground = 2, + Exit = 4, + ApplicationExited = 6, FocusStateChanged = 15, + Resume = 16, + DetectShortPressingHomeButton = 20, + DetectLongPressingHomeButton = 21, + DetectShortPressingPowerButton = 22, + DetectMiddlePressingPowerButton = 23, + DetectLongPressingPowerButton = 24, + RequestToPrepareSleep = 25, + FinishedSleepSequence = 26, + SleepRequiredByHighTemperature = 27, + SleepRequiredByLowBattery = 28, + AutoPowerDown = 29, OperationModeChanged = 30, PerformanceModeChanged = 31, + DetectReceivingCecSystemStandby = 32, + SdCardRemoved = 33, + LaunchApplicationRequested = 50, + RequestToDisplay = 51, + ShowApplicationLogo = 55, + HideApplicationLogo = 56, + ForceHideApplicationLogo = 57, + FloatingApplicationDetected = 60, + DetectShortPressingCaptureButton = 90, + AlbumScreenShotTaken = 92, + AlbumRecordingSaved = 93, }; explicit AppletMessageQueue(Core::System& system); @@ -179,11 +208,14 @@ public: ~ICommonStateGetter() override; private: + // This is nn::oe::FocusState enum class FocusState : u8 { InFocus = 1, NotInFocus = 2, + Background = 3, }; + // This is nn::oe::OperationMode enum class OperationMode : u8 { Handheld = 0, Docked = 1, diff --git a/src/core/hle/service/am/applets/applet_mii.cpp b/src/core/hle/service/am/applets/applet_mii.cpp new file mode 100644 index 000000000..8c4173737 --- /dev/null +++ b/src/core/hle/service/am/applets/applet_mii.cpp @@ -0,0 +1,101 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt 
file included. + +#include "common/assert.h" +#include "common/logging/log.h" +#include "core/core.h" +#include "core/frontend/applets/mii.h" +#include "core/hle/service/am/am.h" +#include "core/hle/service/am/applets/applet_mii.h" +#include "core/reporter.h" + +namespace Service::AM::Applets { + +Mii::Mii(Core::System& system_, LibraryAppletMode applet_mode_, + const Core::Frontend::MiiApplet& frontend_) + : Applet{system_, applet_mode_}, frontend{frontend_}, system{system_} {} + +Mii::~Mii() = default; + +void Mii::Initialize() { + is_complete = false; + + const auto storage = broker.PopNormalDataToApplet(); + ASSERT(storage != nullptr); + + const auto data = storage->GetData(); + ASSERT(data.size() == sizeof(MiiAppletInput)); + + std::memcpy(&input_data, data.data(), sizeof(MiiAppletInput)); +} + +bool Mii::TransactionComplete() const { + return is_complete; +} + +ResultCode Mii::GetStatus() const { + return ResultSuccess; +} + +void Mii::ExecuteInteractive() { + UNREACHABLE_MSG("Unexpected interactive applet data!"); +} + +void Mii::Execute() { + if (is_complete) { + return; + } + + const auto callback = [this](const Core::Frontend::MiiParameters& parameters) { + DisplayCompleted(parameters); + }; + + switch (input_data.applet_mode) { + case MiiAppletMode::ShowMiiEdit: { + Service::Mii::MiiManager manager; + Core::Frontend::MiiParameters params{ + .is_editable = false, + .mii_data = input_data.mii_char_info.mii_data, + }; + frontend.ShowMii(params, callback); + break; + } + case MiiAppletMode::EditMii: { + Service::Mii::MiiManager manager; + Core::Frontend::MiiParameters params{ + .is_editable = true, + .mii_data = input_data.mii_char_info.mii_data, + }; + frontend.ShowMii(params, callback); + break; + } + case MiiAppletMode::CreateMii: { + Service::Mii::MiiManager manager; + Core::Frontend::MiiParameters params{ + .is_editable = true, + .mii_data = manager.BuildDefault(0), + }; + frontend.ShowMii(params, callback); + break; + } + default: + UNIMPLEMENTED_MSG("Unimplemented LibAppletMiiEdit mode={:02X}!", input_data.applet_mode); + } +} + +void Mii::DisplayCompleted(const Core::Frontend::MiiParameters& parameters) { + is_complete = true; + + std::vector<u8> reply(sizeof(AppletOutputForCharInfoEditing)); + output_data = { + .result = ResultSuccess, + .mii_data = parameters.mii_data, + }; + + std::memcpy(reply.data(), &output_data, sizeof(AppletOutputForCharInfoEditing)); + broker.PushNormalDataFromApplet(std::make_shared<IStorage>(system, std::move(reply))); + broker.SignalStateChanged(); +} + +} // namespace Service::AM::Applets diff --git a/src/core/hle/service/am/applets/applet_mii.h b/src/core/hle/service/am/applets/applet_mii.h new file mode 100644 index 000000000..42326bfc2 --- /dev/null +++ b/src/core/hle/service/am/applets/applet_mii.h @@ -0,0 +1,90 @@ +// Copyright 2022 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
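(The frontend contract driven by Mii::Execute() above is small: ShowMii() receives the current parameters plus a completion callback that lands in Mii::DisplayCompleted(). A minimal headless sketch follows; the exact virtual signature of Core::Frontend::MiiApplet::ShowMii is an assumption inferred from the call sites above.)

#include <functional>

// Hypothetical sketch: complete the "edit" immediately with the data that
// was passed in, which is all a headless frontend needs to do to unblock
// Mii::DisplayCompleted(). The signature below is assumed, not quoted.
class HeadlessMiiApplet final : public Core::Frontend::MiiApplet {
public:
    void ShowMii(const Core::Frontend::MiiParameters& parameters,
                 const std::function<void(const Core::Frontend::MiiParameters&)>& callback) const override {
        callback(parameters);
    }
};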
+ +#pragma once + +#include <array> + +#include "core/hle/result.h" +#include "core/hle/service/am/applets/applets.h" +#include "core/hle/service/mii/mii_manager.h" + +namespace Core { +class System; +} + +namespace Service::AM::Applets { + +// This is nn::mii::AppletMode +enum class MiiAppletMode : u32 { + ShowMiiEdit = 0, + AppendMii = 1, + AppendMiiImage = 2, + UpdateMiiImage = 3, + CreateMii = 4, + EditMii = 5, +}; + +struct MiiCharInfo { + Service::Mii::MiiInfo mii_data{}; + INSERT_PADDING_BYTES(0x28); +}; +static_assert(sizeof(MiiCharInfo) == 0x80, "MiiCharInfo has incorrect size."); + +// This is nn::mii::AppletInput +struct MiiAppletInput { + s32 version{}; + MiiAppletMode applet_mode{}; + u32 special_mii_key_code{}; + union { + std::array<Common::UUID, 8> valid_uuid; + MiiCharInfo mii_char_info; + }; + Common::UUID used_uuid; + INSERT_PADDING_BYTES(0x64); +}; +static_assert(sizeof(MiiAppletInput) == 0x100, "MiiAppletInput has incorrect size."); + +// This is nn::mii::AppletOutput +struct MiiAppletOutput { + ResultCode result{ResultSuccess}; + s32 index{}; + INSERT_PADDING_BYTES(0x18); +}; +static_assert(sizeof(MiiAppletOutput) == 0x20, "MiiAppletOutput has incorrect size."); + +// This is nn::mii::AppletOutputForCharInfoEditing +struct AppletOutputForCharInfoEditing { + ResultCode result{ResultSuccess}; + Service::Mii::MiiInfo mii_data{}; + INSERT_PADDING_BYTES(0x24); +}; +static_assert(sizeof(AppletOutputForCharInfoEditing) == 0x80, + "AppletOutputForCharInfoEditing has incorrect size."); + +class Mii final : public Applet { +public: + explicit Mii(Core::System& system_, LibraryAppletMode applet_mode_, + const Core::Frontend::MiiApplet& frontend_); + ~Mii() override; + + void Initialize() override; + + bool TransactionComplete() const override; + ResultCode GetStatus() const override; + void ExecuteInteractive() override; + void Execute() override; + + void DisplayCompleted(const Core::Frontend::MiiParameters& parameters); + +private: + const Core::Frontend::MiiApplet& frontend; + MiiAppletInput input_data{}; + AppletOutputForCharInfoEditing output_data{}; + + bool is_complete = false; + Core::System& system; +}; + +} // namespace Service::AM::Applets diff --git a/src/core/hle/service/am/applets/applet_profile_select.cpp b/src/core/hle/service/am/applets/applet_profile_select.cpp index a6e891944..82500e121 100644 --- a/src/core/hle/service/am/applets/applet_profile_select.cpp +++ b/src/core/hle/service/am/applets/applet_profile_select.cpp @@ -62,11 +62,11 @@ void ProfileSelect::SelectionComplete(std::optional<Common::UUID> uuid) { if (uuid.has_value() && uuid->IsValid()) { output.result = 0; - output.uuid_selected = uuid->uuid; + output.uuid_selected = *uuid; } else { status = ERR_USER_CANCELLED_SELECTION; output.result = ERR_USER_CANCELLED_SELECTION.raw; - output.uuid_selected = Common::INVALID_UUID; + output.uuid_selected = Common::InvalidUUID; } final_data = std::vector<u8>(sizeof(UserSelectionOutput)); diff --git a/src/core/hle/service/am/applets/applet_profile_select.h b/src/core/hle/service/am/applets/applet_profile_select.h index 8fb76e6c4..852e1e0c0 100644 --- a/src/core/hle/service/am/applets/applet_profile_select.h +++ b/src/core/hle/service/am/applets/applet_profile_select.h @@ -27,7 +27,7 @@ static_assert(sizeof(UserSelectionConfig) == 0xA0, "UserSelectionConfig has inco struct UserSelectionOutput { u64 result; - u128 uuid_selected; + Common::UUID uuid_selected; }; static_assert(sizeof(UserSelectionOutput) == 0x18, "UserSelectionOutput has incorrect size."); diff 
--git a/src/core/hle/service/am/applets/applets.cpp b/src/core/hle/service/am/applets/applets.cpp index 134ac1ee2..79e62679d 100644 --- a/src/core/hle/service/am/applets/applets.cpp +++ b/src/core/hle/service/am/applets/applets.cpp @@ -9,6 +9,7 @@ #include "core/frontend/applets/controller.h" #include "core/frontend/applets/error.h" #include "core/frontend/applets/general_frontend.h" +#include "core/frontend/applets/mii.h" #include "core/frontend/applets/profile_select.h" #include "core/frontend/applets/software_keyboard.h" #include "core/frontend/applets/web_browser.h" @@ -19,6 +20,7 @@ #include "core/hle/service/am/applets/applet_controller.h" #include "core/hle/service/am/applets/applet_error.h" #include "core/hle/service/am/applets/applet_general_backend.h" +#include "core/hle/service/am/applets/applet_mii.h" #include "core/hle/service/am/applets/applet_profile_select.h" #include "core/hle/service/am/applets/applet_software_keyboard.h" #include "core/hle/service/am/applets/applet_web_browser.h" @@ -172,10 +174,11 @@ AppletFrontendSet::AppletFrontendSet() = default; AppletFrontendSet::AppletFrontendSet(ControllerApplet controller_applet, ErrorApplet error_applet, ParentalControlsApplet parental_controls_applet, - PhotoViewer photo_viewer_, ProfileSelect profile_select_, + MiiApplet mii_applet, PhotoViewer photo_viewer_, + ProfileSelect profile_select_, SoftwareKeyboard software_keyboard_, WebBrowser web_browser_) : controller{std::move(controller_applet)}, error{std::move(error_applet)}, - parental_controls{std::move(parental_controls_applet)}, + parental_controls{std::move(parental_controls_applet)}, mii{std::move(mii_applet)}, photo_viewer{std::move(photo_viewer_)}, profile_select{std::move(profile_select_)}, software_keyboard{std::move(software_keyboard_)}, web_browser{std::move(web_browser_)} {} @@ -206,6 +209,10 @@ void AppletManager::SetAppletFrontendSet(AppletFrontendSet set) { frontend.parental_controls = std::move(set.parental_controls); } + if (set.mii != nullptr) { + frontend.mii = std::move(set.mii); + } + if (set.photo_viewer != nullptr) { frontend.photo_viewer = std::move(set.photo_viewer); } @@ -243,6 +250,10 @@ void AppletManager::SetDefaultAppletsIfMissing() { std::make_unique<Core::Frontend::DefaultParentalControlsApplet>(); } + if (frontend.mii == nullptr) { + frontend.mii = std::make_unique<Core::Frontend::DefaultMiiApplet>(); + } + if (frontend.photo_viewer == nullptr) { frontend.photo_viewer = std::make_unique<Core::Frontend::DefaultPhotoViewerApplet>(); } @@ -277,6 +288,8 @@ std::shared_ptr<Applet> AppletManager::GetApplet(AppletId id, LibraryAppletMode return std::make_shared<ProfileSelect>(system, mode, *frontend.profile_select); case AppletId::SoftwareKeyboard: return std::make_shared<SoftwareKeyboard>(system, mode, *frontend.software_keyboard); + case AppletId::MiiEdit: + return std::make_shared<Mii>(system, mode, *frontend.mii); case AppletId::Web: case AppletId::Shop: case AppletId::OfflineWeb: diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h index 15eeb4ee1..0c44aec79 100644 --- a/src/core/hle/service/am/applets/applets.h +++ b/src/core/hle/service/am/applets/applets.h @@ -21,6 +21,7 @@ class ControllerApplet; class ECommerceApplet; class ErrorApplet; class ParentalControlsApplet; +class MiiApplet; class PhotoViewerApplet; class ProfileSelectApplet; class SoftwareKeyboardApplet; @@ -179,6 +180,7 @@ struct AppletFrontendSet { using ControllerApplet = std::unique_ptr<Core::Frontend::ControllerApplet>; using 
ErrorApplet = std::unique_ptr<Core::Frontend::ErrorApplet>; using ParentalControlsApplet = std::unique_ptr<Core::Frontend::ParentalControlsApplet>; + using MiiApplet = std::unique_ptr<Core::Frontend::MiiApplet>; using PhotoViewer = std::unique_ptr<Core::Frontend::PhotoViewerApplet>; using ProfileSelect = std::unique_ptr<Core::Frontend::ProfileSelectApplet>; using SoftwareKeyboard = std::unique_ptr<Core::Frontend::SoftwareKeyboardApplet>; @@ -186,9 +188,9 @@ struct AppletFrontendSet { AppletFrontendSet(); AppletFrontendSet(ControllerApplet controller_applet, ErrorApplet error_applet, - ParentalControlsApplet parental_controls_applet, PhotoViewer photo_viewer_, - ProfileSelect profile_select_, SoftwareKeyboard software_keyboard_, - WebBrowser web_browser_); + ParentalControlsApplet parental_controls_applet, MiiApplet mii_applet, + PhotoViewer photo_viewer_, ProfileSelect profile_select_, + SoftwareKeyboard software_keyboard_, WebBrowser web_browser_); ~AppletFrontendSet(); AppletFrontendSet(const AppletFrontendSet&) = delete; @@ -200,6 +202,7 @@ struct AppletFrontendSet { ControllerApplet controller; ErrorApplet error; ParentalControlsApplet parental_controls; + MiiApplet mii; PhotoViewer photo_viewer; ProfileSelect profile_select; SoftwareKeyboard software_keyboard; diff --git a/src/core/hle/service/apm/apm_controller.cpp b/src/core/hle/service/apm/apm_controller.cpp index 98839fe97..187fef2ad 100644 --- a/src/core/hle/service/apm/apm_controller.cpp +++ b/src/core/hle/service/apm/apm_controller.cpp @@ -17,8 +17,8 @@ constexpr auto DEFAULT_PERFORMANCE_CONFIGURATION = PerformanceConfiguration::Con Controller::Controller(Core::Timing::CoreTiming& core_timing_) : core_timing{core_timing_}, configs{ - {PerformanceMode::Handheld, DEFAULT_PERFORMANCE_CONFIGURATION}, - {PerformanceMode::Docked, DEFAULT_PERFORMANCE_CONFIGURATION}, + {PerformanceMode::Normal, DEFAULT_PERFORMANCE_CONFIGURATION}, + {PerformanceMode::Boost, DEFAULT_PERFORMANCE_CONFIGURATION}, } {} Controller::~Controller() = default; @@ -63,13 +63,13 @@ void Controller::SetFromCpuBoostMode(CpuBoostMode mode) { PerformanceConfiguration::Config15, }}; - SetPerformanceConfiguration(PerformanceMode::Docked, + SetPerformanceConfiguration(PerformanceMode::Boost, BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode))); } PerformanceMode Controller::GetCurrentPerformanceMode() const { - return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Docked - : PerformanceMode::Handheld; + return Settings::values.use_docked_mode.GetValue() ? 
PerformanceMode::Boost + : PerformanceMode::Normal; } PerformanceConfiguration Controller::GetCurrentPerformanceConfiguration(PerformanceMode mode) { diff --git a/src/core/hle/service/apm/apm_controller.h b/src/core/hle/service/apm/apm_controller.h index 8d48e0104..d6fbd2c0c 100644 --- a/src/core/hle/service/apm/apm_controller.h +++ b/src/core/hle/service/apm/apm_controller.h @@ -32,15 +32,18 @@ enum class PerformanceConfiguration : u32 { Config16 = 0x9222000C, }; +// This is nn::oe::CpuBoostMode enum class CpuBoostMode : u32 { - Disabled = 0, - Full = 1, // CPU + GPU -> Config 13, 14, 15, or 16 - Partial = 2, // GPU Only -> Config 15 or 16 + Normal = 0, // Boost mode disabled + FastLoad = 1, // CPU + GPU -> Config 13, 14, 15, or 16 + Partial = 2, // GPU Only -> Config 15 or 16 }; -enum class PerformanceMode : u8 { - Handheld = 0, - Docked = 1, +// This is nn::oe::PerformanceMode +enum class PerformanceMode : s32 { + Invalid = -1, + Normal = 0, + Boost = 1, }; // Class to manage the state and change of the emulated system performance. diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp index 9f9cea1e0..79cd3acbb 100644 --- a/src/core/hle/service/friend/friend.cpp +++ b/src/core/hle/service/friend/friend.cpp @@ -173,7 +173,7 @@ private: const auto uuid = rp.PopRaw<Common::UUID>(); LOG_WARNING(Service_Friend, "(STUBBED) called, local_play={}, uuid=0x{}", local_play, - uuid.Format()); + uuid.RawString()); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); @@ -186,7 +186,7 @@ private: [[maybe_unused]] const auto filter = rp.PopRaw<SizedFriendFilter>(); const auto pid = rp.Pop<u64>(); LOG_WARNING(Service_Friend, "(STUBBED) called, offset={}, uuid=0x{}, pid={}", friend_offset, - uuid.Format(), pid); + uuid.RawString(), pid); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); @@ -312,7 +312,7 @@ void Module::Interface::CreateNotificationService(Kernel::HLERequestContext& ctx IPC::RequestParser rp{ctx}; auto uuid = rp.PopRaw<Common::UUID>(); - LOG_DEBUG(Service_Friend, "called, uuid=0x{}", uuid.Format()); + LOG_DEBUG(Service_Friend, "called, uuid=0x{}", uuid.RawString()); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(ResultSuccess); diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index a2bf7defb..d9202ea6c 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -320,7 +320,7 @@ Hid::Hid(Core::System& system_) {308, nullptr, "SetSevenSixAxisSensorFusionStrength"}, {309, nullptr, "GetSevenSixAxisSensorFusionStrength"}, {310, &Hid::ResetSevenSixAxisSensorTimestamp, "ResetSevenSixAxisSensorTimestamp"}, - {400, nullptr, "IsUsbFullKeyControllerEnabled"}, + {400, &Hid::IsUsbFullKeyControllerEnabled, "IsUsbFullKeyControllerEnabled"}, {401, nullptr, "EnableUsbFullKeyController"}, {402, nullptr, "IsUsbFullKeyControllerConnected"}, {403, nullptr, "HasBattery"}, @@ -1673,6 +1673,16 @@ void Hid::ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx) { rb.Push(ResultSuccess); } +void Hid::IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + + LOG_WARNING(Service_HID, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(false); +} + void Hid::SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto applet_resource_user_id{rp.Pop<u64>()}; diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index d290df161..c281081a7 100644 --- 
a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h @@ -159,6 +159,7 @@ private: void InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx); void FinalizeSevenSixAxisSensor(Kernel::HLERequestContext& ctx); void ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx); + void IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx); void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx); void SetPalmaBoostMode(Kernel::HLERequestContext& ctx); void SetNpadCommunicationMode(Kernel::HLERequestContext& ctx); diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp index 62f4cdfb2..ff0bbb788 100644 --- a/src/core/hle/service/kernel_helpers.cpp +++ b/src/core/hle/service/kernel_helpers.cpp @@ -3,7 +3,9 @@ // Refer to the license.txt file included. #include "core/core.h" +#include "core/core_timing.h" #include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_memory_manager.h" #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_resource_limit.h" @@ -15,9 +17,11 @@ namespace Service::KernelHelpers { ServiceContext::ServiceContext(Core::System& system_, std::string name_) : kernel(system_.Kernel()) { + // Create the process. process = Kernel::KProcess::Create(kernel); ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_), - Kernel::KProcess::ProcessType::Userland) + Kernel::KProcess::ProcessType::KernelInternal, + kernel.GetSystemResourceLimit()) .IsSuccess()); } @@ -43,7 +47,7 @@ Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { } // Initialize the event. - event->Initialize(std::move(name)); + event->Initialize(std::move(name), process); // Commit the thread reservation. event_reservation.Commit(); diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index 9fc7bb1b1..099276420 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -288,7 +288,7 @@ public: } bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const { - constexpr std::size_t padding_size{4 * Kernel::PageSize}; + const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize}; const auto start_info{page_table.QueryInfo(start - 1)}; if (start_info.state != Kernel::KMemoryState::Free) { @@ -308,31 +308,69 @@ public: return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); } - VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const { - VAddr addr{}; - const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >> - Kernel::PageBits}; - do { - addr = page_table.GetAliasCodeRegionStart() + - (Kernel::KSystemControl::GenerateRandomRange(0, end_pages) << Kernel::PageBits); - } while (!page_table.IsInsideAddressSpace(addr, size) || - page_table.IsInsideHeapRegion(addr, size) || - page_table.IsInsideAliasRegion(addr, size)); - return addr; + ResultCode GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) { + size = Common::AlignUp(size, Kernel::PageSize); + size += page_table.GetNumGuardPages() * Kernel::PageSize * 4; + + const auto is_region_available = [&](VAddr addr) { + const auto end_addr = addr + size; + while (addr < end_addr) { + if (system.Memory().IsValidVirtualAddress(addr)) { + return false; + } + + if (!page_table.IsInsideAddressSpace(out_addr, size)) { + return false; + } + + if (page_table.IsInsideHeapRegion(out_addr, size)) { + return 
false; + } + + if (page_table.IsInsideAliasRegion(out_addr, size)) { + return false; + } + + addr += Kernel::PageSize; + } + return true; + }; + + bool succeeded = false; + const auto map_region_end = + page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize(); + while (current_map_addr < map_region_end) { + if (is_region_available(current_map_addr)) { + succeeded = true; + break; + } + current_map_addr += 0x100000; + } + + if (!succeeded) { + UNREACHABLE_MSG("Out of address space!"); + return Kernel::ResultOutOfMemory; + } + + out_addr = current_map_addr; + current_map_addr += size; + + return ResultSuccess; } - ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr baseAddress, - u64 size) const { + ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr base_addr, u64 size) { + auto& page_table{process->PageTable()}; + VAddr addr{}; + for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) { - auto& page_table{process->PageTable()}; - const VAddr addr{GetRandomMapRegion(page_table, size)}; - const ResultCode result{page_table.MapCodeMemory(addr, baseAddress, size)}; + R_TRY(GetAvailableMapRegion(page_table, size, addr)); + const ResultCode result{page_table.MapCodeMemory(addr, base_addr, size)}; if (result == Kernel::ResultInvalidCurrentMemory) { continue; } - CASCADE_CODE(result); + R_TRY(result); if (ValidateRegionForMap(page_table, addr, size)) { return addr; @@ -343,7 +381,7 @@ public: } ResultVal<VAddr> MapNro(Kernel::KProcess* process, VAddr nro_addr, std::size_t nro_size, - VAddr bss_addr, std::size_t bss_size, std::size_t size) const { + VAddr bss_addr, std::size_t bss_size, std::size_t size) { for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) { auto& page_table{process->PageTable()}; VAddr addr{}; @@ -597,6 +635,7 @@ public: LOG_WARNING(Service_LDR, "(STUBBED) called"); initialized = true; + current_map_addr = system.CurrentProcess()->PageTable().GetAliasCodeRegionStart(); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); @@ -607,6 +646,7 @@ private: std::map<VAddr, NROInfo> nro; std::map<VAddr, std::vector<SHA256Hash>> nrr; + VAddr current_map_addr{}; bool IsValidNROHash(const SHA256Hash& hash) const { return std::any_of(nrr.begin(), nrr.end(), [&hash](const auto& p) { diff --git a/src/core/hle/service/mii/mii_manager.cpp b/src/core/hle/service/mii/mii_manager.cpp index ca4ed35bb..0a57c3cde 100644 --- a/src/core/hle/service/mii/mii_manager.cpp +++ b/src/core/hle/service/mii/mii_manager.cpp @@ -118,16 +118,6 @@ u16 GenerateCrc16(const void* data, std::size_t size) { return Common::swap16(static_cast<u16>(crc)); } -Common::UUID GenerateValidUUID() { - auto uuid{Common::UUID::Generate()}; - - // Bit 7 must be set, and bit 6 unset for the UUID to be valid - uuid.uuid[1] &= 0xFFFFFFFFFFFFFF3FULL; - uuid.uuid[1] |= 0x0000000000000080ULL; - - return uuid; -} - template <typename T> T GetRandomValue(T min, T max) { std::random_device device; @@ -383,7 +373,7 @@ MiiStoreData::MiiStoreData() = default; MiiStoreData::MiiStoreData(const MiiStoreData::Name& name, const MiiStoreBitFields& bit_fields, const Common::UUID& user_id) { data.name = name; - data.uuid = GenerateValidUUID(); + data.uuid = Common::UUID::MakeRandomRFC4122V4(); std::memcpy(data.data.data(), &bit_fields, sizeof(MiiStoreBitFields)); data_crc = GenerateCrc16(data.data.data(), sizeof(data)); diff --git a/src/core/hle/service/mii/mii_manager.h b/src/core/hle/service/mii/mii_manager.h index 8e048fc56..6999d15b1 100644 --- 
a/src/core/hle/service/mii/mii_manager.h +++ b/src/core/hle/service/mii/mii_manager.h @@ -202,7 +202,7 @@ struct MiiStoreData { static_assert(sizeof(MiiStoreBitFields) == sizeof(data), "data field has incorrect size."); Name name{}; - Common::UUID uuid{Common::INVALID_UUID}; + Common::UUID uuid{}; } data; u16 data_crc{}; @@ -326,7 +326,7 @@ public: ResultCode GetIndex(const MiiInfo& info, u32& index); private: - const Common::UUID user_id{Common::INVALID_UUID}; + const Common::UUID user_id{}; u64 update_counter{}; }; diff --git a/src/core/hle/service/mnpp/mnpp_app.cpp b/src/core/hle/service/mnpp/mnpp_app.cpp new file mode 100644 index 000000000..53497612f --- /dev/null +++ b/src/core/hle/service/mnpp/mnpp_app.cpp @@ -0,0 +1,45 @@ +// Copyright 2022 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/logging/log.h" +#include "core/hle/ipc_helpers.h" +#include "core/hle/service/mnpp/mnpp_app.h" +#include "core/hle/service/sm/sm.h" + +namespace Service::MNPP { + +class MNPP_APP final : public ServiceFramework<MNPP_APP> { +public: + explicit MNPP_APP(Core::System& system_) : ServiceFramework{system_, "mnpp:app"} { + // clang-format off + static const FunctionInfo functions[] = { + {0, &MNPP_APP::Unknown0, "unknown0"}, + {1, &MNPP_APP::Unknown1, "unknown1"}, + }; + // clang-format on + + RegisterHandlers(functions); + } + +private: + void Unknown0(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_MNPP, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); + } + + void Unknown1(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_MNPP, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); + } +}; + +void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) { + std::make_shared<MNPP_APP>(system)->InstallAsService(service_manager); +} + +} // namespace Service::MNPP diff --git a/src/core/hle/service/mnpp/mnpp_app.h b/src/core/hle/service/mnpp/mnpp_app.h new file mode 100644 index 000000000..6bf20b494 --- /dev/null +++ b/src/core/hle/service/mnpp/mnpp_app.h @@ -0,0 +1,20 @@ +// Copyright 2022 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +namespace Core { +class System; +} + +namespace Service::SM { +class ServiceManager; +} + +namespace Service::MNPP { + +/// Registers all MNPP services with the specified service manager. 
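+/// A hypothetical caller registers these once during service bring-up, e.g.: +/// Service::MNPP::InstallInterfaces(sm, system); // sm: the live SM::ServiceManager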
+void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system); + +} // namespace Service::MNPP diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp index 761d0d3c6..513107715 100644 --- a/src/core/hle/service/nfp/nfp.cpp +++ b/src/core/hle/service/nfp/nfp.cpp @@ -7,6 +7,9 @@ #include "common/logging/log.h" #include "core/core.h" +#include "core/hid/emulated_controller.h" +#include "core/hid/hid_core.h" +#include "core/hid/hid_types.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/k_event.h" #include "core/hle/service/nfp/nfp.h" @@ -14,343 +17,790 @@ namespace Service::NFP { namespace ErrCodes { -constexpr ResultCode ERR_NO_APPLICATION_AREA(ErrorModule::NFP, 152); +constexpr ResultCode DeviceNotFound(ErrorModule::NFP, 64); +constexpr ResultCode WrongDeviceState(ErrorModule::NFP, 73); +constexpr ResultCode ApplicationAreaIsNotInitialized(ErrorModule::NFP, 128); +constexpr ResultCode ApplicationAreaExist(ErrorModule::NFP, 168); } // namespace ErrCodes -Module::Interface::Interface(std::shared_ptr<Module> module_, Core::System& system_, - const char* name) - : ServiceFramework{system_, name}, module{std::move(module_)}, service_context{system_, - "NFP::IUser"} { - nfc_tag_load = service_context.CreateEvent("NFP::IUser:NFCTagDetected"); -} - -Module::Interface::~Interface() { - service_context.CloseEvent(nfc_tag_load); -} - -class IUser final : public ServiceFramework<IUser> { -public: - explicit IUser(Module::Interface& nfp_interface_, Core::System& system_, - KernelHelpers::ServiceContext& service_context_) - : ServiceFramework{system_, "NFP::IUser"}, nfp_interface{nfp_interface_}, - service_context{service_context_} { - static const FunctionInfo functions[] = { - {0, &IUser::Initialize, "Initialize"}, - {1, &IUser::Finalize, "Finalize"}, - {2, &IUser::ListDevices, "ListDevices"}, - {3, &IUser::StartDetection, "StartDetection"}, - {4, &IUser::StopDetection, "StopDetection"}, - {5, &IUser::Mount, "Mount"}, - {6, &IUser::Unmount, "Unmount"}, - {7, &IUser::OpenApplicationArea, "OpenApplicationArea"}, - {8, &IUser::GetApplicationArea, "GetApplicationArea"}, - {9, nullptr, "SetApplicationArea"}, - {10, nullptr, "Flush"}, - {11, nullptr, "Restore"}, - {12, nullptr, "CreateApplicationArea"}, - {13, &IUser::GetTagInfo, "GetTagInfo"}, - {14, &IUser::GetRegisterInfo, "GetRegisterInfo"}, - {15, &IUser::GetCommonInfo, "GetCommonInfo"}, - {16, &IUser::GetModelInfo, "GetModelInfo"}, - {17, &IUser::AttachActivateEvent, "AttachActivateEvent"}, - {18, &IUser::AttachDeactivateEvent, "AttachDeactivateEvent"}, - {19, &IUser::GetState, "GetState"}, - {20, &IUser::GetDeviceState, "GetDeviceState"}, - {21, &IUser::GetNpadId, "GetNpadId"}, - {22, &IUser::GetApplicationAreaSize, "GetApplicationAreaSize"}, - {23, &IUser::AttachAvailabilityChangeEvent, "AttachAvailabilityChangeEvent"}, - {24, nullptr, "RecreateApplicationArea"}, - }; - RegisterHandlers(functions); +constexpr u32 ApplicationAreaSize = 0xD8; + +IUser::IUser(Module::Interface& nfp_interface_, Core::System& system_) + : ServiceFramework{system_, "NFP::IUser"}, service_context{system_, service_name}, + nfp_interface{nfp_interface_} { + static const FunctionInfo functions[] = { + {0, &IUser::Initialize, "Initialize"}, + {1, &IUser::Finalize, "Finalize"}, + {2, &IUser::ListDevices, "ListDevices"}, + {3, &IUser::StartDetection, "StartDetection"}, + {4, &IUser::StopDetection, "StopDetection"}, + {5, &IUser::Mount, "Mount"}, + {6, &IUser::Unmount, "Unmount"}, + {7, &IUser::OpenApplicationArea, 
"OpenApplicationArea"}, + {8, &IUser::GetApplicationArea, "GetApplicationArea"}, + {9, &IUser::SetApplicationArea, "SetApplicationArea"}, + {10, nullptr, "Flush"}, + {11, nullptr, "Restore"}, + {12, &IUser::CreateApplicationArea, "CreateApplicationArea"}, + {13, &IUser::GetTagInfo, "GetTagInfo"}, + {14, &IUser::GetRegisterInfo, "GetRegisterInfo"}, + {15, &IUser::GetCommonInfo, "GetCommonInfo"}, + {16, &IUser::GetModelInfo, "GetModelInfo"}, + {17, &IUser::AttachActivateEvent, "AttachActivateEvent"}, + {18, &IUser::AttachDeactivateEvent, "AttachDeactivateEvent"}, + {19, &IUser::GetState, "GetState"}, + {20, &IUser::GetDeviceState, "GetDeviceState"}, + {21, &IUser::GetNpadId, "GetNpadId"}, + {22, &IUser::GetApplicationAreaSize, "GetApplicationAreaSize"}, + {23, &IUser::AttachAvailabilityChangeEvent, "AttachAvailabilityChangeEvent"}, + {24, nullptr, "RecreateApplicationArea"}, + }; + RegisterHandlers(functions); - deactivate_event = service_context.CreateEvent("NFP::IUser:DeactivateEvent"); - availability_change_event = - service_context.CreateEvent("NFP::IUser:AvailabilityChangeEvent"); - } + availability_change_event = service_context.CreateEvent("IUser:AvailabilityChangeEvent"); +} - ~IUser() override { - service_context.CloseEvent(deactivate_event); - service_context.CloseEvent(availability_change_event); - } +void IUser::Initialize(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFC, "called"); -private: - struct TagInfo { - std::array<u8, 10> uuid; - u8 uuid_length; // TODO(ogniK): Figure out if this is actual the uuid length or does it - // mean something else - std::array<u8, 0x15> padding_1; - u32_le protocol; - u32_le tag_type; - std::array<u8, 0x2c> padding_2; - }; - static_assert(sizeof(TagInfo) == 0x54, "TagInfo is an invalid size"); + state = State::Initialized; - enum class State : u32 { - NonInitialized = 0, - Initialized = 1, - }; + // TODO(german77): Loop through all interfaces + nfp_interface.Initialize(); - enum class DeviceState : u32 { - Initialized = 0, - SearchingForTag = 1, - TagFound = 2, - TagRemoved = 3, - TagNearby = 4, - Unknown5 = 5, - Finalized = 6 - }; + IPC::ResponseBuilder rb{ctx, 2, 0}; + rb.Push(ResultSuccess); +} - struct CommonInfo { - u16_be last_write_year; - u8 last_write_month; - u8 last_write_day; - u16_be write_counter; - u16_be version; - u32_be application_area_size; - INSERT_PADDING_BYTES(0x34); - }; - static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size"); +void IUser::Finalize(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFP, "called"); - void Initialize(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFC, "called"); + state = State::NonInitialized; - IPC::ResponseBuilder rb{ctx, 2, 0}; - rb.Push(ResultSuccess); + // TODO(german77): Loop through all interfaces + nfp_interface.Finalize(); - state = State::Initialized; - } + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} - void GetState(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFC, "called"); +void IUser::ListDevices(Kernel::HLERequestContext& ctx) { + LOG_INFO(Service_NFP, "called"); - IPC::ResponseBuilder rb{ctx, 3, 0}; - rb.Push(ResultSuccess); - rb.PushRaw<u32>(static_cast<u32>(state)); + std::vector<u64> devices; + + // TODO(german77): Loop through all interfaces + devices.push_back(nfp_interface.GetHandle()); + + ctx.WriteBuffer(devices); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(static_cast<s32>(devices.size())); +} + +void IUser::StartDetection(Kernel::HLERequestContext& ctx) { + 
IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto nfp_protocol{rp.Pop<s32>()}; + LOG_INFO(Service_NFP, "called, device_handle={}, nfp_protocol={}", device_handle, nfp_protocol); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.StartDetection(nfp_protocol); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); + return; } - void ListDevices(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const u32 array_size = rp.Pop<u32>(); - LOG_DEBUG(Service_NFP, "called, array_size={}", array_size); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - ctx.WriteBuffer(device_handle); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.Push<u32>(1); +void IUser::StopDetection(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.StopDetection(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); + return; } - void GetNpadId(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const u64 dev_handle = rp.Pop<u64>(); - LOG_DEBUG(Service_NFP, "called, dev_handle=0x{:X}", dev_handle); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.Push<u32>(npad_id); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::Mount(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto model_type{rp.PopEnum<ModelType>()}; + const auto mount_target{rp.PopEnum<MountTarget>()}; + LOG_INFO(Service_NFP, "called, device_handle={}, model_type={}, mount_target={}", device_handle, + model_type, mount_target); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.Mount(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); + return; } - void AttachActivateEvent(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const u64 dev_handle = rp.Pop<u64>(); - LOG_DEBUG(Service_NFP, "called, dev_handle=0x{:X}", dev_handle); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - IPC::ResponseBuilder rb{ctx, 2, 1}; - rb.Push(ResultSuccess); - rb.PushCopyObjects(nfp_interface.GetNFCEvent()); - has_attached_handle = true; + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::Unmount(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.Unmount(); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); + return; } - void AttachDeactivateEvent(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const u64 dev_handle = rp.Pop<u64>(); - LOG_DEBUG(Service_NFP, "called, dev_handle=0x{:X}", dev_handle); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - IPC::ResponseBuilder rb{ctx, 2, 1}; 
- rb.Push(ResultSuccess); - rb.PushCopyObjects(deactivate_event->GetReadableEvent()); - } - - void StopDetection(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); - - switch (device_state) { - case DeviceState::TagFound: - case DeviceState::TagNearby: - deactivate_event->GetWritableEvent().Signal(); - device_state = DeviceState::Initialized; - break; - case DeviceState::SearchingForTag: - case DeviceState::TagRemoved: - device_state = DeviceState::Initialized; - break; - default: - break; - } + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::OpenApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto access_id{rp.Pop<u32>()}; + LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, access_id={}", device_handle, + access_id); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.OpenApplicationArea(access_id); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void GetDeviceState(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::GetApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + std::vector<u8> data{}; + const auto result = nfp_interface.GetApplicationArea(data); + ctx.WriteBuffer(data); IPC::ResponseBuilder rb{ctx, 3}; - rb.Push(ResultSuccess); - rb.Push<u32>(static_cast<u32>(device_state)); + rb.Push(result); + rb.Push(static_cast<u32>(data.size())); + return; } - void StartDetection(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - if (device_state == DeviceState::Initialized || device_state == DeviceState::TagRemoved) { - device_state = DeviceState::SearchingForTag; - } + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::SetApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto data{ctx.ReadBuffer()}; + LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, data_size={}", device_handle, + data.size()); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.SetApplicationArea(data); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void GetTagInfo(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::CreateApplicationArea(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + const auto access_id{rp.Pop<u32>()}; + const auto data{ctx.ReadBuffer()}; + LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, data_size={}, access_id={}", + device_handle, access_id, data.size()); + + // TODO(german77): Loop 
through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + const auto result = nfp_interface.CreateApplicationArea(access_id, data); IPC::ResponseBuilder rb{ctx, 2}; - const auto& amiibo = nfp_interface.GetAmiiboBuffer(); - const TagInfo tag_info{ - .uuid = amiibo.uuid, - .uuid_length = static_cast<u8>(amiibo.uuid.size()), - .padding_1 = {}, - .protocol = 1, // TODO(ogniK): Figure out actual values - .tag_type = 2, - .padding_2 = {}, - }; - ctx.WriteBuffer(tag_info); - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void Mount(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::GetTagInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); - device_state = DeviceState::TagNearby; + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + TagInfo tag_info{}; + const auto result = nfp_interface.GetTagInfo(tag_info); + ctx.WriteBuffer(tag_info); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void GetModelInfo(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::GetRegisterInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + RegisterInfo register_info{}; + const auto result = nfp_interface.GetRegisterInfo(register_info); + ctx.WriteBuffer(register_info); IPC::ResponseBuilder rb{ctx, 2}; - const auto& amiibo = nfp_interface.GetAmiiboBuffer(); - ctx.WriteBuffer(amiibo.model_info); - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void Unmount(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - device_state = DeviceState::TagFound; + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} +void IUser::GetCommonInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + CommonInfo common_info{}; + const auto result = nfp_interface.GetCommonInfo(common_info); + ctx.WriteBuffer(common_info); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void Finalize(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_NFP, "called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} - device_state = DeviceState::Finalized; +void IUser::GetModelInfo(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_INFO(Service_NFP, "called, device_handle={}", device_handle); + // TODO(german77): Loop through all interfaces + if (device_handle == 
nfp_interface.GetHandle()) { + ModelInfo model_info{}; + const auto result = nfp_interface.GetModelInfo(model_info); + ctx.WriteBuffer(model_info); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultSuccess); + rb.Push(result); + return; } - void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_NFP, "(STUBBED) called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} +void IUser::AttachActivateEvent(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(ResultSuccess); - rb.PushCopyObjects(availability_change_event->GetReadableEvent()); + rb.PushCopyObjects(nfp_interface.GetActivateEvent()); + return; } - void GetRegisterInfo(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_NFP, "(STUBBED) called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - // TODO(ogniK): Pull Mii and owner data from amiibo + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} - IPC::ResponseBuilder rb{ctx, 2}; +void IUser::AttachDeactivateEvent(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(ResultSuccess); + rb.PushCopyObjects(nfp_interface.GetDeactivateEvent()); + return; } - void GetCommonInfo(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_NFP, "(STUBBED) called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - // TODO(ogniK): Pull common information from amiibo + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} - CommonInfo common_info{}; - common_info.application_area_size = 0; - ctx.WriteBuffer(common_info); +void IUser::GetState(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_NFC, "called"); - IPC::ResponseBuilder rb{ctx, 2}; + IPC::ResponseBuilder rb{ctx, 3, 0}; + rb.Push(ResultSuccess); + rb.PushEnum(state); +} + +void IUser::GetDeviceState(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { + IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); + rb.PushEnum(nfp_interface.GetCurrentState()); + return; } - void OpenApplicationArea(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_NFP, "(STUBBED) called"); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ErrCodes::ERR_NO_APPLICATION_AREA); - } + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); - void GetApplicationAreaSize(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_NFP, "(STUBBED) called"); - // We don't need to worry about this since we can just open the file + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::GetNpadId(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + 
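// The handle doubles as the npad id (see GetHandle() below), so a valid + // handle maps directly back to the id pushed in this response. +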
LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); - rb.PushRaw<u32>(0); // This is from the GetCommonInfo stub + rb.PushEnum(nfp_interface.GetNpadId()); + return; } - void GetApplicationArea(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_NFP, "(STUBBED) called"); + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} - // TODO(ogniK): Pull application area from amiibo +void IUser::GetApplicationAreaSize(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto device_handle{rp.Pop<u64>()}; + LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle); + // TODO(german77): Loop through all interfaces + if (device_handle == nfp_interface.GetHandle()) { IPC::ResponseBuilder rb{ctx, 3}; rb.Push(ResultSuccess); - rb.PushRaw<u32>(0); // This is from the GetCommonInfo stub + rb.Push(ApplicationAreaSize); + return; } - Module::Interface& nfp_interface; - KernelHelpers::ServiceContext& service_context; + LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ErrCodes::DeviceNotFound); +} + +void IUser::AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_NFP, "(STUBBED) called"); - bool has_attached_handle{}; - const u64 device_handle{0}; // Npad device 1 - const u32 npad_id{0}; // Player 1 controller - State state{State::NonInitialized}; - DeviceState device_state{DeviceState::Initialized}; - Kernel::KEvent* deactivate_event; - Kernel::KEvent* availability_change_event; -}; + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(ResultSuccess); + rb.PushCopyObjects(availability_change_event->GetReadableEvent()); +} + +Module::Interface::Interface(std::shared_ptr<Module> module_, Core::System& system_, + const char* name) + : ServiceFramework{system_, name}, module{std::move(module_)}, + npad_id{Core::HID::NpadIdType::Player1}, service_context{system_, service_name} { + activate_event = service_context.CreateEvent("IUser:NFPActivateEvent"); + deactivate_event = service_context.CreateEvent("IUser:NFPDeactivateEvent"); +} + +Module::Interface::~Interface() = default; void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_NFP, "called"); IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(ResultSuccess); - rb.PushIpcInterface<IUser>(*this, system, service_context); + rb.PushIpcInterface<IUser>(*this, system); } bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { - if (buffer.size() < sizeof(AmiiboFile)) { + if (device_state != DeviceState::SearchingForTag) { + LOG_ERROR(Service_NFP, "Game is not looking for amiibos, current state {}", device_state); + return false; + } + + constexpr auto tag_size = sizeof(NTAG215File); + constexpr auto tag_size_without_password = sizeof(NTAG215File) - sizeof(NTAG215Password); + + std::vector<u8> amiibo_buffer = buffer; + + if (amiibo_buffer.size() < tag_size_without_password) { + LOG_ERROR(Service_NFP, "Wrong file size {}", buffer.size()); + return false; + } + + // Ensure it has the correct size + if (amiibo_buffer.size() != tag_size) { + amiibo_buffer.resize(tag_size, 0); + } + + LOG_INFO(Service_NFP, "Amiibo detected"); + std::memcpy(&tag_data, buffer.data(), tag_size); + + if (!IsAmiiboValid()) { return false; } - 
std::memcpy(&amiibo, buffer.data(), sizeof(amiibo)); - nfc_tag_load->GetWritableEvent().Signal(); + // This value can't be dumped from a tag. Generate it + tag_data.password.PWD = GetTagPassword(tag_data.uuid); + + device_state = DeviceState::TagFound; + activate_event->GetWritableEvent().Signal(); return true; } -Kernel::KReadableEvent& Module::Interface::GetNFCEvent() { - return nfc_tag_load->GetReadableEvent(); +void Module::Interface::CloseAmiibo() { + LOG_INFO(Service_NFP, "Remove amiibo"); + device_state = DeviceState::TagRemoved; + is_application_area_initialized = false; + application_area_id = 0; + application_area_data.clear(); + deactivate_event->GetWritableEvent().Signal(); +} + +bool Module::Interface::IsAmiiboValid() const { + const auto& amiibo_data = tag_data.user_memory; + LOG_DEBUG(Service_NFP, "uuid_lock=0x{0:x}", tag_data.lock_bytes); + LOG_DEBUG(Service_NFP, "compability_container=0x{0:x}", tag_data.compability_container); + LOG_DEBUG(Service_NFP, "crypto_init=0x{0:x}", amiibo_data.crypto_init); + LOG_DEBUG(Service_NFP, "write_count={}", amiibo_data.write_count); + + LOG_DEBUG(Service_NFP, "character_id=0x{0:x}", amiibo_data.model_info.character_id); + LOG_DEBUG(Service_NFP, "character_variant={}", amiibo_data.model_info.character_variant); + LOG_DEBUG(Service_NFP, "amiibo_type={}", amiibo_data.model_info.amiibo_type); + LOG_DEBUG(Service_NFP, "model_number=0x{0:x}", amiibo_data.model_info.model_number); + LOG_DEBUG(Service_NFP, "series={}", amiibo_data.model_info.series); + LOG_DEBUG(Service_NFP, "fixed_value=0x{0:x}", amiibo_data.model_info.fixed); + + LOG_DEBUG(Service_NFP, "tag_dynamic_lock=0x{0:x}", tag_data.dynamic_lock); + LOG_DEBUG(Service_NFP, "tag_CFG0=0x{0:x}", tag_data.CFG0); + LOG_DEBUG(Service_NFP, "tag_CFG1=0x{0:x}", tag_data.CFG1); + + // Check against all known constants on an amiibo binary + if (tag_data.lock_bytes != 0xE00F) { + return false; + } + if (tag_data.compability_container != 0xEEFF10F1U) { + return false; + } + if ((amiibo_data.crypto_init & 0xFF) != 0xA5) { + return false; + } + if (amiibo_data.model_info.fixed != 0x02) { + return false; + } + if ((tag_data.dynamic_lock & 0xFFFFFF) != 0x0F0001) { + return false; + } + if (tag_data.CFG0 != 0x04000000U) { + return false; + } + if (tag_data.CFG1 != 0x5F) { + return false; + } + return true; +} + +Kernel::KReadableEvent& Module::Interface::GetActivateEvent() const { + return activate_event->GetReadableEvent(); +} + +Kernel::KReadableEvent& Module::Interface::GetDeactivateEvent() const { + return deactivate_event->GetReadableEvent(); +} + +void Module::Interface::Initialize() { + device_state = DeviceState::Initialized; +} + +void Module::Interface::Finalize() { + device_state = DeviceState::Unaviable; + is_application_area_initialized = false; + application_area_id = 0; + application_area_data.clear(); +} + +ResultCode Module::Interface::StartDetection(s32 protocol_) { + auto npad_device = system.HIDCore().GetEmulatedController(npad_id); + + // TODO(german77): Add callback for when nfc data is available + + if (device_state == DeviceState::Initialized || device_state == DeviceState::TagRemoved) { + npad_device->SetPollingMode(Common::Input::PollingMode::NFC); + device_state = DeviceState::SearchingForTag; + protocol = protocol_; + return ResultSuccess; + } + + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; +} + +ResultCode Module::Interface::StopDetection() { + auto npad_device = system.HIDCore().GetEmulatedController(npad_id); + 
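// Hand the controller back to regular input polling; StartDetection() + // switched it to PollingMode::NFC while searching for a tag. +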
npad_device->SetPollingMode(Common::Input::PollingMode::Active); + + if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) { + CloseAmiibo(); + return ResultSuccess; + } + if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) { + device_state = DeviceState::Initialized; + return ResultSuccess; + } + + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; +} + +ResultCode Module::Interface::Mount() { + if (device_state == DeviceState::TagFound) { + device_state = DeviceState::TagMounted; + return ResultSuccess; + } + + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; +} + +ResultCode Module::Interface::Unmount() { + if (device_state == DeviceState::TagMounted) { + is_application_area_initialized = false; + application_area_id = 0; + application_area_data.clear(); + device_state = DeviceState::TagFound; + return ResultSuccess; + } + + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; +} + +ResultCode Module::Interface::GetTagInfo(TagInfo& tag_info) const { + if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) { + tag_info = { + .uuid = tag_data.uuid, + .uuid_length = static_cast<u8>(tag_data.uuid.size()), + .protocol = protocol, + .tag_type = static_cast<u32>(tag_data.user_memory.model_info.amiibo_type), + }; + return ResultSuccess; + } + + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; +} + +ResultCode Module::Interface::GetCommonInfo(CommonInfo& common_info) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + + // Read this data from the amiibo save file + common_info = { + .last_write_year = 2022, + .last_write_month = 2, + .last_write_day = 7, + .write_counter = tag_data.user_memory.write_count, + .version = 1, + .application_area_size = ApplicationAreaSize, + }; + return ResultSuccess; +} + +ResultCode Module::Interface::GetModelInfo(ModelInfo& model_info) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + + model_info = tag_data.user_memory.model_info; + return ResultSuccess; +} + +ResultCode Module::Interface::GetRegisterInfo(RegisterInfo& register_info) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + + Service::Mii::MiiManager manager; + + // Read this data from the amiibo save file + register_info = { + .mii_char_info = manager.BuildDefault(0), + .first_write_year = 2022, + .first_write_month = 2, + .first_write_day = 7, + .amiibo_name = {'Y', 'u', 'z', 'u', 'A', 'm', 'i', 'i', 'b', 'o', 0}, + .unknown = {}, + }; + return ResultSuccess; +} + +ResultCode Module::Interface::OpenApplicationArea(u32 access_id) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + if (AmiiboApplicationDataExist(access_id)) { + application_area_data = LoadAmiiboApplicationData(access_id); + application_area_id = access_id; + is_application_area_initialized = true; + } + if (!is_application_area_initialized) { + LOG_WARNING(Service_NFP, "Application area is not 
initialized"); + return ErrCodes::ApplicationAreaIsNotInitialized; + } + return ResultSuccess; +} + +ResultCode Module::Interface::GetApplicationArea(std::vector<u8>& data) const { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + if (!is_application_area_initialized) { + LOG_ERROR(Service_NFP, "Application area is not initialized"); + return ErrCodes::ApplicationAreaIsNotInitialized; + } + + data = application_area_data; + + return ResultSuccess; +} + +ResultCode Module::Interface::SetApplicationArea(const std::vector<u8>& data) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + if (!is_application_area_initialized) { + LOG_ERROR(Service_NFP, "Application area is not initialized"); + return ErrCodes::ApplicationAreaIsNotInitialized; + } + application_area_data = data; + SaveAmiiboApplicationData(application_area_id, application_area_data); + return ResultSuccess; +} + +ResultCode Module::Interface::CreateApplicationArea(u32 access_id, const std::vector<u8>& data) { + if (device_state != DeviceState::TagMounted) { + LOG_ERROR(Service_NFP, "Wrong device state {}", device_state); + return ErrCodes::WrongDeviceState; + } + if (AmiiboApplicationDataExist(access_id)) { + LOG_ERROR(Service_NFP, "Application area already exist"); + return ErrCodes::ApplicationAreaExist; + } + application_area_data = data; + application_area_id = access_id; + SaveAmiiboApplicationData(application_area_id, application_area_data); + return ResultSuccess; +} + +bool Module::Interface::AmiiboApplicationDataExist(u32 access_id) const { + // TODO(german77): Check if file exist + return false; +} + +std::vector<u8> Module::Interface::LoadAmiiboApplicationData(u32 access_id) const { + // TODO(german77): Read file + std::vector<u8> data(ApplicationAreaSize); + return data; +} + +void Module::Interface::SaveAmiiboApplicationData(u32 access_id, + const std::vector<u8>& data) const { + // TODO(german77): Save file +} + +u64 Module::Interface::GetHandle() const { + // Generate a handle based of the npad id + return static_cast<u64>(npad_id); +} + +DeviceState Module::Interface::GetCurrentState() const { + return device_state; +} + +Core::HID::NpadIdType Module::Interface::GetNpadId() const { + return npad_id; } -const Module::Interface::AmiiboFile& Module::Interface::GetAmiiboBuffer() const { - return amiibo; +u32 Module::Interface::GetTagPassword(const TagUuid& uuid) const { + // Verifiy that the generated password is correct + u32 password = 0xAA ^ (uuid[1] ^ uuid[3]); + password &= (0x55 ^ (uuid[2] ^ uuid[4])) << 8; + password &= (0xAA ^ (uuid[3] ^ uuid[5])) << 16; + password &= (0x55 ^ (uuid[4] ^ uuid[6])) << 24; + return password; } void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) { diff --git a/src/core/hle/service/nfp/nfp.h b/src/core/hle/service/nfp/nfp.h index 95c127efb..022f13b29 100644 --- a/src/core/hle/service/nfp/nfp.h +++ b/src/core/hle/service/nfp/nfp.h @@ -7,15 +7,132 @@ #include <array> #include <vector> +#include "common/common_funcs.h" #include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/mii/mii_manager.h" #include "core/hle/service/service.h" namespace Kernel { class KEvent; -} +class KReadableEvent; +} // namespace Kernel + +namespace Core::HID { +enum class NpadIdType : u32; +} // namespace Core::HID namespace Service::NFP { +enum class 
+ return password; } void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) { diff --git a/src/core/hle/service/nfp/nfp.h b/src/core/hle/service/nfp/nfp.h index 95c127efb..022f13b29 100644 --- a/src/core/hle/service/nfp/nfp.h +++ b/src/core/hle/service/nfp/nfp.h @@ -7,15 +7,132 @@ #include <array> #include <vector> +#include "common/common_funcs.h" #include "core/hle/service/kernel_helpers.h" +#include "core/hle/service/mii/mii_manager.h" #include "core/hle/service/service.h" namespace Kernel { class KEvent; -} +class KReadableEvent; +} // namespace Kernel + +namespace Core::HID { +enum class NpadIdType : u32; +} // namespace Core::HID namespace Service::NFP { +enum class ServiceType : u32 { + User, + Debug, + System, +}; + +enum class State : u32 { + NonInitialized, + Initialized, +}; + +enum class DeviceState : u32 { + Initialized, + SearchingForTag, + TagFound, + TagRemoved, + TagMounted, + Unavailable, + Finalized, +}; + +enum class ModelType : u32 { + Amiibo, +}; + +enum class MountTarget : u32 { + Rom, + Ram, + All, +}; + +enum class AmiiboType : u8 { + Figure, + Card, + Yarn, +}; + +enum class AmiiboSeries : u8 { + SuperSmashBros, + SuperMario, + ChibiRobo, + YoshiWoollyWorld, + Splatoon, + AnimalCrossing, + EightBitMario, + Skylanders, + Unknown8, + TheLegendOfZelda, + ShovelKnight, + Unknown11, + Kirby, + Pokemon, + MarioSportsSuperstars, + MonsterHunter, + BoxBoy, + Pikmin, + FireEmblem, + Metroid, + Others, + MegaMan, + Diablo +}; + +using TagUuid = std::array<u8, 10>; + +struct TagInfo { + TagUuid uuid; + u8 uuid_length; + INSERT_PADDING_BYTES(0x15); + s32 protocol; + u32 tag_type; + INSERT_PADDING_BYTES(0x30); +}; +static_assert(sizeof(TagInfo) == 0x58, "TagInfo is an invalid size"); + +struct CommonInfo { + u16 last_write_year; + u8 last_write_month; + u8 last_write_day; + u16 write_counter; + u16 version; + u32 application_area_size; + INSERT_PADDING_BYTES(0x34); +}; +static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size"); + +struct ModelInfo { + u16 character_id; + u8 character_variant; + AmiiboType amiibo_type; + u16 model_number; + AmiiboSeries series; + u8 fixed; // Must be 02 + INSERT_PADDING_BYTES(0x4); // Unknown + INSERT_PADDING_BYTES(0x20); // Probably a SHA256-(HMAC?) hash + INSERT_PADDING_BYTES(0x14); // SHA256-HMAC +}; +static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size"); + +struct RegisterInfo { + Service::Mii::MiiInfo mii_char_info; + u16 first_write_year; + u8 first_write_month; + u8 first_write_day; + std::array<u8, 11> amiibo_name; + u8 unknown; + INSERT_PADDING_BYTES(0x98); +}; +static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size"); + class Module final { public: class Interface : public ServiceFramework<Interface> { @@ -24,34 +141,131 @@ public: const char* name); ~Interface() override; - struct ModelInfo { - std::array<u8, 0x8> amiibo_identification_block; - INSERT_PADDING_BYTES(0x38); + struct EncryptedAmiiboFile { + u16 crypto_init; // Must be A5 XX + u16 write_count; // Number of times the amiibo has been written? + INSERT_PADDING_BYTES(0x20); // System crypts + INSERT_PADDING_BYTES(0x20); // SHA256-(HMAC?) hash + ModelInfo model_info; // This struct is larger than the documented layout + INSERT_PADDING_BYTES(0xC); // SHA256-HMAC + INSERT_PADDING_BYTES(0x114); // section 1 encrypted buffer + INSERT_PADDING_BYTES(0x54); // section 2 encrypted buffer + }; + static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "EncryptedAmiiboFile is an invalid size"); + + struct NTAG215Password { + u32 PWD; // Password to allow write access + u16 PACK; // Password acknowledge reply + u16 RFUI; // Reserved for future use }; - static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size"); + static_assert(sizeof(NTAG215Password) == 0x8, "NTAG215Password is an invalid size"); - struct AmiiboFile { - std::array<u8, 10> uuid; - INSERT_PADDING_BYTES(0x4a); - ModelInfo model_info;
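+ // The 0x21C (540-byte) layout below maps onto NTAG215 pages 0-134: UID and static lock + // bytes (pages 0-2), capability container (page 3), encrypted user memory (pages 4-129), + // then the dynamic lock (130), CFG0/CFG1 (131-132) and PWD/PACK (133-134) pages.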
+ struct NTAG215File { + TagUuid uuid; // Unique serial number + u16 lock_bytes; // Set defined pages as read only + u32 capability_container; // Defines available memory + EncryptedAmiiboFile user_memory; // Writable data + u32 dynamic_lock; // Dynamic lock + u32 CFG0; // Defines memory protected by password + u32 CFG1; // Defines number of verification attempts + NTAG215Password password; // Password data }; - static_assert(sizeof(AmiiboFile) == 0x94, "AmiiboFile is an invalid size"); + static_assert(sizeof(NTAG215File) == 0x21C, "NTAG215File is an invalid size"); void CreateUserInterface(Kernel::HLERequestContext& ctx); bool LoadAmiibo(const std::vector<u8>& buffer); - Kernel::KReadableEvent& GetNFCEvent(); - const AmiiboFile& GetAmiiboBuffer() const; + void CloseAmiibo(); + + void Initialize(); + void Finalize(); + + ResultCode StartDetection(s32 protocol_); + ResultCode StopDetection(); + ResultCode Mount(); + ResultCode Unmount(); + + ResultCode GetTagInfo(TagInfo& tag_info) const; + ResultCode GetCommonInfo(CommonInfo& common_info) const; + ResultCode GetModelInfo(ModelInfo& model_info) const; + ResultCode GetRegisterInfo(RegisterInfo& register_info) const; + + ResultCode OpenApplicationArea(u32 access_id); + ResultCode GetApplicationArea(std::vector<u8>& data) const; + ResultCode SetApplicationArea(const std::vector<u8>& data); + ResultCode CreateApplicationArea(u32 access_id, const std::vector<u8>& data); + + u64 GetHandle() const; + DeviceState GetCurrentState() const; + Core::HID::NpadIdType GetNpadId() const; + + Kernel::KReadableEvent& GetActivateEvent() const; + Kernel::KReadableEvent& GetDeactivateEvent() const; protected: std::shared_ptr<Module> module; private: + /// Validates that the amiibo file is not corrupted + bool IsAmiiboValid() const; + + bool AmiiboApplicationDataExist(u32 access_id) const; + std::vector<u8> LoadAmiiboApplicationData(u32 access_id) const; + void SaveAmiiboApplicationData(u32 access_id, const std::vector<u8>& data) const; + + /// Returns the password needed to allow write access to protected memory + u32 GetTagPassword(const TagUuid& uuid) const; + + const Core::HID::NpadIdType npad_id; + + DeviceState device_state{DeviceState::Unavailable}; KernelHelpers::ServiceContext service_context; - Kernel::KEvent* nfc_tag_load; - AmiiboFile amiibo{}; + Kernel::KEvent* activate_event; + Kernel::KEvent* deactivate_event; + NTAG215File tag_data{}; + s32 protocol; + bool is_application_area_initialized{}; + u32 application_area_id; + std::vector<u8> application_area_data; }; }; +class IUser final : public ServiceFramework<IUser> { +public: + explicit IUser(Module::Interface& nfp_interface_, Core::System& system_); + +private: + void Initialize(Kernel::HLERequestContext& ctx); + void Finalize(Kernel::HLERequestContext& ctx); + 
void ListDevices(Kernel::HLERequestContext& ctx); + void StartDetection(Kernel::HLERequestContext& ctx); + void StopDetection(Kernel::HLERequestContext& ctx); + void Mount(Kernel::HLERequestContext& ctx); + void Unmount(Kernel::HLERequestContext& ctx); + void OpenApplicationArea(Kernel::HLERequestContext& ctx); + void GetApplicationArea(Kernel::HLERequestContext& ctx); + void SetApplicationArea(Kernel::HLERequestContext& ctx); + void CreateApplicationArea(Kernel::HLERequestContext& ctx); + void GetTagInfo(Kernel::HLERequestContext& ctx); + void GetRegisterInfo(Kernel::HLERequestContext& ctx); + void GetCommonInfo(Kernel::HLERequestContext& ctx); + void GetModelInfo(Kernel::HLERequestContext& ctx); + void AttachActivateEvent(Kernel::HLERequestContext& ctx); + void AttachDeactivateEvent(Kernel::HLERequestContext& ctx); + void GetState(Kernel::HLERequestContext& ctx); + void GetDeviceState(Kernel::HLERequestContext& ctx); + void GetNpadId(Kernel::HLERequestContext& ctx); + void GetApplicationAreaSize(Kernel::HLERequestContext& ctx); + void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx); + + KernelHelpers::ServiceContext service_context; + + // TODO(german77): We should have a vector of interfaces + Module::Interface& nfp_interface; + + State state{State::NonInitialized}; + Kernel::KEvent* availability_change_event; +}; + void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system); } // namespace Service::NFP diff --git a/src/core/hle/service/ns/pdm_qry.cpp b/src/core/hle/service/ns/pdm_qry.cpp index e2fab5c3f..36ce46353 100644 --- a/src/core/hle/service/ns/pdm_qry.cpp +++ b/src/core/hle/service/ns/pdm_qry.cpp @@ -59,7 +59,7 @@ void PDM_QRY::QueryPlayStatisticsByApplicationIdAndUserAccountId(Kernel::HLERequ LOG_WARNING(Service_NS, "(STUBBED) called. unknown={}. application_id=0x{:016X}, user_account_uid=0x{}", - unknown, application_id, user_account_uid.Format()); + unknown, application_id, user_account_uid.RawString()); IPC::ResponseBuilder rb{ctx, 12}; rb.Push(ResultSuccess); diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp index 277abc17a..057666021 100644 --- a/src/core/hle/service/pm/pm.cpp +++ b/src/core/hle/service/pm/pm.cpp @@ -91,6 +91,8 @@ public: {4, &DebugMonitor::GetApplicationProcessId, "GetApplicationProcessId"}, {5, nullptr, "HookToCreateApplicationProgress"}, {6, nullptr, "ClearHook"}, + {65000, &DebugMonitor::AtmosphereGetProcessInfo, "AtmosphereGetProcessInfo"}, + {65001, nullptr, "AtmosphereGetCurrentLimitInfo"}, }; // clang-format on @@ -125,6 +127,49 @@ private: GetApplicationPidGeneric(ctx, kernel.GetProcessList()); } + void AtmosphereGetProcessInfo(Kernel::HLERequestContext& ctx) { + // https://github.com/Atmosphere-NX/Atmosphere/blob/master/stratosphere/pm/source/impl/pm_process_manager.cpp#L614 + // This implementation is incomplete; only a handle to the process is returned. 
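+ // A complete implementation would also resolve the program's real storage ID and its + // Atmosphere override status; below, both are stubbed to zero aside from the program ID.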
+ IPC::RequestParser rp{ctx}; + const auto pid = rp.PopRaw<u64>(); + + LOG_WARNING(Service_PM, "(Partial Implementation) called, pid={:016X}", pid); + + const auto process = SearchProcessList(kernel.GetProcessList(), [pid](const auto& proc) { + return proc->GetProcessID() == pid; + }); + + if (!process.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultProcessNotFound); + return; + } + + struct ProgramLocation { + u64 program_id; + u8 storage_id; + }; + // Note: u64 + u8 naturally pads the struct out to 0x10 bytes. + static_assert(sizeof(ProgramLocation) == 0x10, "ProgramLocation has an invalid size"); + + struct OverrideStatus { + u64 keys_held; + u64 flags; + }; + static_assert(sizeof(OverrideStatus) == 0x10, "OverrideStatus has an invalid size"); + + OverrideStatus override_status{}; + ProgramLocation program_location{ + .program_id = (*process)->GetProgramID(), + .storage_id = 0, + }; + + IPC::ResponseBuilder rb{ctx, 10, 1}; + rb.Push(ResultSuccess); + rb.PushCopyObjects(*process); + rb.PushRaw(program_location); + rb.PushRaw(override_status); + } + const Kernel::KernelCore& kernel; }; diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index f54e6fe56..eb1138313 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp @@ -39,6 +39,7 @@ #include "core/hle/service/mig/mig.h" #include "core/hle/service/mii/mii.h" #include "core/hle/service/mm/mm_u.h" +#include "core/hle/service/mnpp/mnpp_app.h" #include "core/hle/service/ncm/ncm.h" #include "core/hle/service/nfc/nfc.h" #include "core/hle/service/nfp/nfp.h" @@ -265,6 +266,7 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system Migration::InstallInterfaces(*sm, system); Mii::InstallInterfaces(*sm, system); MM::InstallInterfaces(*sm, system); + MNPP::InstallInterfaces(*sm, system); NCM::InstallInterfaces(*sm, system); NFC::InstallInterfaces(*sm, system); NFP::InstallInterfaces(*sm, system); diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp index eaa172595..695a1faa6 100644 --- a/src/core/hle/service/sm/sm.cpp +++ b/src/core/hle/service/sm/sm.cpp @@ -81,6 +81,8 @@ ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name } auto* port = Kernel::KPort::Create(kernel); + SCOPE_EXIT({ port->Close(); }); + port->Initialize(ServerSessionCountMax, false, name); auto handler = it->second; port->GetServerPort().SetSessionHandler(std::move(handler)); diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp index f83272633..3dbac5a23 100644 --- a/src/core/hle/service/sockets/bsd.cpp +++ b/src/core/hle/service/sockets/bsd.cpp @@ -569,9 +569,9 @@ std::pair<s32, Errno> BSD::AcceptImpl(s32 fd, std::vector<u8>& write_buffer) { new_descriptor.socket = std::move(result.socket); new_descriptor.is_connection_based = descriptor.is_connection_based; - ASSERT(write_buffer.size() == sizeof(SockAddrIn)); const SockAddrIn guest_addr_in = Translate(result.sockaddr_in); - std::memcpy(write_buffer.data(), &guest_addr_in, sizeof(guest_addr_in));
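+ // Guests may provide a sockaddr buffer smaller than SockAddrIn, so clamp the copy + // length instead of asserting on the buffer size.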
+ const size_t length = std::min(sizeof(guest_addr_in), write_buffer.size()); + std::memcpy(write_buffer.data(), &guest_addr_in, length); return {new_fd, Errno::SUCCESS}; } diff --git a/src/core/hle/service/time/clock_types.h b/src/core/hle/service/time/clock_types.h index 392e16863..d0cacb80c 100644 --- a/src/core/hle/service/time/clock_types.h +++ b/src/core/hle/service/time/clock_types.h @@ -36,7 +36,7 @@ struct SteadyClockTimePoint { } static SteadyClockTimePoint GetRandom() { - return {0, Common::UUID::Generate()}; + return {0, Common::UUID::MakeRandom()}; } }; static_assert(sizeof(SteadyClockTimePoint) == 0x18, "SteadyClockTimePoint is incorrect size"); diff --git a/src/core/hle/service/time/steady_clock_core.h b/src/core/hle/service/time/steady_clock_core.h index d80a2385f..5ee2c0e0a 100644 --- a/src/core/hle/service/time/steady_clock_core.h +++ b/src/core/hle/service/time/steady_clock_core.h @@ -49,7 +49,7 @@ public: } private: - Common::UUID clock_source_id{Common::UUID::Generate()}; + Common::UUID clock_source_id{Common::UUID::MakeRandom()}; bool is_initialized{}; }; diff --git a/src/core/hle/service/time/time_manager.cpp b/src/core/hle/service/time/time_manager.cpp index c1e4e6cce..00f1ae8cf 100644 --- a/src/core/hle/service/time/time_manager.cpp +++ b/src/core/hle/service/time/time_manager.cpp @@ -45,7 +45,7 @@ struct TimeManager::Impl final { time_zone_content_manager{system} { const auto system_time{Clock::TimeSpanType::FromSeconds(GetExternalRtcValue())}; - SetupStandardSteadyClock(system, Common::UUID::Generate(), system_time, {}, {}); + SetupStandardSteadyClock(system, Common::UUID::MakeRandom(), system_time, {}, {}); SetupStandardLocalSystemClock(system, {}, system_time.ToSeconds()); Clock::SystemClockContext clock_context{}; diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 0979fc421..329f4ba86 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h @@ -28,10 +28,10 @@ class Layer; /// Represents a single display type class Display { +public: YUZU_NON_COPYABLE(Display); YUZU_NON_MOVEABLE(Display); -public: /// Constructs a display with a given unique ID and name. /// /// @param id The unique ID for this display. diff --git a/src/core/loader/loader.h b/src/core/loader/loader.h index 7b1bac3f7..8b6b3b68f 100644 --- a/src/core/loader/loader.h +++ b/src/core/loader/loader.h @@ -11,6 +11,7 @@ #include <utility> #include <vector> +#include "common/common_funcs.h" #include "common/common_types.h" #include "core/file_sys/control_metadata.h" #include "core/file_sys/vfs.h" @@ -139,8 +140,11 @@ std::string GetResultStatusString(ResultStatus status); std::ostream& operator<<(std::ostream& os, ResultStatus status); /// Interface for loading an application -class AppLoader : NonCopyable { +class AppLoader { public: + YUZU_NON_COPYABLE(AppLoader); + YUZU_NON_MOVEABLE(AppLoader); + struct LoadParameters { s32 main_thread_priority; u64 main_thread_stack_size; diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 88d6ec908..28d30eee2 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -39,8 +39,7 @@ struct Memory::Impl { void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End, - "Out of bounds target: {:016X}", target); + ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target); MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); if (Settings::IsFastmemEnabled()) {