path: root/src/core
Diffstat (limited to '')
-rw-r--r--  src/core/CMakeLists.txt | 42
-rw-r--r--  src/core/arm/arm_interface.cpp | 24
-rw-r--r--  src/core/arm/arm_interface.h | 3
-rw-r--r--  src/core/arm/nce/arm_nce.cpp | 400
-rw-r--r--  src/core/arm/nce/arm_nce.h | 108
-rw-r--r--  src/core/arm/nce/arm_nce.s | 222
-rw-r--r--  src/core/arm/nce/arm_nce_asm_definitions.h | 29
-rw-r--r--  src/core/arm/nce/guest_context.h | 50
-rw-r--r--  src/core/arm/nce/instructions.h | 147
-rw-r--r--  src/core/arm/nce/patcher.cpp | 474
-rw-r--r--  src/core/arm/nce/patcher.h | 98
-rw-r--r--  src/core/core.cpp | 11
-rw-r--r--  src/core/core_timing.cpp | 2
-rw-r--r--  src/core/core_timing.h | 2
-rw-r--r--  src/core/cpu_manager.cpp | 2
-rw-r--r--  src/core/debugger/debugger.cpp | 24
-rw-r--r--  src/core/debugger/gdbstub.cpp | 275
-rw-r--r--  src/core/device_memory.cpp | 3
-rw-r--r--  src/core/file_sys/program_metadata.cpp | 10
-rw-r--r--  src/core/file_sys/program_metadata.h | 15
-rw-r--r--  src/core/file_sys/romfs.cpp | 44
-rw-r--r--  src/core/file_sys/romfs.h | 9
-rw-r--r--  src/core/frontend/emu_window.h | 10
-rw-r--r--  src/core/hid/emulated_console.h | 8
-rw-r--r--  src/core/hid/emulated_controller.cpp | 144
-rw-r--r--  src/core/hid/emulated_controller.h | 4
-rw-r--r--  src/core/hid/hid_core.cpp | 5
-rw-r--r--  src/core/hid/hid_types.h | 110
-rw-r--r--  src/core/hid/input_interpreter.cpp | 11
-rw-r--r--  src/core/hid/input_interpreter.h | 4
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 68
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h | 18
-rw-r--r--  src/core/hle/kernel/code_set.h | 14
-rw-r--r--  src/core/hle/kernel/k_address_space_info.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_capabilities.cpp | 39
-rw-r--r--  src/core/hle/kernel/k_capabilities.h | 21
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 22
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 9
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h | 10
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h | 8
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 121
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 12
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 3519
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 542
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp | 5739
-rw-r--r--  src/core/hle/kernel/k_page_table_base.h | 760
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 1453
-rw-r--r--  src/core/hle/kernel/k_process.h | 734
-rw-r--r--  src/core/hle/kernel/k_process_page_table.h | 481
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_system_resource.cpp | 87
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 28
-rw-r--r--  src/core/hle/kernel/k_thread.h | 17
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp | 4
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 53
-rw-r--r--  src/core/hle/kernel/kernel.h | 3
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 14
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 389
-rw-r--r--  src/core/hle/kernel/process_capability.h | 266
-rw-r--r--  src/core/hle/kernel/svc.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp | 28
-rw-r--r--  src/core/hle/kernel/svc/svc_lock.cpp | 4
-rw-r--r--  src/core/hle/kernel/svc/svc_memory.cpp | 6
-rw-r--r--  src/core/hle/kernel/svc/svc_physical_memory.cpp | 13
-rw-r--r--  src/core/hle/kernel/svc/svc_process_memory.cpp | 3
-rw-r--r--  src/core/hle/kernel/svc/svc_query_memory.cpp | 8
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc/svc_thread.cpp | 7
-rw-r--r--  src/core/hle/kernel/svc_generator.py | 2
-rw-r--r--  src/core/hle/kernel/svc_types.h | 46
-rw-r--r--  src/core/hle/result.h | 31
-rw-r--r--  src/core/hle/service/acc/acc.cpp | 56
-rw-r--r--  src/core/hle/service/am/am.cpp | 191
-rw-r--r--  src/core/hle/service/am/am.h | 6
-rw-r--r--  src/core/hle/service/am/applets/applet_cabinet.cpp | 9
-rw-r--r--  src/core/hle/service/am/applets/applet_controller.h | 2
-rw-r--r--  src/core/hle/service/am/applets/applet_web_browser.cpp | 3
-rw-r--r--  src/core/hle/service/am/applets/applets.h | 24
-rw-r--r--  src/core/hle/service/btm/btm.cpp | 56
-rw-r--r--  src/core/hle/service/caps/caps_manager.cpp | 16
-rw-r--r--  src/core/hle/service/caps/caps_manager.h | 9
-rw-r--r--  src/core/hle/service/caps/caps_ss.cpp | 10
-rw-r--r--  src/core/hle/service/caps/caps_su.cpp | 42
-rw-r--r--  src/core/hle/service/caps/caps_su.h | 9
-rw-r--r--  src/core/hle/service/friend/friend.cpp | 13
-rw-r--r--  src/core/hle/service/hid/controllers/console_six_axis.cpp | 42
-rw-r--r--  src/core/hle/service/hid/controllers/console_six_axis.h | 43
-rw-r--r--  src/core/hle/service/hid/controllers/controller_base.cpp | 9
-rw-r--r--  src/core/hle/service/hid/controllers/controller_base.h | 4
-rw-r--r--  src/core/hle/service/hid/controllers/debug_pad.cpp | 10
-rw-r--r--  src/core/hle/service/hid/controllers/debug_pad.h | 6
-rw-r--r--  src/core/hle/service/hid/controllers/gesture.cpp | 54
-rw-r--r--  src/core/hle/service/hid/controllers/gesture.h | 6
-rw-r--r--  src/core/hle/service/hid/controllers/keyboard.cpp | 10
-rw-r--r--  src/core/hle/service/hid/controllers/keyboard.h | 6
-rw-r--r--  src/core/hle/service/hid/controllers/mouse.cpp | 11
-rw-r--r--  src/core/hle/service/hid/controllers/mouse.h | 6
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 588
-rw-r--r--  src/core/hle/service/hid/controllers/npad.h | 205
-rw-r--r--  src/core/hle/service/hid/controllers/palma.cpp | 90
-rw-r--r--  src/core/hle/service/hid/controllers/palma.h | 8
-rw-r--r--  src/core/hle/service/hid/controllers/seven_six_axis.cpp (renamed from src/core/hle/service/hid/controllers/console_sixaxis.cpp) | 38
-rw-r--r--  src/core/hle/service/hid/controllers/seven_six_axis.h (renamed from src/core/hle/service/hid/controllers/console_sixaxis.h) | 31
-rw-r--r--  src/core/hle/service/hid/controllers/six_axis.cpp | 413
-rw-r--r--  src/core/hle/service/hid/controllers/six_axis.h | 111
-rw-r--r--  src/core/hle/service/hid/controllers/touchscreen.cpp | 23
-rw-r--r--  src/core/hle/service/hid/controllers/touchscreen.h | 10
-rw-r--r--  src/core/hle/service/hid/controllers/xpad.cpp | 11
-rw-r--r--  src/core/hle/service/hid/controllers/xpad.h | 6
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 2857
-rw-r--r--  src/core/hle/service/hid/hid.h | 212
-rw-r--r--  src/core/hle/service/hid/hid_debug_server.cpp | 159
-rw-r--r--  src/core/hle/service/hid/hid_debug_server.h | 26
-rw-r--r--  src/core/hle/service/hid/hid_firmware_settings.cpp | 99
-rw-r--r--  src/core/hle/service/hid/hid_firmware_settings.h | 54
-rw-r--r--  src/core/hle/service/hid/hid_server.cpp | 2387
-rw-r--r--  src/core/hle/service/hid/hid_server.h | 150
-rw-r--r--  src/core/hle/service/hid/hid_system_server.cpp | 539
-rw-r--r--  src/core/hle/service/hid/hid_system_server.h | 63
-rw-r--r--  src/core/hle/service/hid/hid_util.h | 146
-rw-r--r--  src/core/hle/service/hid/hidbus/hidbus_base.cpp | 5
-rw-r--r--  src/core/hle/service/hid/irs.cpp | 7
-rw-r--r--  src/core/hle/service/hid/irs.h | 5
-rw-r--r--  src/core/hle/service/hid/irsensor/clustering_processor.cpp | 16
-rw-r--r--  src/core/hle/service/hid/irsensor/clustering_processor.h | 9
-rw-r--r--  src/core/hle/service/hid/irsensor/image_transfer_processor.cpp | 2
-rw-r--r--  src/core/hle/service/hid/irsensor/moment_processor.cpp | 123
-rw-r--r--  src/core/hle/service/hid/irsensor/moment_processor.h | 34
-rw-r--r--  src/core/hle/service/hid/resource_manager.cpp | 241
-rw-r--r--  src/core/hle/service/hid/resource_manager.h | 111
-rw-r--r--  src/core/hle/service/hid/ring_lifo.h | 6
-rw-r--r--  src/core/hle/service/kernel_helpers.cpp | 6
-rw-r--r--  src/core/hle/service/ldn/ldn.cpp | 10
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp | 45
-rw-r--r--  src/core/hle/service/mii/types/ver3_store_data.cpp | 2
-rw-r--r--  src/core/hle/service/nfc/common/device.cpp | 68
-rw-r--r--  src/core/hle/service/nfc/common/device.h | 1
-rw-r--r--  src/core/hle/service/nfc/common/device_manager.cpp | 3
-rw-r--r--  src/core/hle/service/nvdrv/devices/ioctl_serialization.h | 159
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp | 82
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h | 20
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp | 42
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.h | 12
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp | 115
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 29
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp | 117
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h | 35
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp | 15
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp | 87
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h | 14
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp | 7
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h | 2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp | 13
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp | 47
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.h | 12
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_item.h | 2
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp | 27
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_queue_consumer.h | 9
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_queue_core.cpp | 18
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_queue_core.h | 3
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_queue_producer.cpp | 24
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_queue_producer.h | 3
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_slot.h | 2
-rw-r--r--  src/core/hle/service/nvnflinger/buffer_transform_flags.h | 2
-rw-r--r--  src/core/hle/service/nvnflinger/consumer_base.cpp | 20
-rw-r--r--  src/core/hle/service/nvnflinger/consumer_base.h | 2
-rw-r--r--  src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp | 29
-rw-r--r--  src/core/hle/service/nvnflinger/nvnflinger.cpp | 33
-rw-r--r--  src/core/hle/service/nvnflinger/nvnflinger.h | 5
-rw-r--r--  src/core/hle/service/nvnflinger/status.h | 2
-rw-r--r--  src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp | 34
-rw-r--r--  src/core/hle/service/nvnflinger/ui/graphic_buffer.h | 25
-rw-r--r--  src/core/hle/service/pctl/pctl_module.cpp | 6
-rw-r--r--  src/core/hle/service/pm/pm.cpp | 2
-rw-r--r--  src/core/hle/service/set/set_sys.cpp | 100
-rw-r--r--  src/core/hle/service/set/set_sys.h | 36
-rw-r--r--  src/core/hle/service/sockets/bsd.cpp | 77
-rw-r--r--  src/core/hle/service/sockets/bsd.h | 2
-rw-r--r--  src/core/hle/service/time/clock_types.h | 5
-rw-r--r--  src/core/hle/service/vi/display/vi_display.cpp | 2
-rw-r--r--  src/core/loader/deconstructed_rom_directory.cpp | 63
-rw-r--r--  src/core/loader/kip.cpp | 3
-rw-r--r--  src/core/loader/nro.cpp | 63
-rw-r--r--  src/core/loader/nro.h | 2
-rw-r--r--  src/core/loader/nso.cpp | 67
-rw-r--r--  src/core/loader/nso.h | 7
-rw-r--r--  src/core/memory.cpp | 102
-rw-r--r--  src/core/memory.h | 18
-rw-r--r--  src/core/memory/cheat_engine.cpp | 12
-rw-r--r--  src/core/reporter.cpp | 2
193 files changed, 17473 insertions, 10606 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index e4f499135..85583941c 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -271,8 +271,9 @@ add_library(core STATIC
hle/kernel/k_page_heap.h
hle/kernel/k_page_group.cpp
hle/kernel/k_page_group.h
- hle/kernel/k_page_table.cpp
hle/kernel/k_page_table.h
+ hle/kernel/k_page_table_base.cpp
+ hle/kernel/k_page_table_base.h
hle/kernel/k_page_table_manager.h
hle/kernel/k_page_table_slab_heap.h
hle/kernel/k_port.cpp
@@ -280,6 +281,7 @@ add_library(core STATIC
hle/kernel/k_priority_queue.h
hle/kernel/k_process.cpp
hle/kernel/k_process.h
+ hle/kernel/k_process_page_table.h
hle/kernel/k_readable_event.cpp
hle/kernel/k_readable_event.h
hle/kernel/k_resource_limit.cpp
@@ -330,8 +332,6 @@ add_library(core STATIC
hle/kernel/physical_core.cpp
hle/kernel/physical_core.h
hle/kernel/physical_memory.h
- hle/kernel/process_capability.cpp
- hle/kernel/process_capability.h
hle/kernel/slab_helpers.h
hle/kernel/svc.cpp
hle/kernel/svc.h
@@ -521,17 +521,28 @@ add_library(core STATIC
hle/service/grc/grc.h
hle/service/hid/hid.cpp
hle/service/hid/hid.h
+ hle/service/hid/hid_debug_server.cpp
+ hle/service/hid/hid_debug_server.h
+ hle/service/hid/hid_firmware_settings.cpp
+ hle/service/hid/hid_firmware_settings.h
+ hle/service/hid/hid_server.cpp
+ hle/service/hid/hid_server.h
+ hle/service/hid/hid_system_server.cpp
+ hle/service/hid/hid_system_server.h
+ hle/service/hid/hid_util.h
hle/service/hid/hidbus.cpp
hle/service/hid/hidbus.h
hle/service/hid/irs.cpp
hle/service/hid/irs.h
hle/service/hid/irs_ring_lifo.h
+ hle/service/hid/resource_manager.cpp
+ hle/service/hid/resource_manager.h
hle/service/hid/ring_lifo.h
hle/service/hid/xcd.cpp
hle/service/hid/xcd.h
hle/service/hid/errors.h
- hle/service/hid/controllers/console_sixaxis.cpp
- hle/service/hid/controllers/console_sixaxis.h
+ hle/service/hid/controllers/console_six_axis.cpp
+ hle/service/hid/controllers/console_six_axis.h
hle/service/hid/controllers/controller_base.cpp
hle/service/hid/controllers/controller_base.h
hle/service/hid/controllers/debug_pad.cpp
@@ -546,6 +557,10 @@ add_library(core STATIC
hle/service/hid/controllers/npad.h
hle/service/hid/controllers/palma.cpp
hle/service/hid/controllers/palma.h
+ hle/service/hid/controllers/seven_six_axis.cpp
+ hle/service/hid/controllers/seven_six_axis.h
+ hle/service/hid/controllers/six_axis.cpp
+ hle/service/hid/controllers/six_axis.h
hle/service/hid/controllers/stubbed.cpp
hle/service/hid/controllers/stubbed.h
hle/service/hid/controllers/touchscreen.cpp
@@ -715,6 +730,7 @@ add_library(core STATIC
hle/service/nvnflinger/producer_listener.h
hle/service/nvnflinger/status.h
hle/service/nvnflinger/ui/fence.h
+ hle/service/nvnflinger/ui/graphic_buffer.cpp
hle/service/nvnflinger/ui/graphic_buffer.h
hle/service/nvnflinger/window.h
hle/service/olsc/olsc.cpp
@@ -910,6 +926,22 @@ if (ENABLE_WEB_SERVICE)
target_link_libraries(core PRIVATE web_service)
endif()
+if (HAS_NCE)
+ enable_language(C ASM)
+ set(CMAKE_ASM_FLAGS "${CFLAGS} -x assembler-with-cpp")
+
+ target_sources(core PRIVATE
+ arm/nce/arm_nce.cpp
+ arm/nce/arm_nce.h
+ arm/nce/arm_nce.s
+ arm/nce/guest_context.h
+ arm/nce/patcher.cpp
+ arm/nce/patcher.h
+ arm/nce/instructions.h
+ )
+ target_link_libraries(core PRIVATE merry::oaknut)
+endif()
+
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
target_sources(core PRIVATE
arm/dynarmic/arm_dynarmic.h
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 0c012f094..d231bf89c 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -86,9 +86,9 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
std::map<std::string, Symbols::Symbols> symbols;
for (const auto& module : modules) {
- symbols.insert_or_assign(
- module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(),
- system.ApplicationProcess()->Is64BitProcess()));
+ symbols.insert_or_assign(module.second,
+ Symbols::GetSymbols(module.first, system.ApplicationMemory(),
+ system.ApplicationProcess()->Is64Bit()));
}
for (auto& entry : out) {
@@ -153,6 +153,14 @@ void ARM_Interface::Run() {
Kernel::KThread* current_thread{Kernel::GetCurrentThreadPointer(system.Kernel())};
HaltReason hr{};
+ // If the thread is scheduled for termination, exit the thread.
+ if (current_thread->HasDpc()) {
+ if (current_thread->IsTerminationRequested()) {
+ current_thread->Exit();
+ UNREACHABLE();
+ }
+ }
+
// Notify the debugger and go to sleep if a step was performed
// and this thread has been scheduled again.
if (current_thread->GetStepState() == StepState::StepPerformed) {
@@ -174,14 +182,6 @@ void ARM_Interface::Run() {
}
system.ExitCPUProfile();
- // If the thread is scheduled for termination, exit the thread.
- if (current_thread->HasDpc()) {
- if (current_thread->IsTerminationRequested()) {
- current_thread->Exit();
- UNREACHABLE();
- }
- }
-
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (True(hr & HaltReason::InstructionBreakpoint) || True(hr & HaltReason::PrefetchAbort)) {
@@ -201,6 +201,8 @@ void ARM_Interface::Run() {
if (True(hr & HaltReason::DataAbort)) {
if (system.DebuggerEnabled()) {
system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
+ } else {
+ LogBacktrace();
}
current_thread->RequestSuspend(SuspendType::Debug);
break;
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 3d866ff6f..a9d9ac09d 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -81,6 +81,9 @@ public:
// thread context to be 800 bytes in size.
static_assert(sizeof(ThreadContext64) == 0x320);
+ /// Perform any backend-specific initialization.
+ virtual void Initialize() {}
+
/// Runs the CPU until an event happens
void Run();
diff --git a/src/core/arm/nce/arm_nce.cpp b/src/core/arm/nce/arm_nce.cpp
new file mode 100644
index 000000000..f7bdafd39
--- /dev/null
+++ b/src/core/arm/nce/arm_nce.cpp
@@ -0,0 +1,400 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <cinttypes>
+#include <memory>
+
+#include "common/signal_chain.h"
+#include "core/arm/nce/arm_nce.h"
+#include "core/arm/nce/patcher.h"
+#include "core/core.h"
+#include "core/memory.h"
+
+#include "core/hle/kernel/k_process.h"
+
+#include <signal.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+namespace Core {
+
+namespace {
+
+struct sigaction g_orig_action;
+
+// Verify assembly offsets.
+using NativeExecutionParameters = Kernel::KThread::NativeExecutionParameters;
+static_assert(offsetof(NativeExecutionParameters, native_context) == TpidrEl0NativeContext);
+static_assert(offsetof(NativeExecutionParameters, lock) == TpidrEl0Lock);
+static_assert(offsetof(NativeExecutionParameters, magic) == TpidrEl0TlsMagic);
+
+fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx) {
+ _aarch64_ctx* header = reinterpret_cast<_aarch64_ctx*>(&host_ctx.__reserved);
+ while (header->magic != FPSIMD_MAGIC) {
+ header = reinterpret_cast<_aarch64_ctx*>(reinterpret_cast<char*>(header) + header->size);
+ }
+ return reinterpret_cast<fpsimd_context*>(header);
+}
+
+} // namespace
+
+void* ARM_NCE::RestoreGuestContext(void* raw_context) {
+ // Retrieve the host context.
+ auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
+
+ // Thread-local parameters will be located in x9.
+ auto* tpidr = reinterpret_cast<NativeExecutionParameters*>(host_ctx.regs[9]);
+ auto* guest_ctx = static_cast<GuestContext*>(tpidr->native_context);
+
+ // Retrieve the host floating point state.
+ auto* fpctx = GetFloatingPointState(host_ctx);
+
+ // Save host callee-saved registers.
+ std::memcpy(guest_ctx->host_ctx.host_saved_vregs.data(), &fpctx->vregs[8],
+ sizeof(guest_ctx->host_ctx.host_saved_vregs));
+ std::memcpy(guest_ctx->host_ctx.host_saved_regs.data(), &host_ctx.regs[19],
+ sizeof(guest_ctx->host_ctx.host_saved_regs));
+
+ // Save stack pointer.
+ guest_ctx->host_ctx.host_sp = host_ctx.sp;
+
+ // Restore all guest state except tpidr_el0.
+ host_ctx.sp = guest_ctx->sp;
+ host_ctx.pc = guest_ctx->pc;
+ host_ctx.pstate = guest_ctx->pstate;
+ fpctx->fpcr = guest_ctx->fpcr;
+ fpctx->fpsr = guest_ctx->fpsr;
+ std::memcpy(host_ctx.regs, guest_ctx->cpu_registers.data(), sizeof(host_ctx.regs));
+ std::memcpy(fpctx->vregs, guest_ctx->vector_registers.data(), sizeof(fpctx->vregs));
+
+ // Return the new thread-local storage pointer.
+ return tpidr;
+}
+
+void ARM_NCE::SaveGuestContext(GuestContext* guest_ctx, void* raw_context) {
+ // Retrieve the host context.
+ auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
+
+ // Retrieve the host floating point state.
+ auto* fpctx = GetFloatingPointState(host_ctx);
+
+ // Save all guest registers except tpidr_el0.
+ std::memcpy(guest_ctx->cpu_registers.data(), host_ctx.regs, sizeof(host_ctx.regs));
+ std::memcpy(guest_ctx->vector_registers.data(), fpctx->vregs, sizeof(fpctx->vregs));
+ guest_ctx->fpsr = fpctx->fpsr;
+ guest_ctx->fpcr = fpctx->fpcr;
+ guest_ctx->pstate = static_cast<u32>(host_ctx.pstate);
+ guest_ctx->pc = host_ctx.pc;
+ guest_ctx->sp = host_ctx.sp;
+
+ // Restore stack pointer.
+ host_ctx.sp = guest_ctx->host_ctx.host_sp;
+
+ // Restore host callee-saved registers.
+ std::memcpy(&host_ctx.regs[19], guest_ctx->host_ctx.host_saved_regs.data(),
+ sizeof(guest_ctx->host_ctx.host_saved_regs));
+ std::memcpy(&fpctx->vregs[8], guest_ctx->host_ctx.host_saved_vregs.data(),
+ sizeof(guest_ctx->host_ctx.host_saved_vregs));
+
+ // Return from the call on exit by setting pc to x30.
+ host_ctx.pc = guest_ctx->host_ctx.host_saved_regs[11];
+
+ // Clear esr_el1 and return it.
+ host_ctx.regs[0] = guest_ctx->esr_el1.exchange(0);
+}
+
+bool ARM_NCE::HandleGuestFault(GuestContext* guest_ctx, void* raw_info, void* raw_context) {
+ auto& host_ctx = static_cast<ucontext_t*>(raw_context)->uc_mcontext;
+ auto* info = static_cast<siginfo_t*>(raw_info);
+
+ // Try to handle an invalid access.
+ // TODO: handle accesses which split a page?
+ const Common::ProcessAddress addr =
+ (reinterpret_cast<u64>(info->si_addr) & ~Memory::YUZU_PAGEMASK);
+ if (guest_ctx->system->ApplicationMemory().InvalidateNCE(addr, Memory::YUZU_PAGESIZE)) {
+ // We handled the access successfully and are returning to guest code.
+ return true;
+ }
+
+ // We can't handle the access, so determine why we crashed.
+ const bool is_prefetch_abort = host_ctx.pc == reinterpret_cast<u64>(info->si_addr);
+
+ // For data aborts, skip the instruction and return to guest code.
+ // This will allow games to continue in many scenarios where they would otherwise crash.
+ if (!is_prefetch_abort) {
+ host_ctx.pc += 4;
+ return true;
+ }
+
+ // This is a prefetch abort.
+ guest_ctx->esr_el1.fetch_or(static_cast<u64>(HaltReason::PrefetchAbort));
+
+ // Forcibly mark the context as locked. We are still running.
+ // We may race with SignalInterrupt here:
+ // - If we lose the race, then SignalInterrupt will send us a signal we are masking,
+ // and it will do nothing when it is unmasked, as we have already left guest code.
+ // - If we win the race, then SignalInterrupt will wait for us to unlock first.
+ auto& thread_params = guest_ctx->parent->running_thread->GetNativeExecutionParameters();
+ thread_params.lock.store(SpinLockLocked);
+
+ // Return to host.
+ SaveGuestContext(guest_ctx, raw_context);
+ return false;
+}
+
+void ARM_NCE::HandleHostFault(int sig, void* raw_info, void* raw_context) {
+ return g_orig_action.sa_sigaction(sig, static_cast<siginfo_t*>(raw_info), raw_context);
+}
+
+HaltReason ARM_NCE::RunJit() {
+ // Get the thread parameters.
+ // TODO: pass the current thread down from ::Run
+ auto* thread = Kernel::GetCurrentThreadPointer(system.Kernel());
+ auto* thread_params = &thread->GetNativeExecutionParameters();
+
+ {
+ // Lock our core context.
+ std::scoped_lock lk{lock};
+
+ // We should not be running.
+ ASSERT(running_thread == nullptr);
+
+ // Check if we need to run. If we have already been halted, we are done.
+ u64 halt = guest_ctx.esr_el1.exchange(0);
+ if (halt != 0) {
+ return static_cast<HaltReason>(halt);
+ }
+
+ // Mark that we are running.
+ running_thread = thread;
+
+ // Acquire the lock on the thread parameters.
+ // This allows us to force synchronization with SignalInterrupt.
+ LockThreadParameters(thread_params);
+ }
+
+ // Assign current members.
+ guest_ctx.parent = this;
+ thread_params->native_context = &guest_ctx;
+ thread_params->tpidr_el0 = guest_ctx.tpidr_el0;
+ thread_params->tpidrro_el0 = guest_ctx.tpidrro_el0;
+ thread_params->is_running = true;
+
+ HaltReason halt{};
+
+ // TODO: finding and creating the post handler needs to be locked
+ // to deal with dynamic loading of NROs.
+ const auto& post_handlers = system.ApplicationProcess()->GetPostHandlers();
+ if (auto it = post_handlers.find(guest_ctx.pc); it != post_handlers.end()) {
+ halt = ReturnToRunCodeByTrampoline(thread_params, &guest_ctx, it->second);
+ } else {
+ halt = ReturnToRunCodeByExceptionLevelChange(thread_id, thread_params);
+ }
+
+ // Unload members.
+ // The thread does not change, so we can persist the old reference.
+ guest_ctx.tpidr_el0 = thread_params->tpidr_el0;
+ thread_params->native_context = nullptr;
+ thread_params->is_running = false;
+
+ // Unlock the thread parameters.
+ UnlockThreadParameters(thread_params);
+
+ {
+ // Lock the core context.
+ std::scoped_lock lk{lock};
+
+ // On exit, we no longer have an active thread.
+ running_thread = nullptr;
+ }
+
+ // Return the halt reason.
+ return halt;
+}
+
+HaltReason ARM_NCE::StepJit() {
+ return HaltReason::StepThread;
+}
+
+u32 ARM_NCE::GetSvcNumber() const {
+ return guest_ctx.svc_swi;
+}
+
+ARM_NCE::ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_)
+ : ARM_Interface{system_, uses_wall_clock_}, core_index{core_index_} {
+ guest_ctx.system = &system_;
+}
+
+ARM_NCE::~ARM_NCE() = default;
+
+void ARM_NCE::Initialize() {
+ thread_id = gettid();
+
+ // Setup our signals
+ static std::once_flag flag;
+ std::call_once(flag, [] {
+ using HandlerType = decltype(sigaction::sa_sigaction);
+
+ sigset_t signal_mask;
+ sigemptyset(&signal_mask);
+ sigaddset(&signal_mask, ReturnToRunCodeByExceptionLevelChangeSignal);
+ sigaddset(&signal_mask, BreakFromRunCodeSignal);
+ sigaddset(&signal_mask, GuestFaultSignal);
+
+ struct sigaction return_to_run_code_action {};
+ return_to_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ return_to_run_code_action.sa_sigaction = reinterpret_cast<HandlerType>(
+ &ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler);
+ return_to_run_code_action.sa_mask = signal_mask;
+ Common::SigAction(ReturnToRunCodeByExceptionLevelChangeSignal, &return_to_run_code_action,
+ nullptr);
+
+ struct sigaction break_from_run_code_action {};
+ break_from_run_code_action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ break_from_run_code_action.sa_sigaction =
+ reinterpret_cast<HandlerType>(&ARM_NCE::BreakFromRunCodeSignalHandler);
+ break_from_run_code_action.sa_mask = signal_mask;
+ Common::SigAction(BreakFromRunCodeSignal, &break_from_run_code_action, nullptr);
+
+ struct sigaction fault_action {};
+ fault_action.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
+ fault_action.sa_sigaction =
+ reinterpret_cast<HandlerType>(&ARM_NCE::GuestFaultSignalHandler);
+ fault_action.sa_mask = signal_mask;
+ Common::SigAction(GuestFaultSignal, &fault_action, &g_orig_action);
+
+ // Simplify call for g_orig_action.
+ // These fields occupy the same space in memory, so this should be a no-op in practice.
+ if (!(g_orig_action.sa_flags & SA_SIGINFO)) {
+ g_orig_action.sa_sigaction =
+ reinterpret_cast<decltype(g_orig_action.sa_sigaction)>(g_orig_action.sa_handler);
+ }
+ });
+}
+
+void ARM_NCE::SetPC(u64 pc) {
+ guest_ctx.pc = pc;
+}
+
+u64 ARM_NCE::GetPC() const {
+ return guest_ctx.pc;
+}
+
+u64 ARM_NCE::GetSP() const {
+ return guest_ctx.sp;
+}
+
+u64 ARM_NCE::GetReg(int index) const {
+ return guest_ctx.cpu_registers[index];
+}
+
+void ARM_NCE::SetReg(int index, u64 value) {
+ guest_ctx.cpu_registers[index] = value;
+}
+
+u128 ARM_NCE::GetVectorReg(int index) const {
+ return guest_ctx.vector_registers[index];
+}
+
+void ARM_NCE::SetVectorReg(int index, u128 value) {
+ guest_ctx.vector_registers[index] = value;
+}
+
+u32 ARM_NCE::GetPSTATE() const {
+ return guest_ctx.pstate;
+}
+
+void ARM_NCE::SetPSTATE(u32 pstate) {
+ guest_ctx.pstate = pstate;
+}
+
+u64 ARM_NCE::GetTlsAddress() const {
+ return guest_ctx.tpidrro_el0;
+}
+
+void ARM_NCE::SetTlsAddress(u64 address) {
+ guest_ctx.tpidrro_el0 = address;
+}
+
+u64 ARM_NCE::GetTPIDR_EL0() const {
+ return guest_ctx.tpidr_el0;
+}
+
+void ARM_NCE::SetTPIDR_EL0(u64 value) {
+ guest_ctx.tpidr_el0 = value;
+}
+
+void ARM_NCE::SaveContext(ThreadContext64& ctx) const {
+ ctx.cpu_registers = guest_ctx.cpu_registers;
+ ctx.sp = guest_ctx.sp;
+ ctx.pc = guest_ctx.pc;
+ ctx.pstate = guest_ctx.pstate;
+ ctx.vector_registers = guest_ctx.vector_registers;
+ ctx.fpcr = guest_ctx.fpcr;
+ ctx.fpsr = guest_ctx.fpsr;
+ ctx.tpidr = guest_ctx.tpidr_el0;
+}
+
+void ARM_NCE::LoadContext(const ThreadContext64& ctx) {
+ guest_ctx.cpu_registers = ctx.cpu_registers;
+ guest_ctx.sp = ctx.sp;
+ guest_ctx.pc = ctx.pc;
+ guest_ctx.pstate = ctx.pstate;
+ guest_ctx.vector_registers = ctx.vector_registers;
+ guest_ctx.fpcr = ctx.fpcr;
+ guest_ctx.fpsr = ctx.fpsr;
+ guest_ctx.tpidr_el0 = ctx.tpidr;
+}
+
+void ARM_NCE::SignalInterrupt() {
+ // Lock core context.
+ std::scoped_lock lk{lock};
+
+ // Add break loop condition.
+ guest_ctx.esr_el1.fetch_or(static_cast<u64>(HaltReason::BreakLoop));
+
+ // If there is no thread running, we are done.
+ if (running_thread == nullptr) {
+ return;
+ }
+
+ // Lock the thread context.
+ auto* params = &running_thread->GetNativeExecutionParameters();
+ LockThreadParameters(params);
+
+ if (params->is_running) {
+ // We should signal to the running thread.
+ // The running thread will unlock the thread context.
+ syscall(SYS_tkill, thread_id, BreakFromRunCodeSignal);
+ } else {
+ // If the thread is no longer running, we have nothing to do.
+ UnlockThreadParameters(params);
+ }
+}
+
+void ARM_NCE::ClearInterrupt() {
+ guest_ctx.esr_el1 = {};
+}
+
+void ARM_NCE::ClearInstructionCache() {
+ // TODO: This is not possible to implement correctly on Linux because
+ // we do not have any access to ic iallu.
+
+ // Require accesses to complete.
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
+void ARM_NCE::InvalidateCacheRange(u64 addr, std::size_t size) {
+ this->ClearInstructionCache();
+}
+
+void ARM_NCE::ClearExclusiveState() {
+ // No-op.
+}
+
+void ARM_NCE::PageTableChanged(Common::PageTable& page_table,
+ std::size_t new_address_space_size_in_bits) {
+ // No-op. Page table is never used.
+}
+
+} // namespace Core
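
The esr_el1 member used above doubles as a halt-reason mailbox between SignalInterrupt() and RunJit(): producers OR in a reason bit, and the run loop collects and clears all pending bits in one atomic exchange. A minimal standalone C++ sketch of that handshake follows; the flag values are hypothetical stand-ins for illustration, not the real HaltReason enumerators from core/arm/arm_interface.h.

    #include <atomic>
    #include <cstdint>

    // Hypothetical flag values for illustration only.
    enum class HaltReason : std::uint64_t {
        BreakLoop      = 1ULL << 0,
        SupervisorCall = 1ULL << 1,
    };

    std::atomic<std::uint64_t> esr_el1{};

    // Producer side (cf. SignalInterrupt): accumulate a reason without
    // clobbering bits another producer may have set concurrently.
    void RequestBreak() {
        esr_el1.fetch_or(static_cast<std::uint64_t>(HaltReason::BreakLoop));
    }

    // Consumer side (cf. RunJit): atomically take every pending reason and
    // reset the mailbox to "no pending halt".
    std::uint64_t CollectHaltReasons() {
        return esr_el1.exchange(0);
    }
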
diff --git a/src/core/arm/nce/arm_nce.h b/src/core/arm/nce/arm_nce.h
new file mode 100644
index 000000000..5fbd6dbf3
--- /dev/null
+++ b/src/core/arm/nce/arm_nce.h
@@ -0,0 +1,108 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <memory>
+#include <span>
+#include <unordered_map>
+#include <vector>
+
+#include "core/arm/arm_interface.h"
+#include "core/arm/nce/guest_context.h"
+
+namespace Core::Memory {
+class Memory;
+}
+
+namespace Core {
+
+class System;
+
+class ARM_NCE final : public ARM_Interface {
+public:
+ ARM_NCE(System& system_, bool uses_wall_clock_, std::size_t core_index_);
+
+ ~ARM_NCE() override;
+
+ void Initialize() override;
+ void SetPC(u64 pc) override;
+ u64 GetPC() const override;
+ u64 GetSP() const override;
+ u64 GetReg(int index) const override;
+ void SetReg(int index, u64 value) override;
+ u128 GetVectorReg(int index) const override;
+ void SetVectorReg(int index, u128 value) override;
+
+ u32 GetPSTATE() const override;
+ void SetPSTATE(u32 pstate) override;
+ u64 GetTlsAddress() const override;
+ void SetTlsAddress(u64 address) override;
+ void SetTPIDR_EL0(u64 value) override;
+ u64 GetTPIDR_EL0() const override;
+
+ Architecture GetArchitecture() const override {
+ return Architecture::Aarch64;
+ }
+
+ void SaveContext(ThreadContext32& ctx) const override {}
+ void SaveContext(ThreadContext64& ctx) const override;
+ void LoadContext(const ThreadContext32& ctx) override {}
+ void LoadContext(const ThreadContext64& ctx) override;
+
+ void SignalInterrupt() override;
+ void ClearInterrupt() override;
+ void ClearExclusiveState() override;
+ void ClearInstructionCache() override;
+ void InvalidateCacheRange(u64 addr, std::size_t size) override;
+ void PageTableChanged(Common::PageTable& new_page_table,
+ std::size_t new_address_space_size_in_bits) override;
+
+protected:
+ HaltReason RunJit() override;
+ HaltReason StepJit() override;
+
+ u32 GetSvcNumber() const override;
+
+ const Kernel::DebugWatchpoint* HaltedWatchpoint() const override {
+ return nullptr;
+ }
+
+ void RewindBreakpointInstruction() override {}
+
+private:
+ // Assembly definitions.
+ static HaltReason ReturnToRunCodeByTrampoline(void* tpidr, GuestContext* ctx,
+ u64 trampoline_addr);
+ static HaltReason ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr);
+
+ static void ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info,
+ void* raw_context);
+ static void BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context);
+ static void GuestFaultSignalHandler(int sig, void* info, void* raw_context);
+
+ static void LockThreadParameters(void* tpidr);
+ static void UnlockThreadParameters(void* tpidr);
+
+private:
+ // C++ implementation functions for assembly definitions.
+ static void* RestoreGuestContext(void* raw_context);
+ static void SaveGuestContext(GuestContext* ctx, void* raw_context);
+ static bool HandleGuestFault(GuestContext* ctx, void* info, void* raw_context);
+ static void HandleHostFault(int sig, void* info, void* raw_context);
+
+public:
+ // Members set on initialization.
+ std::size_t core_index{};
+ pid_t thread_id{-1};
+
+ // Core context.
+ GuestContext guest_ctx;
+
+ // Thread and invalidation info.
+ std::mutex lock;
+ Kernel::KThread* running_thread{};
+};
+
+} // namespace Core
diff --git a/src/core/arm/nce/arm_nce.s b/src/core/arm/nce/arm_nce.s
new file mode 100644
index 000000000..b98e09f31
--- /dev/null
+++ b/src/core/arm/nce/arm_nce.s
@@ -0,0 +1,222 @@
+/* SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "core/arm/nce/arm_nce_asm_definitions.h"
+
+#define LOAD_IMMEDIATE_32(reg, val) \
+ mov reg, #(((val) >> 0x00) & 0xFFFF); \
+ movk reg, #(((val) >> 0x10) & 0xFFFF), lsl #16
+
+
+/* static HaltReason Core::ARM_NCE::ReturnToRunCodeByTrampoline(void* tpidr, Core::GuestContext* ctx, u64 trampoline_addr) */
+.section .text._ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, "ax", %progbits
+.global _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm
+.type _ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm, %function
+_ZN4Core7ARM_NCE27ReturnToRunCodeByTrampolineEPvPNS_12GuestContextEm:
+ /* Back up host sp to x3. */
+ /* Back up host tpidr_el0 to x4. */
+ mov x3, sp
+ mrs x4, tpidr_el0
+
+ /* Load guest sp. x5 is used as a scratch register. */
+ ldr x5, [x1, #(GuestContextSp)]
+ mov sp, x5
+
+ /* Offset GuestContext pointer to the host member. */
+ add x5, x1, #(GuestContextHostContext)
+
+ /* Save original host sp and tpidr_el0 (x3, x4) to host context. */
+ stp x3, x4, [x5, #(HostContextSpTpidrEl0)]
+
+ /* Save all callee-saved host GPRs. */
+ stp x19, x20, [x5, #(HostContextRegs+0x0)]
+ stp x21, x22, [x5, #(HostContextRegs+0x10)]
+ stp x23, x24, [x5, #(HostContextRegs+0x20)]
+ stp x25, x26, [x5, #(HostContextRegs+0x30)]
+ stp x27, x28, [x5, #(HostContextRegs+0x40)]
+ stp x29, x30, [x5, #(HostContextRegs+0x50)]
+
+ /* Save all callee-saved host FPRs. */
+ stp q8, q9, [x5, #(HostContextVregs+0x0)]
+ stp q10, q11, [x5, #(HostContextVregs+0x20)]
+ stp q12, q13, [x5, #(HostContextVregs+0x40)]
+ stp q14, q15, [x5, #(HostContextVregs+0x60)]
+
+ /* Load guest tpidr_el0 from argument. */
+ msr tpidr_el0, x0
+
+ /* Tail call the trampoline to restore guest state. */
+ br x2
+
+
+/* static HaltReason Core::ARM_NCE::ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr) */
+.section .text._ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv, "ax", %progbits
+.global _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv
+.type _ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv, %function
+_ZN4Core7ARM_NCE37ReturnToRunCodeByExceptionLevelChangeEiPv:
+ /* This jumps to the signal handler, which will restore the entire context. */
+ /* On entry, x0 = thread id, which is already in the right place. */
+
+ /* Move tpidr to x9 so it is not trampled. */
+ mov x9, x1
+
+ /* Set up arguments. */
+ mov x8, #(__NR_tkill)
+ mov x1, #(ReturnToRunCodeByExceptionLevelChangeSignal)
+
+ /* Tail call the signal handler. */
+ svc #0
+
+ /* Block execution from flowing here. */
+ brk #1000
+
+
+/* static void Core::ARM_NCE::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_
+.type _ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_, %function
+_ZN4Core7ARM_NCE50ReturnToRunCodeByExceptionLevelChangeSignalHandlerEiPvS1_:
+ stp x29, x30, [sp, #-0x10]!
+ mov x29, sp
+
+ /* Call the context restorer with the raw context. */
+ mov x0, x2
+ bl _ZN4Core7ARM_NCE19RestoreGuestContextEPv
+
+ /* Save the old value of tpidr_el0. */
+ mrs x8, tpidr_el0
+ ldr x9, [x0, #(TpidrEl0NativeContext)]
+ str x8, [x9, #(GuestContextHostContext + HostContextTpidrEl0)]
+
+ /* Set our new tpidr_el0. */
+ msr tpidr_el0, x0
+
+ /* Unlock the context. */
+ bl _ZN4Core7ARM_NCE22UnlockThreadParametersEPv
+
+ /* Returning from here will enter the guest. */
+ ldp x29, x30, [sp], #0x10
+ ret
+
+
+/* static void Core::ARM_NCE::BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_
+.type _ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_, %function
+_ZN4Core7ARM_NCE29BreakFromRunCodeSignalHandlerEiPvS1_:
+ /* Check to see if we have the correct TLS magic. */
+ mrs x8, tpidr_el0
+ ldr w9, [x8, #(TpidrEl0TlsMagic)]
+
+ LOAD_IMMEDIATE_32(w10, TlsMagic)
+
+ cmp w9, w10
+ b.ne 1f
+
+ /* Correct TLS magic, so this is a guest interrupt. */
+ /* Restore host tpidr_el0. */
+ ldr x0, [x8, #(TpidrEl0NativeContext)]
+ ldr x3, [x0, #(GuestContextHostContext + HostContextTpidrEl0)]
+ msr tpidr_el0, x3
+
+ /* Tail call the restorer. */
+ mov x1, x2
+ b _ZN4Core7ARM_NCE16SaveGuestContextEPNS_12GuestContextEPv
+
+ /* Returning from here will enter host code. */
+
+1:
+ /* Incorrect TLS magic, so this is a spurious signal. */
+ ret
+
+
+/* static void Core::ARM_NCE::GuestFaultSignalHandler(int sig, void* info, void* raw_context) */
+.section .text._ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_, "ax", %progbits
+.global _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_
+.type _ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_, %function
+_ZN4Core7ARM_NCE23GuestFaultSignalHandlerEiPvS1_:
+ /* Check to see if we have the correct TLS magic. */
+ mrs x8, tpidr_el0
+ ldr w9, [x8, #(TpidrEl0TlsMagic)]
+
+ LOAD_IMMEDIATE_32(w10, TlsMagic)
+
+ cmp w9, w10
+ b.eq 1f
+
+ /* Incorrect TLS magic, so this is a host fault. */
+ /* Tail call the handler. */
+ b _ZN4Core7ARM_NCE15HandleHostFaultEiPvS1_
+
+1:
+ /* Correct TLS magic, so this is a guest fault. */
+ stp x29, x30, [sp, #-0x20]!
+ str x19, [sp, #0x10]
+ mov x29, sp
+
+ /* Save the old tpidr_el0. */
+ mov x19, x8
+
+ /* Restore host tpidr_el0. */
+ ldr x0, [x8, #(TpidrEl0NativeContext)]
+ ldr x3, [x0, #(GuestContextHostContext + HostContextTpidrEl0)]
+ msr tpidr_el0, x3
+
+ /* Call the handler. */
+ bl _ZN4Core7ARM_NCE16HandleGuestFaultEPNS_12GuestContextEPvS3_
+
+ /* If the handler returned false, we want to preserve the host tpidr_el0. */
+ cbz x0, 2f
+
+ /* Otherwise, restore guest tpidr_el0. */
+ msr tpidr_el0, x19
+
+2:
+ ldr x19, [sp, #0x10]
+ ldp x29, x30, [sp], #0x20
+ ret
+
+
+/* static void Core::ARM_NCE::LockThreadParameters(void* tpidr) */
+.section .text._ZN4Core7ARM_NCE20LockThreadParametersEPv, "ax", %progbits
+.global _ZN4Core7ARM_NCE20LockThreadParametersEPv
+.type _ZN4Core7ARM_NCE20LockThreadParametersEPv, %function
+_ZN4Core7ARM_NCE20LockThreadParametersEPv:
+ /* Offset to lock member. */
+ add x0, x0, #(TpidrEl0Lock)
+
+1:
+ /* Clear the monitor. */
+ clrex
+
+2:
+ /* Load-linked with acquire ordering. */
+ ldaxr w1, [x0]
+
+ /* If the value was SpinLockLocked, clear monitor and retry. */
+ cbz w1, 1b
+
+ /* Store-conditional SpinLockLocked with relaxed ordering. */
+ stxr w1, wzr, [x0]
+
+ /* If we failed to store, retry. */
+ cbnz w1, 2b
+
+ ret
+
+
+/* static void Core::ARM_NCE::UnlockThreadParameters(void* tpidr) */
+.section .text._ZN4Core7ARM_NCE22UnlockThreadParametersEPv, "ax", %progbits
+.global _ZN4Core7ARM_NCE22UnlockThreadParametersEPv
+.type _ZN4Core7ARM_NCE22UnlockThreadParametersEPv, %function
+_ZN4Core7ARM_NCE22UnlockThreadParametersEPv:
+ /* Offset to lock member. */
+ add x0, x0, #(TpidrEl0Lock)
+
+ /* Load SpinLockUnlocked. */
+ mov w1, #(SpinLockUnlocked)
+
+ /* Store value with release ordering. */
+ stlr w1, [x0]
+
+ ret
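
LockThreadParameters and UnlockThreadParameters above implement a small spinlock over the thread-parameter word, using the values from arm_nce_asm_definitions.h (SpinLockLocked == 0, SpinLockUnlocked == 1). A minimal C++ sketch of the same semantics, assuming a plain std::atomic word rather than the tpidr_el0-relative slot the assembly operates on:

    #include <atomic>

    constexpr unsigned SpinLockLocked = 0;
    constexpr unsigned SpinLockUnlocked = 1;

    // Acquire: spin until the word reads SpinLockUnlocked, then atomically
    // claim it; mirrors the ldaxr/stxr retry loop above.
    void LockThreadParameters(std::atomic<unsigned>& lock) {
        unsigned expected = SpinLockUnlocked;
        while (!lock.compare_exchange_weak(expected, SpinLockLocked,
                                           std::memory_order_acquire,
                                           std::memory_order_relaxed)) {
            expected = SpinLockUnlocked;
        }
    }

    // Release: publish all writes made under the lock; mirrors the stlr above.
    void UnlockThreadParameters(std::atomic<unsigned>& lock) {
        lock.store(SpinLockUnlocked, std::memory_order_release);
    }
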
diff --git a/src/core/arm/nce/arm_nce_asm_definitions.h b/src/core/arm/nce/arm_nce_asm_definitions.h
new file mode 100644
index 000000000..8a9b285b5
--- /dev/null
+++ b/src/core/arm/nce/arm_nce_asm_definitions.h
@@ -0,0 +1,29 @@
+/* SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+#define __ASSEMBLY__
+
+#include <asm-generic/signal.h>
+#include <asm-generic/unistd.h>
+
+#define ReturnToRunCodeByExceptionLevelChangeSignal SIGUSR2
+#define BreakFromRunCodeSignal SIGURG
+#define GuestFaultSignal SIGSEGV
+
+#define GuestContextSp 0xF8
+#define GuestContextHostContext 0x320
+
+#define HostContextSpTpidrEl0 0xE0
+#define HostContextTpidrEl0 0xE8
+#define HostContextRegs 0x0
+#define HostContextVregs 0x60
+
+#define TpidrEl0NativeContext 0x10
+#define TpidrEl0Lock 0x18
+#define TpidrEl0TlsMagic 0x20
+#define TlsMagic 0x555a5559
+
+#define SpinLockLocked 0
+#define SpinLockUnlocked 1
diff --git a/src/core/arm/nce/guest_context.h b/src/core/arm/nce/guest_context.h
new file mode 100644
index 000000000..0767a0337
--- /dev/null
+++ b/src/core/arm/nce/guest_context.h
@@ -0,0 +1,50 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "core/arm/arm_interface.h"
+#include "core/arm/nce/arm_nce_asm_definitions.h"
+
+namespace Core {
+
+class ARM_NCE;
+class System;
+
+struct HostContext {
+ alignas(16) std::array<u64, 12> host_saved_regs{};
+ alignas(16) std::array<u128, 8> host_saved_vregs{};
+ u64 host_sp{};
+ void* host_tpidr_el0{};
+};
+
+struct GuestContext {
+ std::array<u64, 31> cpu_registers{};
+ u64 sp{};
+ u64 pc{};
+ u32 fpcr{};
+ u32 fpsr{};
+ std::array<u128, 32> vector_registers{};
+ u32 pstate{};
+ alignas(16) HostContext host_ctx{};
+ u64 tpidrro_el0{};
+ u64 tpidr_el0{};
+ std::atomic<u64> esr_el1{};
+ u32 nzcv{};
+ u32 svc_swi{};
+ System* system{};
+ ARM_NCE* parent{};
+};
+
+// Verify assembly offsets.
+static_assert(offsetof(GuestContext, sp) == GuestContextSp);
+static_assert(offsetof(GuestContext, host_ctx) == GuestContextHostContext);
+static_assert(offsetof(HostContext, host_sp) == HostContextSpTpidrEl0);
+static_assert(offsetof(HostContext, host_tpidr_el0) - 8 == HostContextSpTpidrEl0);
+static_assert(offsetof(HostContext, host_tpidr_el0) == HostContextTpidrEl0);
+static_assert(offsetof(HostContext, host_saved_regs) == HostContextRegs);
+static_assert(offsetof(HostContext, host_saved_vregs) == HostContextVregs);
+
+} // namespace Core
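
The asserted constants follow directly from the layout above: cpu_registers occupies 31 * 8 = 248 bytes, so sp lands at offset 0xF8 (GuestContextSp); adding sp and pc (16 bytes), fpcr and fpsr (8 bytes), the 32 * 16 = 512 bytes of vector_registers, and pstate (4 bytes) reaches 788, which rounds up to 800 = 0x320 for the 16-byte-aligned host_ctx (GuestContextHostContext). Likewise within HostContext, host_saved_regs (12 * 8 = 96 bytes) and host_saved_vregs (8 * 16 = 128 bytes) place host_sp at 0xE0 and host_tpidr_el0 at 0xE8, matching arm_nce_asm_definitions.h.
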
diff --git a/src/core/arm/nce/instructions.h b/src/core/arm/nce/instructions.h
new file mode 100644
index 000000000..5b56ff857
--- /dev/null
+++ b/src/core/arm/nce/instructions.h
@@ -0,0 +1,147 @@
+// SPDX-FileCopyrightText: Copyright © 2020 Skyline Team and Contributors
+// SPDX-License-Identifier: MPL-2.0
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+
+namespace Core::NCE {
+
+enum SystemRegister : u32 {
+ TpidrEl0 = 0x5E82,
+ TpidrroEl0 = 0x5E83,
+ CntfrqEl0 = 0x5F00,
+ CntpctEl0 = 0x5F01,
+};
+
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/SVC--Supervisor-Call-
+union SVC {
+ constexpr explicit SVC(u32 raw_) : raw{raw_} {}
+
+ constexpr bool Verify() {
+ return (this->GetSig0() == 0x1 && this->GetSig1() == 0x6A0);
+ }
+
+ constexpr u32 GetSig0() {
+ return decltype(sig0)::ExtractValue(raw);
+ }
+
+ constexpr u32 GetValue() {
+ return decltype(value)::ExtractValue(raw);
+ }
+
+ constexpr u32 GetSig1() {
+ return decltype(sig1)::ExtractValue(raw);
+ }
+
+ u32 raw;
+
+private:
+ BitField<0, 5, u32> sig0; // 0x1
+ BitField<5, 16, u32> value; // 16-bit immediate
+ BitField<21, 11, u32> sig1; // 0x6A0
+};
+static_assert(sizeof(SVC) == sizeof(u32));
+static_assert(SVC(0xD40000C1).Verify());
+static_assert(SVC(0xD40000C1).GetValue() == 0x6);
+
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/MRS--Move-System-Register-
+union MRS {
+ constexpr explicit MRS(u32 raw_) : raw{raw_} {}
+
+ constexpr bool Verify() {
+ return (this->GetSig() == 0xD53);
+ }
+
+ constexpr u32 GetRt() {
+ return decltype(rt)::ExtractValue(raw);
+ }
+
+ constexpr u32 GetSystemReg() {
+ return decltype(system_reg)::ExtractValue(raw);
+ }
+
+ constexpr u32 GetSig() {
+ return decltype(sig)::ExtractValue(raw);
+ }
+
+ u32 raw;
+
+private:
+ BitField<0, 5, u32> rt; // destination register
+ BitField<5, 15, u32> system_reg; // source system register
+ BitField<20, 12, u32> sig; // 0xD53
+};
+static_assert(sizeof(MRS) == sizeof(u32));
+static_assert(MRS(0xD53BE020).Verify());
+static_assert(MRS(0xD53BE020).GetSystemReg() == CntpctEl0);
+static_assert(MRS(0xD53BE020).GetRt() == 0x0);
+
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/MSR--register---Move-general-purpose-register-to-System-Register-
+union MSR {
+ constexpr explicit MSR(u32 raw_) : raw{raw_} {}
+
+ constexpr bool Verify() {
+ return this->GetSig() == 0xD51;
+ }
+
+ constexpr u32 GetRt() {
+ return decltype(rt)::ExtractValue(raw);
+ }
+
+ constexpr u32 GetSystemReg() {
+ return decltype(system_reg)::ExtractValue(raw);
+ }
+
+ constexpr u32 GetSig() {
+ return decltype(sig)::ExtractValue(raw);
+ }
+
+ u32 raw;
+
+private:
+ BitField<0, 5, u32> rt; // source register
+ BitField<5, 15, u32> system_reg; // destination system register
+ BitField<20, 12, u32> sig; // 0xD51
+};
+static_assert(sizeof(MSR) == sizeof(u32));
+static_assert(MSR(0xD51BD040).Verify());
+static_assert(MSR(0xD51BD040).GetSystemReg() == TpidrEl0);
+static_assert(MSR(0xD51BD040).GetRt() == 0x0);
+
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDXR--Load-Exclusive-Register-
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/LDXP--Load-Exclusive-Pair-of-Registers-
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STXR--Store-Exclusive-Register-
+// https://developer.arm.com/documentation/ddi0596/2021-12/Base-Instructions/STXP--Store-Exclusive-Pair-of-registers-
+union Exclusive {
+ constexpr explicit Exclusive(u32 raw_) : raw{raw_} {}
+
+ constexpr bool Verify() {
+ return this->GetSig() == 0x10;
+ }
+
+ constexpr u32 GetSig() {
+ return decltype(sig)::ExtractValue(raw);
+ }
+
+ constexpr u32 AsOrdered() {
+ return raw | decltype(o0)::FormatValue(1);
+ }
+
+ u32 raw;
+
+private:
+ BitField<0, 5, u32> rt; // memory operand
+ BitField<5, 5, u32> rn; // register operand 1
+ BitField<10, 5, u32> rt2; // register operand 2
+ BitField<15, 1, u32> o0; // ordered
+ BitField<16, 5, u32> rs; // status register
+ BitField<21, 2, u32> l; // operation type
+ BitField<23, 7, u32> sig; // 0x10
+ BitField<30, 2, u32> size; // size
+};
+static_assert(Exclusive(0xC85FFC00).Verify());
+static_assert(Exclusive(0xC85FFC00).AsOrdered() == 0xC85FFC00);
+static_assert(Exclusive(0xC85F7C00).AsOrdered() == 0xC85FFC00);
+static_assert(Exclusive(0xC8200440).AsOrdered() == 0xC8208440);
+
+} // namespace Core::NCE
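
For reference, the SVC words checked by the static_asserts above follow the A64 layout the union describes: sig1 in bits 31-21 (0x6A0), the 16-bit immediate in bits 20-5, and sig0 in bits 4-0 (0x1). A small standalone sketch, independent of this header, that rebuilds such a word:

    #include <cstdint>

    // Assemble an A64 SVC instruction word from its 16-bit immediate.
    constexpr std::uint32_t MakeSvc(std::uint16_t imm16) {
        return 0xD4000001u | (static_cast<std::uint32_t>(imm16) << 5);
    }

    // SVC #0x6 encodes as 0xD40000C1, matching the assertions above.
    static_assert(MakeSvc(0x6) == 0xD40000C1);
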
diff --git a/src/core/arm/nce/patcher.cpp b/src/core/arm/nce/patcher.cpp
new file mode 100644
index 000000000..ec8527224
--- /dev/null
+++ b/src/core/arm/nce/patcher.cpp
@@ -0,0 +1,474 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/arm64/native_clock.h"
+#include "common/bit_cast.h"
+#include "common/literals.h"
+#include "core/arm/nce/arm_nce.h"
+#include "core/arm/nce/guest_context.h"
+#include "core/arm/nce/instructions.h"
+#include "core/arm/nce/patcher.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hle/kernel/svc.h"
+
+namespace Core::NCE {
+
+using namespace Common::Literals;
+using namespace oaknut::util;
+
+using NativeExecutionParameters = Kernel::KThread::NativeExecutionParameters;
+
+constexpr size_t MaxRelativeBranch = 128_MiB;
+constexpr u32 ModuleCodeIndex = 0x24 / sizeof(u32);
+
+Patcher::Patcher() : c(m_patch_instructions) {}
+
+Patcher::~Patcher() = default;
+
+void Patcher::PatchText(const Kernel::PhysicalMemory& program_image,
+ const Kernel::CodeSet::Segment& code) {
+
+ // Write save context helper function.
+ c.l(m_save_context);
+ WriteSaveContext();
+
+ // Write load context helper function.
+ c.l(m_load_context);
+ WriteLoadContext();
+
+ // Retrieve text segment data.
+ const auto text = std::span{program_image}.subspan(code.offset, code.size);
+ const auto text_words =
+ std::span<const u32>{reinterpret_cast<const u32*>(text.data()), text.size() / sizeof(u32)};
+
+ // Loop through instructions, patching as needed.
+ for (u32 i = ModuleCodeIndex; i < static_cast<u32>(text_words.size()); i++) {
+ const u32 inst = text_words[i];
+
+ const auto AddRelocations = [&] {
+ const uintptr_t this_offset = i * sizeof(u32);
+ const uintptr_t next_offset = this_offset + sizeof(u32);
+
+ // Relocate from here to patch.
+ this->BranchToPatch(this_offset);
+
+ // Relocate from patch to next instruction.
+ return next_offset;
+ };
+
+ // SVC
+ if (auto svc = SVC{inst}; svc.Verify()) {
+ WriteSvcTrampoline(AddRelocations(), svc.GetValue());
+ continue;
+ }
+
+ // MRS Xn, TPIDR_EL0
+ // MRS Xn, TPIDRRO_EL0
+ if (auto mrs = MRS{inst};
+ mrs.Verify() && (mrs.GetSystemReg() == TpidrroEl0 || mrs.GetSystemReg() == TpidrEl0)) {
+ const auto src_reg = mrs.GetSystemReg() == TpidrroEl0 ? oaknut::SystemReg::TPIDRRO_EL0
+ : oaknut::SystemReg::TPIDR_EL0;
+ const auto dest_reg = oaknut::XReg{static_cast<int>(mrs.GetRt())};
+ WriteMrsHandler(AddRelocations(), dest_reg, src_reg);
+ continue;
+ }
+
+ // MRS Xn, CNTPCT_EL0
+ if (auto mrs = MRS{inst}; mrs.Verify() && mrs.GetSystemReg() == CntpctEl0) {
+ WriteCntpctHandler(AddRelocations(), oaknut::XReg{static_cast<int>(mrs.GetRt())});
+ continue;
+ }
+
+ // MRS Xn, CNTFRQ_EL0
+ if (auto mrs = MRS{inst}; mrs.Verify() && mrs.GetSystemReg() == CntfrqEl0) {
+ UNREACHABLE();
+ }
+
+ // MSR TPIDR_EL0, Xn
+ if (auto msr = MSR{inst}; msr.Verify() && msr.GetSystemReg() == TpidrEl0) {
+ WriteMsrHandler(AddRelocations(), oaknut::XReg{static_cast<int>(msr.GetRt())});
+ continue;
+ }
+
+ if (auto exclusive = Exclusive{inst}; exclusive.Verify()) {
+ m_exclusives.push_back(i);
+ }
+ }
+
+ // Determine patching mode for the final relocation step
+ const size_t image_size = program_image.size();
+ this->mode = image_size > MaxRelativeBranch ? PatchMode::PreText : PatchMode::PostData;
+}
+
+void Patcher::RelocateAndCopy(Common::ProcessAddress load_base,
+ const Kernel::CodeSet::Segment& code,
+ Kernel::PhysicalMemory& program_image,
+ EntryTrampolines* out_trampolines) {
+ const size_t patch_size = GetSectionSize();
+ const size_t image_size = program_image.size();
+
+ // Retrieve text segment data.
+ const auto text = std::span{program_image}.subspan(code.offset, code.size);
+ const auto text_words =
+ std::span<u32>{reinterpret_cast<u32*>(text.data()), text.size() / sizeof(u32)};
+
+ const auto ApplyBranchToPatchRelocation = [&](u32* target, const Relocation& rel) {
+ oaknut::CodeGenerator rc{target};
+ if (mode == PatchMode::PreText) {
+ rc.B(rel.patch_offset - patch_size - rel.module_offset);
+ } else {
+ rc.B(image_size - rel.module_offset + rel.patch_offset);
+ }
+ };
+
+ const auto ApplyBranchToModuleRelocation = [&](u32* target, const Relocation& rel) {
+ oaknut::CodeGenerator rc{target};
+ if (mode == PatchMode::PreText) {
+ rc.B(patch_size - rel.patch_offset + rel.module_offset);
+ } else {
+ rc.B(rel.module_offset - image_size - rel.patch_offset);
+ }
+ };
+
+ const auto RebasePatch = [&](ptrdiff_t patch_offset) {
+ if (mode == PatchMode::PreText) {
+ return GetInteger(load_base) + patch_offset;
+ } else {
+ return GetInteger(load_base) + image_size + patch_offset;
+ }
+ };
+
+ const auto RebasePc = [&](uintptr_t module_offset) {
+ if (mode == PatchMode::PreText) {
+ return GetInteger(load_base) + patch_size + module_offset;
+ } else {
+ return GetInteger(load_base) + module_offset;
+ }
+ };
+
+ // We are now ready to relocate!
+ for (const Relocation& rel : m_branch_to_patch_relocations) {
+ ApplyBranchToPatchRelocation(text_words.data() + rel.module_offset / sizeof(u32), rel);
+ }
+ for (const Relocation& rel : m_branch_to_module_relocations) {
+ ApplyBranchToModuleRelocation(m_patch_instructions.data() + rel.patch_offset / sizeof(u32),
+ rel);
+ }
+
+ // Rewrite PC constants and record post trampolines
+ for (const Relocation& rel : m_write_module_pc_relocations) {
+ oaknut::CodeGenerator rc{m_patch_instructions.data() + rel.patch_offset / sizeof(u32)};
+ rc.dx(RebasePc(rel.module_offset));
+ }
+ for (const Trampoline& rel : m_trampolines) {
+ out_trampolines->insert({RebasePc(rel.module_offset), RebasePatch(rel.patch_offset)});
+ }
+
+ // Cortex-A57 seems to treat all exclusives as ordered, but newer processors do not.
+ // Convert to ordered to preserve this assumption.
+ for (const ModuleTextAddress i : m_exclusives) {
+ auto exclusive = Exclusive{text_words[i]};
+ text_words[i] = exclusive.AsOrdered();
+ }
+
+ // Copy to program image
+ if (this->mode == PatchMode::PreText) {
+ std::memcpy(program_image.data(), m_patch_instructions.data(),
+ m_patch_instructions.size() * sizeof(u32));
+ } else {
+ program_image.resize(image_size + patch_size);
+ std::memcpy(program_image.data() + image_size, m_patch_instructions.data(),
+ m_patch_instructions.size() * sizeof(u32));
+ }
+}
+
+size_t Patcher::GetSectionSize() const noexcept {
+ return Common::AlignUp(m_patch_instructions.size() * sizeof(u32), Core::Memory::YUZU_PAGESIZE);
+}
+
+void Patcher::WriteLoadContext() {
+ // This function was called, which modifies X30, so use that as a scratch register.
+ // SP contains the guest X30, so save our return X30 to SP + 8, since we have allocated 16 bytes
+ // of stack.
+ c.STR(X30, SP, 8);
+ c.MRS(X30, oaknut::SystemReg::TPIDR_EL0);
+ c.LDR(X30, X30, offsetof(NativeExecutionParameters, native_context));
+
+ // Load system registers.
+ c.LDR(W0, X30, offsetof(GuestContext, fpsr));
+ c.MSR(oaknut::SystemReg::FPSR, X0);
+ c.LDR(W0, X30, offsetof(GuestContext, fpcr));
+ c.MSR(oaknut::SystemReg::FPCR, X0);
+ c.LDR(W0, X30, offsetof(GuestContext, nzcv));
+ c.MSR(oaknut::SystemReg::NZCV, X0);
+
+ // Load all vector registers.
+ static constexpr size_t VEC_OFF = offsetof(GuestContext, vector_registers);
+ for (int i = 0; i <= 30; i += 2) {
+ c.LDP(oaknut::QReg{i}, oaknut::QReg{i + 1}, X30, VEC_OFF + 16 * i);
+ }
+
+ // Load all general-purpose registers except X30.
+ for (int i = 0; i <= 28; i += 2) {
+ c.LDP(oaknut::XReg{i}, oaknut::XReg{i + 1}, X30, 8 * i);
+ }
+
+ // Reload our return X30 from the stack and return.
+ // The patch code will reload the guest X30 for us.
+ c.LDR(X30, SP, 8);
+ c.RET();
+}
+
+void Patcher::WriteSaveContext() {
+ // This function was called, which modifies X30, so use that as a scratch register.
+ // SP contains the guest X30, so save our X30 to SP + 8, since we have allocated 16 bytes of
+ // stack.
+ c.STR(X30, SP, 8);
+ c.MRS(X30, oaknut::SystemReg::TPIDR_EL0);
+ c.LDR(X30, X30, offsetof(NativeExecutionParameters, native_context));
+
+ // Store all general-purpose registers except X30.
+ for (int i = 0; i <= 28; i += 2) {
+ c.STP(oaknut::XReg{i}, oaknut::XReg{i + 1}, X30, 8 * i);
+ }
+
+ // Store all vector registers.
+ static constexpr size_t VEC_OFF = offsetof(GuestContext, vector_registers);
+ for (int i = 0; i <= 30; i += 2) {
+ c.STP(oaknut::QReg{i}, oaknut::QReg{i + 1}, X30, VEC_OFF + 16 * i);
+ }
+
+ // Store guest system registers, X30 and SP, using X0 as a scratch register.
+ c.STR(X0, SP, PRE_INDEXED, -16);
+ c.LDR(X0, SP, 16);
+ c.STR(X0, X30, 8 * 30);
+ c.ADD(X0, SP, 32);
+ c.STR(X0, X30, offsetof(GuestContext, sp));
+ c.MRS(X0, oaknut::SystemReg::FPSR);
+ c.STR(W0, X30, offsetof(GuestContext, fpsr));
+ c.MRS(X0, oaknut::SystemReg::FPCR);
+ c.STR(W0, X30, offsetof(GuestContext, fpcr));
+ c.MRS(X0, oaknut::SystemReg::NZCV);
+ c.STR(W0, X30, offsetof(GuestContext, nzcv));
+ c.LDR(X0, SP, POST_INDEXED, 16);
+
+ // Reload our return X30 from the stack, and return.
+ c.LDR(X30, SP, 8);
+ c.RET();
+}
+
+void Patcher::WriteSvcTrampoline(ModuleDestLabel module_dest, u32 svc_id) {
+ // We are about to start saving state, so we need to lock the context.
+ this->LockContext();
+
+ // Store guest X30 to the stack. Then, save the context and restore the stack.
+ // This will save all registers except PC, but we know PC at patch time.
+ c.STR(X30, SP, PRE_INDEXED, -16);
+ c.BL(m_save_context);
+ c.LDR(X30, SP, POST_INDEXED, 16);
+
+ // Now that we've saved all registers, we can use any registers as scratch.
+ // Store PC + 4 to the guest context, since we know the instruction offset from the entry point.
+ oaknut::Label pc_after_svc;
+ c.MRS(X1, oaknut::SystemReg::TPIDR_EL0);
+ c.LDR(X1, X1, offsetof(NativeExecutionParameters, native_context));
+ c.LDR(X2, pc_after_svc);
+ c.STR(X2, X1, offsetof(GuestContext, pc));
+
+ // Store SVC number to execute when we return
+ c.MOV(X2, svc_id);
+ c.STR(W2, X1, offsetof(GuestContext, svc_swi));
+
+ // We are calling an SVC. Atomically clear esr_el1 and keep its previous value as the return value.
+ static_assert(std::is_same_v<std::underlying_type_t<HaltReason>, u64>);
+ oaknut::Label retry;
+ c.ADD(X2, X1, offsetof(GuestContext, esr_el1));
+ c.l(retry);
+ c.LDAXR(X0, X2);
+ c.STLXR(W3, XZR, X2);
+ c.CBNZ(W3, retry);
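The LDAXR/STLXR/CBNZ sequence is an atomic exchange: it reads the pending halt reasons from esr_el1 and clears them in a single atomic step, retrying if another observer intervenes; the result is then ORed with the SupervisorCall flag and handed back to the host in X0. In portable C++ the same operation is roughly:

    #include <atomic>
    #include <cstdint>

    // Equivalent of the emitted exclusive-monitor loop over GuestContext::esr_el1:
    // fetch the pending halt reasons and clear them atomically.
    std::uint64_t TakeHaltReasons(std::atomic<std::uint64_t>& esr_el1) {
        return esr_el1.exchange(0, std::memory_order_acq_rel);
    }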
+
+ // Add "calling SVC" flag. Since this is X0, this is now our return value.
+ c.ORR(X0, X0, static_cast<u64>(HaltReason::SupervisorCall));
+
+ // Offset the GuestContext pointer to the HostContext member.
+ // LDP/STP have a limited immediate offset range ([-512, 504] for 64-bit registers), which we
+ // can't reach otherwise.
+ // NB: Because of this, all offsets below are relative to the start of HostContext.
+ c.ADD(X1, X1, offsetof(GuestContext, host_ctx));
+
+ // Reload host TPIDR_EL0 and SP.
+ static_assert(offsetof(HostContext, host_sp) + 8 == offsetof(HostContext, host_tpidr_el0));
+ c.LDP(X2, X3, X1, offsetof(HostContext, host_sp));
+ c.MOV(SP, X2);
+ c.MSR(oaknut::SystemReg::TPIDR_EL0, X3);
+
+ // Load callee-saved host registers and return to host.
+ static constexpr size_t HOST_REGS_OFF = offsetof(HostContext, host_saved_regs);
+ static constexpr size_t HOST_VREGS_OFF = offsetof(HostContext, host_saved_vregs);
+ c.LDP(X19, X20, X1, HOST_REGS_OFF);
+ c.LDP(X21, X22, X1, HOST_REGS_OFF + 2 * sizeof(u64));
+ c.LDP(X23, X24, X1, HOST_REGS_OFF + 4 * sizeof(u64));
+ c.LDP(X25, X26, X1, HOST_REGS_OFF + 6 * sizeof(u64));
+ c.LDP(X27, X28, X1, HOST_REGS_OFF + 8 * sizeof(u64));
+ c.LDP(X29, X30, X1, HOST_REGS_OFF + 10 * sizeof(u64));
+ c.LDP(Q8, Q9, X1, HOST_VREGS_OFF);
+ c.LDP(Q10, Q11, X1, HOST_VREGS_OFF + 2 * sizeof(u128));
+ c.LDP(Q12, Q13, X1, HOST_VREGS_OFF + 4 * sizeof(u128));
+ c.LDP(Q14, Q15, X1, HOST_VREGS_OFF + 6 * sizeof(u128));
+ c.RET();
+
+ // Record the offset of the post-SVC trampoline, which the host jumps to in order to return to
+ // the guest after the SVC has been handled.
+ m_trampolines.push_back({c.offset(), module_dest});
+
+ // Host called this location. Save the return address so we can
+ // unwind the stack properly when jumping back.
+ c.MRS(X2, oaknut::SystemReg::TPIDR_EL0);
+ c.LDR(X2, X2, offsetof(NativeExecutionParameters, native_context));
+ c.ADD(X0, X2, offsetof(GuestContext, host_ctx));
+ c.STR(X30, X0, offsetof(HostContext, host_saved_regs) + 11 * sizeof(u64));
+
+ // Reload all guest registers except X30 and PC.
+ // The function also expects 16 bytes of stack already allocated.
+ c.STR(X30, SP, PRE_INDEXED, -16);
+ c.BL(m_load_context);
+ c.LDR(X30, SP, POST_INDEXED, 16);
+
+ // Use X1 as a scratch register to restore X30.
+ c.STR(X1, SP, PRE_INDEXED, -16);
+ c.MRS(X1, oaknut::SystemReg::TPIDR_EL0);
+ c.LDR(X1, X1, offsetof(NativeExecutionParameters, native_context));
+ c.LDR(X30, X1, offsetof(GuestContext, cpu_registers) + sizeof(u64) * 30);
+ c.LDR(X1, SP, POST_INDEXED, 16);
+
+ // Unlock the context.
+ this->UnlockContext();
+
+ // Jump back to the instruction after the emulated SVC.
+ this->BranchToModule(module_dest);
+
+ // Store PC after call.
+ c.l(pc_after_svc);
+ this->WriteModulePc(module_dest);
+}
+
+void Patcher::WriteMrsHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg,
+ oaknut::SystemReg src_reg) {
+ // Retrieve emulated TLS register from GuestContext.
+ c.MRS(dest_reg, oaknut::SystemReg::TPIDR_EL0);
+ if (src_reg == oaknut::SystemReg::TPIDRRO_EL0) {
+ c.LDR(dest_reg, dest_reg, offsetof(NativeExecutionParameters, tpidrro_el0));
+ } else {
+ c.LDR(dest_reg, dest_reg, offsetof(NativeExecutionParameters, tpidr_el0));
+ }
+
+ // Jump back to the instruction after the emulated MRS.
+ this->BranchToModule(module_dest);
+}
+
+void Patcher::WriteMsrHandler(ModuleDestLabel module_dest, oaknut::XReg src_reg) {
+ const auto scratch_reg = src_reg.index() == 0 ? X1 : X0;
+ c.STR(scratch_reg, SP, PRE_INDEXED, -16);
+
+ // Save guest value to NativeExecutionParameters::tpidr_el0.
+ c.MRS(scratch_reg, oaknut::SystemReg::TPIDR_EL0);
+ c.STR(src_reg, scratch_reg, offsetof(NativeExecutionParameters, tpidr_el0));
+
+ // Restore scratch register.
+ c.LDR(scratch_reg, SP, POST_INDEXED, 16);
+
+ // Jump back to the instruction after the emulated MSR.
+ this->BranchToModule(module_dest);
+}
+
+void Patcher::WriteCntpctHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg) {
+ static Common::Arm64::NativeClock clock{};
+ const auto factor = clock.GetGuestCNTFRQFactor();
+ const auto raw_factor = Common::BitCast<std::array<u64, 2>>(factor);
+
+ const auto use_x2_x3 = dest_reg.index() == 0 || dest_reg.index() == 1;
+ oaknut::XReg scratch0 = use_x2_x3 ? X2 : X0;
+ oaknut::XReg scratch1 = use_x2_x3 ? X3 : X1;
+
+ oaknut::Label factorlo;
+ oaknut::Label factorhi;
+
+ // Save scratches.
+ c.STP(scratch0, scratch1, SP, PRE_INDEXED, -16);
+
+ // Load counter value.
+ c.MRS(dest_reg, oaknut::SystemReg::CNTVCT_EL0);
+
+ // Load scaling factor.
+ c.LDR(scratch0, factorlo);
+ c.LDR(scratch1, factorhi);
+
+ // Multiply by the low word of the factor and keep the high 64 bits of the product.
+ c.UMULH(scratch0, dest_reg, scratch0);
+
+ // Multiply by the high word of the factor and add the carry from the low product.
+ c.MADD(dest_reg, dest_reg, scratch1, scratch0);
+
+ // Reload scratches.
+ c.LDP(scratch0, scratch1, SP, POST_INDEXED, 16);
+
+ // Jump back to the instruction after the emulated MRS.
+ this->BranchToModule(module_dest);
+
+ // Scaling factor constant values.
+ c.l(factorlo);
+ c.dx(raw_factor[0]);
+ c.l(factorhi);
+ c.dx(raw_factor[1]);
+}
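The UMULH/MADD pair computes the upper 64 bits of a 64x128-bit product: result = (CNTVCT_EL0 * factor) >> 64, where factor is the 128-bit fixed-point ratio from GetGuestCNTFRQFactor() (raw_factor[0] being its low word on a little-endian host). A standalone sketch of the same arithmetic, relying on the GCC/Clang unsigned __int128 extension:

    #include <cstdint>

    using u64 = std::uint64_t;
    using u128 = unsigned __int128; // GCC/Clang extension

    // Mirrors the emitted UMULH + MADD: scale a raw counter by a 128-bit fixed-point factor.
    u64 ScaleCounter(u64 counter, u64 factor_lo, u64 factor_hi) {
        const u64 carry = static_cast<u64>((static_cast<u128>(counter) * factor_lo) >> 64); // UMULH
        return counter * factor_hi + carry;                                                 // MADD
    }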
+
+void Patcher::LockContext() {
+ oaknut::Label retry;
+
+ // Save scratches.
+ c.STP(X0, X1, SP, PRE_INDEXED, -16);
+
+ // Reload lock pointer.
+ c.l(retry);
+ c.CLREX();
+ c.MRS(X0, oaknut::SystemReg::TPIDR_EL0);
+ c.ADD(X0, X0, offsetof(NativeExecutionParameters, lock));
+
+ static_assert(SpinLockLocked == 0);
+
+ // Load-linked with acquire ordering.
+ c.LDAXR(W1, X0);
+
+ // If the value was SpinLockLocked, clear monitor and retry.
+ c.CBZ(W1, retry);
+
+ // Store-conditional SpinLockLocked with relaxed ordering.
+ c.STXR(W1, WZR, X0);
+
+ // If we failed to store, retry.
+ c.CBNZ(W1, retry);
+
+ // We succeeded! Reload scratches.
+ c.LDP(X0, X1, SP, POST_INDEXED, 16);
+}
+
+void Patcher::UnlockContext() {
+ // Save scratches.
+ c.STP(X0, X1, SP, PRE_INDEXED, -16);
+
+ // Load lock pointer.
+ c.MRS(X0, oaknut::SystemReg::TPIDR_EL0);
+ c.ADD(X0, X0, offsetof(NativeExecutionParameters, lock));
+
+ // Load SpinLockUnlocked.
+ c.MOV(W1, SpinLockUnlocked);
+
+ // Store value with release ordering.
+ c.STLR(W1, X0);
+
+ // Load scratches.
+ c.LDP(X0, X1, SP, POST_INDEXED, 16);
+}
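LockContext and UnlockContext implement a one-word spinlock over NativeExecutionParameters::lock using exclusive load/store pairs. The same protocol in portable C++, assuming SpinLockUnlocked is the single nonzero "free" value (SpinLockLocked == 0 per the static_assert above):

    #include <atomic>
    #include <cstdint>

    constexpr std::uint32_t SpinLockLocked = 0;
    constexpr std::uint32_t SpinLockUnlocked = 1; // assumed value

    // Equivalent of the emitted LDAXR/STXR retry loop.
    void LockContext(std::atomic<std::uint32_t>& lock) {
        std::uint32_t expected;
        do {
            expected = SpinLockUnlocked; // only proceed once the lock reads as free
        } while (!lock.compare_exchange_weak(expected, SpinLockLocked, std::memory_order_acquire,
                                             std::memory_order_relaxed));
    }

    // Equivalent of the emitted STLR.
    void UnlockContext(std::atomic<std::uint32_t>& lock) {
        lock.store(SpinLockUnlocked, std::memory_order_release);
    }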
+
+} // namespace Core::NCE
diff --git a/src/core/arm/nce/patcher.h b/src/core/arm/nce/patcher.h
new file mode 100644
index 000000000..c6d1608c1
--- /dev/null
+++ b/src/core/arm/nce/patcher.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+#include <unordered_map>
+#include <vector>
+#include <oaknut/code_block.hpp>
+#include <oaknut/oaknut.hpp>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/code_set.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/physical_memory.h"
+
+namespace Core::NCE {
+
+enum class PatchMode : u32 {
+ None,
+ PreText, ///< Patch section is inserted before .text
+ PostData, ///< Patch section is inserted after .data
+};
+
+using ModuleTextAddress = u64;
+using PatchTextAddress = u64;
+using EntryTrampolines = std::unordered_map<ModuleTextAddress, PatchTextAddress>;
+
+class Patcher {
+public:
+ explicit Patcher();
+ ~Patcher();
+
+ void PatchText(const Kernel::PhysicalMemory& program_image,
+ const Kernel::CodeSet::Segment& code);
+ void RelocateAndCopy(Common::ProcessAddress load_base, const Kernel::CodeSet::Segment& code,
+ Kernel::PhysicalMemory& program_image, EntryTrampolines* out_trampolines);
+ size_t GetSectionSize() const noexcept;
+
+ [[nodiscard]] PatchMode GetPatchMode() const noexcept {
+ return mode;
+ }
+
+private:
+ using ModuleDestLabel = uintptr_t;
+
+ struct Trampoline {
+ ptrdiff_t patch_offset;
+ uintptr_t module_offset;
+ };
+
+ void WriteLoadContext();
+ void WriteSaveContext();
+ void LockContext();
+ void UnlockContext();
+ void WriteSvcTrampoline(ModuleDestLabel module_dest, u32 svc_id);
+ void WriteMrsHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg,
+ oaknut::SystemReg src_reg);
+ void WriteMsrHandler(ModuleDestLabel module_dest, oaknut::XReg src_reg);
+ void WriteCntpctHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg);
+
+private:
+ void BranchToPatch(uintptr_t module_dest) {
+ m_branch_to_patch_relocations.push_back({c.offset(), module_dest});
+ }
+
+ void BranchToModule(uintptr_t module_dest) {
+ m_branch_to_module_relocations.push_back({c.offset(), module_dest});
+ c.dw(0);
+ }
+
+ void WriteModulePc(uintptr_t module_dest) {
+ m_write_module_pc_relocations.push_back({c.offset(), module_dest});
+ c.dx(0);
+ }
+
+private:
+ // List of patch instructions we have generated.
+ std::vector<u32> m_patch_instructions{};
+
+ // Relocation type for relative branch from module to patch.
+ struct Relocation {
+ ptrdiff_t patch_offset; ///< Offset in bytes from the start of the patch section.
+ uintptr_t module_offset; ///< Offset in bytes from the start of the text section.
+ };
+
+ oaknut::VectorCodeGenerator c;
+ std::vector<Trampoline> m_trampolines;
+ std::vector<Relocation> m_branch_to_patch_relocations{};
+ std::vector<Relocation> m_branch_to_module_relocations{};
+ std::vector<Relocation> m_write_module_pc_relocations{};
+ std::vector<ModuleTextAddress> m_exclusives{};
+ oaknut::Label m_save_context{};
+ oaknut::Label m_load_context{};
+ PatchMode mode{PatchMode::None};
+};
+
+} // namespace Core::NCE
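BranchToModule and WriteModulePc only emit placeholder words and record where they were emitted; the recorded relocations are resolved once the patch section's and module's load addresses are known. As a rough illustration of what resolving a branch relocation involves (not the actual RelocateAndCopy code), the placeholder becomes an AArch64 B instruction whose signed 26-bit immediate is the word distance to the target, so the patch section must land within +/-128 MiB of the module:

    #include <cstdint>

    // Illustrative encoding of an unconditional AArch64 B from `from` to `to`.
    std::uint32_t EncodeBranch(std::uint64_t from, std::uint64_t to) {
        const std::int64_t imm26 = static_cast<std::int64_t>(to - from) >> 2; // word displacement
        return 0x14000000U | (static_cast<std::uint32_t>(imm26) & 0x03FFFFFFU);
    }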
diff --git a/src/core/core.cpp b/src/core/core.cpp
index d7e2efbd7..14d6c8c27 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -309,17 +309,10 @@ struct System::Impl {
telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
- // Create a resource limit for the process.
- const auto physical_memory_size =
- kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
- auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size);
-
// Create the process.
auto main_process = Kernel::KProcess::Create(system.Kernel());
- ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
- Kernel::KProcess::ProcessType::Userland, resource_limit)
- .IsSuccess());
Kernel::KProcess::Register(system.Kernel(), main_process);
+ kernel.AppendNewProcess(main_process);
kernel.MakeApplicationProcess(main_process);
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
@@ -418,6 +411,7 @@ struct System::Impl {
services->KillNVNFlinger();
}
kernel.CloseServices();
+ kernel.ShutdownCores();
services.reset();
service_manager.reset();
cheat_engine.reset();
@@ -429,7 +423,6 @@ struct System::Impl {
gpu_core.reset();
host1x_core.reset();
perf_stats.reset();
- kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
kernel.Shutdown();
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index e671b270f..d6b5abc68 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -76,6 +76,7 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
}
void CoreTiming::ClearPendingEvents() {
+ std::scoped_lock lock{basic_lock};
event_queue.clear();
}
@@ -113,6 +114,7 @@ bool CoreTiming::IsRunning() const {
}
bool CoreTiming::HasPendingEvents() const {
+ std::scoped_lock lock{basic_lock};
return !(wait_set && event_queue.empty());
}
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 26a8b93a7..21548f0a9 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -161,7 +161,7 @@ private:
std::shared_ptr<EventType> ev_lost;
Common::Event event{};
Common::Event pause_event{};
- std::mutex basic_lock;
+ mutable std::mutex basic_lock;
std::mutex advance_lock;
std::unique_ptr<std::jthread> timer_thread;
std::atomic<bool> paused{};
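basic_lock becomes mutable because HasPendingEvents() is a const member function yet still has to take the lock; marking the mutex mutable lets const methods lock it without a const_cast. A minimal sketch of the pattern:

    #include <mutex>
    #include <vector>

    class EventQueueSketch {
    public:
        bool HasPending() const {
            std::scoped_lock lock{m_lock}; // allowed: m_lock is mutable
            return !m_events.empty();
        }

    private:
        mutable std::mutex m_lock; // lockable from const member functions
        std::vector<int> m_events;
    };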
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 980bb97f9..151eb3870 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -211,6 +211,8 @@ void CpuManager::RunThread(std::stop_token token, std::size_t core) {
system.GPU().ObtainContext();
}
+ system.ArmInterface(core).Initialize();
+
auto& kernel = system.Kernel();
auto& scheduler = *kernel.CurrentScheduler();
auto* thread = scheduler.GetSchedulerCurrentThread();
diff --git a/src/core/debugger/debugger.cpp b/src/core/debugger/debugger.cpp
index a1589fecb..0e270eb50 100644
--- a/src/core/debugger/debugger.cpp
+++ b/src/core/debugger/debugger.cpp
@@ -258,20 +258,20 @@ private:
Kernel::KScopedSchedulerLock sl{system.Kernel()};
// Put all threads to sleep on next scheduler round.
- for (auto* thread : ThreadList()) {
- thread->RequestSuspend(Kernel::SuspendType::Debug);
+ for (auto& thread : ThreadList()) {
+ thread.RequestSuspend(Kernel::SuspendType::Debug);
}
}
void ResumeEmulation(Kernel::KThread* except = nullptr) {
// Wake up all threads.
- for (auto* thread : ThreadList()) {
- if (thread == except) {
+ for (auto& thread : ThreadList()) {
+ if (std::addressof(thread) == except) {
continue;
}
- thread->SetStepState(Kernel::StepState::NotStepping);
- thread->Resume(Kernel::SuspendType::Debug);
+ thread.SetStepState(Kernel::StepState::NotStepping);
+ thread.Resume(Kernel::SuspendType::Debug);
}
}
@@ -283,13 +283,17 @@ private:
}
void UpdateActiveThread() {
- const auto& threads{ThreadList()};
- if (std::find(threads.begin(), threads.end(), state->active_thread) == threads.end()) {
- state->active_thread = threads.front();
+ auto& threads{ThreadList()};
+ for (auto& thread : threads) {
+ if (std::addressof(thread) == state->active_thread) {
+ // Thread is still alive, no need to update.
+ return;
+ }
}
+ state->active_thread = std::addressof(threads.front());
}
- const std::list<Kernel::KThread*>& ThreadList() {
+ Kernel::KProcess::ThreadList& ThreadList() {
return system.ApplicationProcess()->GetThreadList();
}
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 2076aa8a2..148dd3e39 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -109,7 +109,7 @@ static std::string EscapeXML(std::string_view data) {
GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
: DebuggerFrontend(backend_), system{system_} {
- if (system.ApplicationProcess()->Is64BitProcess()) {
+ if (system.ApplicationProcess()->Is64Bit()) {
arch = std::make_unique<GDBStubA64>();
} else {
arch = std::make_unique<GDBStubA32>();
@@ -446,10 +446,10 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp
static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
- const Kernel::KThread* thread) {
+ const Kernel::KThread& thread) {
// Read thread type from TLS
- const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
- const VAddr argument_thread_type{thread->GetArgument()};
+ const VAddr tls_thread_type{memory.Read32(thread.GetTlsAddress() + 0x1fc)};
+ const VAddr argument_thread_type{thread.GetArgument()};
if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
@@ -477,10 +477,10 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
}
static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
- const Kernel::KThread* thread) {
+ const Kernel::KThread& thread) {
// Read thread type from TLS
- const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
- const VAddr argument_thread_type{thread->GetArgument()};
+ const VAddr tls_thread_type{memory.Read64(thread.GetTlsAddress() + 0x1f8)};
+ const VAddr argument_thread_type{thread.GetArgument()};
if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
@@ -508,16 +508,16 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
}
static std::optional<std::string> GetThreadName(Core::System& system,
- const Kernel::KThread* thread) {
- if (system.ApplicationProcess()->Is64BitProcess()) {
+ const Kernel::KThread& thread) {
+ if (system.ApplicationProcess()->Is64Bit()) {
return GetNameFromThreadType64(system.ApplicationMemory(), thread);
} else {
return GetNameFromThreadType32(system.ApplicationMemory(), thread);
}
}
-static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
- switch (thread->GetWaitReasonForDebugging()) {
+static std::string_view GetThreadWaitReason(const Kernel::KThread& thread) {
+ switch (thread.GetWaitReasonForDebugging()) {
case Kernel::ThreadWaitReasonForDebugging::Sleep:
return "Sleep";
case Kernel::ThreadWaitReasonForDebugging::IPC:
@@ -535,8 +535,8 @@ static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
}
}
-static std::string GetThreadState(const Kernel::KThread* thread) {
- switch (thread->GetState()) {
+static std::string GetThreadState(const Kernel::KThread& thread) {
+ switch (thread.GetState()) {
case Kernel::ThreadState::Initialized:
return "Initialized";
case Kernel::ThreadState::Waiting:
@@ -562,6 +562,120 @@ static std::string PaginateBuffer(std::string_view buffer, std::string_view requ
}
}
+static VAddr GetModuleEnd(Kernel::KProcessPageTable& page_table, VAddr base) {
+ Kernel::KMemoryInfo mem_info;
+ Kernel::Svc::MemoryInfo svc_mem_info;
+ Kernel::Svc::PageInfo page_info;
+ VAddr cur_addr{base};
+
+ // Expect: r-x Code (.text)
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ svc_mem_info = mem_info.GetSvcMemoryInfo();
+ cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+ if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+ svc_mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
+ return cur_addr - 1;
+ }
+
+ // Expect: r-- Code (.rodata)
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ svc_mem_info = mem_info.GetSvcMemoryInfo();
+ cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+ if (svc_mem_info.state != Kernel::Svc::MemoryState::Code ||
+ svc_mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
+ return cur_addr - 1;
+ }
+
+ // Expect: rw- CodeData (.data)
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ svc_mem_info = mem_info.GetSvcMemoryInfo();
+ cur_addr = svc_mem_info.base_address + svc_mem_info.size;
+ return cur_addr - 1;
+}
+
+static Loader::AppLoader::Modules FindModules(Core::System& system) {
+ Loader::AppLoader::Modules modules;
+
+ auto& page_table = system.ApplicationProcess()->GetPageTable();
+ auto& memory = system.ApplicationMemory();
+ VAddr cur_addr = 0;
+
+ // Look for executable sections in Code or AliasCode regions.
+ while (true) {
+ Kernel::KMemoryInfo mem_info{};
+ Kernel::Svc::PageInfo page_info{};
+ R_ASSERT(
+ page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info), cur_addr));
+ auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+ if (svc_mem_info.permission == Kernel::Svc::MemoryPermission::ReadExecute &&
+ (svc_mem_info.state == Kernel::Svc::MemoryState::Code ||
+ svc_mem_info.state == Kernel::Svc::MemoryState::AliasCode)) {
+ // Try to read the module name from its path.
+ constexpr s32 PathLengthMax = 0x200;
+ struct {
+ u32 zero;
+ s32 path_length;
+ std::array<char, PathLengthMax> path;
+ } module_path;
+
+ if (memory.ReadBlock(svc_mem_info.base_address + svc_mem_info.size, &module_path,
+ sizeof(module_path))) {
+ if (module_path.zero == 0 && module_path.path_length > 0) {
+ // Truncate module name.
+ module_path.path[PathLengthMax - 1] = '\0';
+
+ // Ignore leading directories.
+ char* path_pointer = module_path.path.data();
+
+ for (s32 i = 0; i < std::min(PathLengthMax, module_path.path_length) &&
+ module_path.path[i] != '\0';
+ i++) {
+ if (module_path.path[i] == '/' || module_path.path[i] == '\\') {
+ path_pointer = module_path.path.data() + i + 1;
+ }
+ }
+
+ // Insert output.
+ modules.emplace(svc_mem_info.base_address, path_pointer);
+ }
+ }
+ }
+
+ // Check if we're done.
+ const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
+ if (next_address <= cur_addr) {
+ break;
+ }
+
+ cur_addr = next_address;
+ }
+
+ return modules;
+}
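The path_pointer loop keeps only the component after the last path separator, so a stored path ending in "/main" is reported as "main". In isolation, the same trimming can be written with std::string_view:

    #include <string_view>

    // Keep only the file-name component after the last '/' or '\'.
    std::string_view ModuleBaseName(std::string_view path) {
        const auto pos = path.find_last_of("/\\");
        return pos == std::string_view::npos ? path : path.substr(pos + 1);
    }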
+
+static VAddr FindMainModuleEntrypoint(Core::System& system) {
+ Loader::AppLoader::Modules modules;
+ system.GetAppLoader().ReadNSOModules(modules);
+
+ // Do we have a module named main?
+ const auto main = std::find_if(modules.begin(), modules.end(),
+ [](const auto& key) { return key.second == "main"; });
+
+ if (main != modules.end()) {
+ return main->first;
+ }
+
+ // Do we have any loaded executable sections?
+ modules = FindModules(system);
+ if (!modules.empty()) {
+ return modules.begin()->first;
+ }
+
+ // As a last resort, use the start of the code region.
+ return GetInteger(system.ApplicationProcess()->GetPageTable().GetCodeRegionStart());
+}
+
void GDBStub::HandleQuery(std::string_view command) {
if (command.starts_with("TStatus")) {
// no tracepoint support
@@ -573,21 +687,10 @@ void GDBStub::HandleQuery(std::string_view command) {
const auto target_xml{arch->GetTargetXML()};
SendReply(PaginateBuffer(target_xml, command.substr(30)));
} else if (command.starts_with("Offsets")) {
- Loader::AppLoader::Modules modules;
- system.GetAppLoader().ReadNSOModules(modules);
-
- const auto main = std::find_if(modules.begin(), modules.end(),
- [](const auto& key) { return key.second == "main"; });
- if (main != modules.end()) {
- SendReply(fmt::format("TextSeg={:x}", main->first));
- } else {
- SendReply(fmt::format(
- "TextSeg={:x}",
- GetInteger(system.ApplicationProcess()->GetPageTable().GetCodeRegionStart())));
- }
+ const auto main_offset = FindMainModuleEntrypoint(system);
+ SendReply(fmt::format("TextSeg={:x}", main_offset));
} else if (command.starts_with("Xfer:libraries:read::")) {
- Loader::AppLoader::Modules modules;
- system.GetAppLoader().ReadNSOModules(modules);
+ auto modules = FindModules(system);
std::string buffer;
buffer += R"(<?xml version="1.0"?>)";
@@ -604,7 +707,7 @@ void GDBStub::HandleQuery(std::string_view command) {
const auto& threads = system.ApplicationProcess()->GetThreadList();
std::vector<std::string> thread_ids;
for (const auto& thread : threads) {
- thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
+ thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
}
SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
} else if (command.starts_with("sThreadInfo")) {
@@ -616,14 +719,14 @@ void GDBStub::HandleQuery(std::string_view command) {
buffer += "<threads>";
const auto& threads = system.ApplicationProcess()->GetThreadList();
- for (const auto* thread : threads) {
+ for (const auto& thread : threads) {
auto thread_name{GetThreadName(system, thread)};
if (!thread_name) {
- thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
+ thread_name = fmt::format("Thread {:d}", thread.GetThreadId());
}
buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
- thread->GetThreadId(), thread->GetActiveCore(),
+ thread.GetThreadId(), thread.GetActiveCore(),
EscapeXML(*thread_name), GetThreadState(thread));
}
@@ -727,32 +830,6 @@ static constexpr const char* GetMemoryPermissionString(const Kernel::Svc::Memory
}
}
-static VAddr GetModuleEnd(Kernel::KPageTable& page_table, VAddr base) {
- Kernel::Svc::MemoryInfo mem_info;
- VAddr cur_addr{base};
-
- // Expect: r-x Code (.text)
- mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
- cur_addr = mem_info.base_address + mem_info.size;
- if (mem_info.state != Kernel::Svc::MemoryState::Code ||
- mem_info.permission != Kernel::Svc::MemoryPermission::ReadExecute) {
- return cur_addr - 1;
- }
-
- // Expect: r-- Code (.rodata)
- mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
- cur_addr = mem_info.base_address + mem_info.size;
- if (mem_info.state != Kernel::Svc::MemoryState::Code ||
- mem_info.permission != Kernel::Svc::MemoryPermission::Read) {
- return cur_addr - 1;
- }
-
- // Expect: rw- CodeData (.data)
- mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
- cur_addr = mem_info.base_address + mem_info.size;
- return cur_addr - 1;
-}
-
void GDBStub::HandleRcmd(const std::vector<u8>& command) {
std::string_view command_str{reinterpret_cast<const char*>(&command[0]), command.size()};
std::string reply;
@@ -767,7 +844,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
if (command_str == "get fastmem") {
if (Settings::IsFastmemEnabled()) {
- const auto& impl = page_table.PageTableImpl();
+ const auto& impl = page_table.GetImpl();
const auto region = reinterpret_cast<uintptr_t>(impl.fastmem_arena);
const auto region_bits = impl.current_address_space_width_in_bits;
const auto region_size = 1ULL << region_bits;
@@ -779,26 +856,27 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
reply = "Fastmem is not enabled.\n";
}
} else if (command_str == "get info") {
- Loader::AppLoader::Modules modules;
- system.GetAppLoader().ReadNSOModules(modules);
+ auto modules = FindModules(system);
reply = fmt::format("Process: {:#x} ({})\n"
"Program Id: {:#018x}\n",
process->GetProcessId(), process->GetName(), process->GetProgramId());
- reply += fmt::format("Layout:\n"
- " Alias: {:#012x} - {:#012x}\n"
- " Heap: {:#012x} - {:#012x}\n"
- " Aslr: {:#012x} - {:#012x}\n"
- " Stack: {:#012x} - {:#012x}\n"
- "Modules:\n",
- GetInteger(page_table.GetAliasRegionStart()),
- GetInteger(page_table.GetAliasRegionEnd()),
- GetInteger(page_table.GetHeapRegionStart()),
- GetInteger(page_table.GetHeapRegionEnd()),
- GetInteger(page_table.GetAliasCodeRegionStart()),
- GetInteger(page_table.GetAliasCodeRegionEnd()),
- GetInteger(page_table.GetStackRegionStart()),
- GetInteger(page_table.GetStackRegionEnd()));
+ reply += fmt::format(
+ "Layout:\n"
+ " Alias: {:#012x} - {:#012x}\n"
+ " Heap: {:#012x} - {:#012x}\n"
+ " Aslr: {:#012x} - {:#012x}\n"
+ " Stack: {:#012x} - {:#012x}\n"
+ "Modules:\n",
+ GetInteger(page_table.GetAliasRegionStart()),
+ GetInteger(page_table.GetAliasRegionStart()) + page_table.GetAliasRegionSize() - 1,
+ GetInteger(page_table.GetHeapRegionStart()),
+ GetInteger(page_table.GetHeapRegionStart()) + page_table.GetHeapRegionSize() - 1,
+ GetInteger(page_table.GetAliasCodeRegionStart()),
+ GetInteger(page_table.GetAliasCodeRegionStart()) + page_table.GetAliasCodeRegionSize() -
+ 1,
+ GetInteger(page_table.GetStackRegionStart()),
+ GetInteger(page_table.GetStackRegionStart()) + page_table.GetStackRegionSize() - 1);
for (const auto& [vaddr, name] : modules) {
reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,
@@ -811,27 +889,34 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
while (true) {
using MemoryAttribute = Kernel::Svc::MemoryAttribute;
- auto mem_info = page_table.QueryInfo(cur_addr).GetSvcMemoryInfo();
-
- if (mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
- mem_info.base_address + mem_info.size - 1 != std::numeric_limits<u64>::max()) {
- const char* state = GetMemoryStateName(mem_info.state);
- const char* perm = GetMemoryPermissionString(mem_info);
-
- const char l = True(mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
- const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
- const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
- const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+ Kernel::KMemoryInfo mem_info{};
+ Kernel::Svc::PageInfo page_info{};
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+ cur_addr));
+ auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+ if (svc_mem_info.state != Kernel::Svc::MemoryState::Inaccessible ||
+ svc_mem_info.base_address + svc_mem_info.size - 1 !=
+ std::numeric_limits<u64>::max()) {
+ const char* state = GetMemoryStateName(svc_mem_info.state);
+ const char* perm = GetMemoryPermissionString(svc_mem_info);
+
+ const char l = True(svc_mem_info.attribute & MemoryAttribute::Locked) ? 'L' : '-';
+ const char i =
+ True(svc_mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
+ const char d =
+ True(svc_mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
+ const char u = True(svc_mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
const char p =
- True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
+ True(svc_mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
- reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
- mem_info.base_address,
- mem_info.base_address + mem_info.size - 1, perm, state, l, i,
- d, u, p, mem_info.ipc_count, mem_info.device_count);
+ reply += fmt::format(
+ " {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", svc_mem_info.base_address,
+ svc_mem_info.base_address + svc_mem_info.size - 1, perm, state, l, i, d, u, p,
+ svc_mem_info.ipc_count, svc_mem_info.device_count);
}
- const uintptr_t next_address = mem_info.base_address + mem_info.size;
+ const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
if (next_address <= cur_addr) {
break;
}
@@ -850,10 +935,10 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
}
Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
- const auto& threads{system.ApplicationProcess()->GetThreadList()};
- for (auto* thread : threads) {
- if (thread->GetThreadId() == thread_id) {
- return thread;
+ auto& threads{system.ApplicationProcess()->GetThreadList()};
+ for (auto& thread : threads) {
+ if (thread.GetThreadId() == thread_id) {
+ return std::addressof(thread);
}
}
diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp
index de3f8ef8f..1aea56a99 100644
--- a/src/core/device_memory.cpp
+++ b/src/core/device_memory.cpp
@@ -6,7 +6,7 @@
namespace Core {
-#ifdef ANDROID
+#ifdef HAS_NCE
constexpr size_t VirtualReserveSize = 1ULL << 38;
#else
constexpr size_t VirtualReserveSize = 1ULL << 39;
@@ -15,6 +15,7 @@ constexpr size_t VirtualReserveSize = 1ULL << 39;
DeviceMemory::DeviceMemory()
: buffer{Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize(),
VirtualReserveSize} {}
+
DeviceMemory::~DeviceMemory() = default;
} // namespace Core
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index 8e291ff67..763a44fee 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -104,16 +104,16 @@ Loader::ResultStatus ProgramMetadata::Reload(VirtualFile file) {
}
/*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
- // Allow use of cores 0~3 and thread priorities 1~63.
- constexpr u32 default_thread_info_capability = 0x30007F7;
+ // Allow use of cores 0~3 and thread priorities 16~63.
+ constexpr u32 default_thread_info_capability = 0x30043F7;
ProgramMetadata result;
result.LoadManual(
true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
- 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/,
- 0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/,
- 0x1FE00000 /*system_resource_size*/, {default_thread_info_capability} /*capabilities*/);
+ 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x100000 /*main_thread_stack_size*/,
+ 0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, 0 /*system_resource_size*/,
+ {default_thread_info_capability} /*capabilities*/);
return result;
}
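The new default capability packs the allowed priority and core ranges into a single ThreadInfo descriptor. Assuming the usual Horizon layout (lowest priority in bits 4-9, highest priority in bits 10-15, minimum core in bits 16-23, maximum core in bits 24-31), 0x30043F7 decodes to priorities 16~63 on cores 0~3, while the old 0x30007F7 allowed priorities 1~63:

    #include <cstdint>
    #include <cstdio>

    // Decode a ThreadInfo kernel capability descriptor (bit layout assumed as described above).
    void PrintThreadInfoCapability(std::uint32_t value) {
        const unsigned lowest_priority = (value >> 4) & 0x3F;
        const unsigned highest_priority = (value >> 10) & 0x3F;
        const unsigned min_core = (value >> 16) & 0xFF;
        const unsigned max_core = (value >> 24) & 0xFF;
        std::printf("priorities %u~%u, cores %u~%u\n", highest_priority, lowest_priority,
                    min_core, max_core);
    }

    // PrintThreadInfoCapability(0x30043F7) prints "priorities 16~63, cores 0~3".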
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index 9f8e74b13..76ee97d78 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -73,6 +73,9 @@ public:
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
const KernelCapabilityDescriptors& GetKernelCapabilities() const;
+ const std::array<u8, 0x10>& GetName() const {
+ return npdm_header.application_name;
+ }
void Print() const;
@@ -164,14 +167,14 @@ private:
u32_le unk_size_2;
};
- Header npdm_header;
- AciHeader aci_header;
- AcidHeader acid_header;
+ Header npdm_header{};
+ AciHeader aci_header{};
+ AcidHeader acid_header{};
- FileAccessControl acid_file_access;
- FileAccessHeader aci_file_access;
+ FileAccessControl acid_file_access{};
+ FileAccessHeader aci_file_access{};
- KernelCapabilityDescriptors aci_kernel_capabilities;
+ KernelCapabilityDescriptors aci_kernel_capabilities{};
};
} // namespace FileSys
diff --git a/src/core/file_sys/romfs.cpp b/src/core/file_sys/romfs.cpp
index 1c580de57..1eb1f439a 100644
--- a/src/core/file_sys/romfs.cpp
+++ b/src/core/file_sys/romfs.cpp
@@ -35,13 +35,14 @@ struct RomFSHeader {
static_assert(sizeof(RomFSHeader) == 0x50, "RomFSHeader has incorrect size.");
struct DirectoryEntry {
+ u32_le parent;
u32_le sibling;
u32_le child_dir;
u32_le child_file;
u32_le hash;
u32_le name_length;
};
-static_assert(sizeof(DirectoryEntry) == 0x14, "DirectoryEntry has incorrect size.");
+static_assert(sizeof(DirectoryEntry) == 0x18, "DirectoryEntry has incorrect size.");
struct FileEntry {
u32_le parent;
@@ -64,25 +65,22 @@ std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offs
return {entry, string};
}
-void ProcessFile(VirtualFile file, std::size_t file_offset, std::size_t data_offset,
- u32 this_file_offset, std::shared_ptr<VectorVfsDirectory> parent) {
- while (true) {
+void ProcessFile(const VirtualFile& file, std::size_t file_offset, std::size_t data_offset,
+ u32 this_file_offset, std::shared_ptr<VectorVfsDirectory>& parent) {
+ while (this_file_offset != ROMFS_ENTRY_EMPTY) {
auto entry = GetEntry<FileEntry>(file, file_offset + this_file_offset);
parent->AddFile(std::make_shared<OffsetVfsFile>(
file, entry.first.size, entry.first.offset + data_offset, entry.second));
- if (entry.first.sibling == ROMFS_ENTRY_EMPTY)
- break;
-
this_file_offset = entry.first.sibling;
}
}
-void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file_offset,
+void ProcessDirectory(const VirtualFile& file, std::size_t dir_offset, std::size_t file_offset,
std::size_t data_offset, u32 this_dir_offset,
- std::shared_ptr<VectorVfsDirectory> parent) {
- while (true) {
+ std::shared_ptr<VectorVfsDirectory>& parent) {
+ while (this_dir_offset != ROMFS_ENTRY_EMPTY) {
auto entry = GetEntry<DirectoryEntry>(file, dir_offset + this_dir_offset);
auto current = std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{}, std::vector<VirtualDir>{}, entry.second);
@@ -97,14 +95,12 @@ void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file
}
parent->AddDirectory(current);
- if (entry.first.sibling == ROMFS_ENTRY_EMPTY)
- break;
this_dir_offset = entry.first.sibling;
}
}
} // Anonymous namespace
-VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
+VirtualDir ExtractRomFS(VirtualFile file) {
RomFSHeader header{};
if (file->ReadObject(&header) != sizeof(RomFSHeader))
return nullptr;
@@ -113,27 +109,17 @@ VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
return nullptr;
const u64 file_offset = header.file_meta.offset;
- const u64 dir_offset = header.directory_meta.offset + 4;
-
- auto root =
- std::make_shared<VectorVfsDirectory>(std::vector<VirtualFile>{}, std::vector<VirtualDir>{},
- file->GetName(), file->GetContainingDirectory());
-
- ProcessDirectory(file, dir_offset, file_offset, header.data_offset, 0, root);
+ const u64 dir_offset = header.directory_meta.offset;
- VirtualDir out = std::move(root);
+ auto root_container = std::make_shared<VectorVfsDirectory>();
- if (type == RomFSExtractionType::SingleDiscard)
- return out->GetSubdirectories().front();
+ ProcessDirectory(file, dir_offset, file_offset, header.data_offset, 0, root_container);
- while (out->GetSubdirectories().size() == 1 && out->GetFiles().empty()) {
- if (Common::ToLower(out->GetSubdirectories().front()->GetName()) == "data" &&
- type == RomFSExtractionType::Truncated)
- break;
- out = out->GetSubdirectories().front();
+ if (auto root = root_container->GetSubdirectory(""); root) {
+ return std::make_shared<CachedVfsDirectory>(std::move(root));
}
- return std::make_shared<CachedVfsDirectory>(std::move(out));
+ return nullptr;
}
VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
diff --git a/src/core/file_sys/romfs.h b/src/core/file_sys/romfs.h
index 5d7f0c2a8..b75ff1aad 100644
--- a/src/core/file_sys/romfs.h
+++ b/src/core/file_sys/romfs.h
@@ -7,16 +7,9 @@
namespace FileSys {
-enum class RomFSExtractionType {
- Full, // Includes data directory
- Truncated, // Traverses into data directory
- SingleDiscard, // Traverses into the first subdirectory of root
-};
-
// Converts a RomFS binary blob to VFS Filesystem
// Returns nullptr on failure
-VirtualDir ExtractRomFS(VirtualFile file,
- RomFSExtractionType type = RomFSExtractionType::Truncated);
+VirtualDir ExtractRomFS(VirtualFile file);
// Converts a VFS filesystem into a RomFS binary
// Returns nullptr on failure
diff --git a/src/core/frontend/emu_window.h b/src/core/frontend/emu_window.h
index a72df034e..c7b48a58d 100644
--- a/src/core/frontend/emu_window.h
+++ b/src/core/frontend/emu_window.h
@@ -167,6 +167,11 @@ protected:
*/
std::pair<f32, f32> MapToTouchScreen(u32 framebuffer_x, u32 framebuffer_y) const;
+ /**
+ * Clip the provided coordinates to be inside the touchscreen area.
+ */
+ std::pair<u32, u32> ClipToTouchScreen(u32 new_x, u32 new_y) const;
+
WindowSystemInfo window_info;
bool strict_context_required = false;
@@ -181,11 +186,6 @@ private:
// By default, ignore this request and do nothing.
}
- /**
- * Clip the provided coordinates to be inside the touchscreen area.
- */
- std::pair<u32, u32> ClipToTouchScreen(u32 new_x, u32 new_y) const;
-
Layout::FramebufferLayout framebuffer_layout; ///< Current framebuffer layout
u32 client_area_width; ///< Current client width, should be set by window impl.
diff --git a/src/core/hid/emulated_console.h b/src/core/hid/emulated_console.h
index 79114bb6d..fae15a556 100644
--- a/src/core/hid/emulated_console.h
+++ b/src/core/hid/emulated_console.h
@@ -38,14 +38,6 @@ using TouchParams = std::array<Common::ParamPackage, MaxTouchDevices>;
using ConsoleMotionValues = ConsoleMotionInfo;
using TouchValues = std::array<Common::Input::TouchStatus, MaxTouchDevices>;
-struct TouchFinger {
- u64 last_touch{};
- Common::Point<float> position{};
- u32 id{};
- TouchAttribute attribute{};
- bool pressed{};
-};
-
// Contains all motion related data that is used on the services
struct ConsoleMotion {
Common::Vec3f accel{};
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 2af3f06fc..a6e681e15 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -8,6 +8,7 @@
#include "common/thread.h"
#include "core/hid/emulated_controller.h"
#include "core/hid/input_converter.h"
+#include "core/hle/service/hid/hid_util.h"
namespace Core::HID {
constexpr s32 HID_JOYSTICK_MAX = 0x7fff;
@@ -82,7 +83,7 @@ Settings::ControllerType EmulatedController::MapNPadToSettingsType(NpadStyleInde
}
void EmulatedController::ReloadFromSettings() {
- const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];
for (std::size_t index = 0; index < player.buttons.size(); ++index) {
@@ -96,18 +97,7 @@ void EmulatedController::ReloadFromSettings() {
}
controller.color_values = {};
- controller.colors_state.fullkey = {
- .body = GetNpadColor(player.body_color_left),
- .button = GetNpadColor(player.button_color_left),
- };
- controller.colors_state.left = {
- .body = GetNpadColor(player.body_color_left),
- .button = GetNpadColor(player.button_color_left),
- };
- controller.colors_state.right = {
- .body = GetNpadColor(player.body_color_right),
- .button = GetNpadColor(player.button_color_right),
- };
+ ReloadColorsFromSettings();
ring_params[0] = Common::ParamPackage(Settings::values.ringcon_analogs);
@@ -128,6 +118,30 @@ void EmulatedController::ReloadFromSettings() {
ReloadInput();
}
+void EmulatedController::ReloadColorsFromSettings() {
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
+ const auto& player = Settings::values.players.GetValue()[player_index];
+
+ // Avoid updating colors if overridden by physical controller
+ if (controller.color_values[LeftIndex].body != 0 &&
+ controller.color_values[RightIndex].body != 0) {
+ return;
+ }
+
+ controller.colors_state.fullkey = {
+ .body = GetNpadColor(player.body_color_left),
+ .button = GetNpadColor(player.button_color_left),
+ };
+ controller.colors_state.left = {
+ .body = GetNpadColor(player.body_color_left),
+ .button = GetNpadColor(player.button_color_left),
+ };
+ controller.colors_state.right = {
+ .body = GetNpadColor(player.body_color_right),
+ .button = GetNpadColor(player.button_color_right),
+ };
+}
+
void EmulatedController::LoadDevices() {
// TODO(german77): Use more buttons to detect the correct device
const auto left_joycon = button_params[Settings::NativeButton::DRight];
@@ -202,7 +216,7 @@ void EmulatedController::LoadDevices() {
}
void EmulatedController::LoadTASParams() {
- const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
Common::ParamPackage common_params{};
common_params.Set("engine", "tas");
common_params.Set("port", static_cast<int>(player_index));
@@ -230,10 +244,12 @@ void EmulatedController::LoadTASParams() {
tas_button_params[Settings::NativeButton::DUp].Set("button", 13);
tas_button_params[Settings::NativeButton::DRight].Set("button", 14);
tas_button_params[Settings::NativeButton::DDown].Set("button", 15);
- tas_button_params[Settings::NativeButton::SL].Set("button", 16);
- tas_button_params[Settings::NativeButton::SR].Set("button", 17);
+ tas_button_params[Settings::NativeButton::SLLeft].Set("button", 16);
+ tas_button_params[Settings::NativeButton::SRLeft].Set("button", 17);
tas_button_params[Settings::NativeButton::Home].Set("button", 18);
tas_button_params[Settings::NativeButton::Screenshot].Set("button", 19);
+ tas_button_params[Settings::NativeButton::SLRight].Set("button", 20);
+ tas_button_params[Settings::NativeButton::SRRight].Set("button", 21);
tas_stick_params[Settings::NativeAnalog::LStick].Set("axis_x", 0);
tas_stick_params[Settings::NativeAnalog::LStick].Set("axis_y", 1);
@@ -249,7 +265,7 @@ void EmulatedController::LoadTASParams() {
}
void EmulatedController::LoadVirtualGamepadParams() {
- const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
Common::ParamPackage common_params{};
common_params.Set("engine", "virtual_gamepad");
common_params.Set("port", static_cast<int>(player_index));
@@ -283,10 +299,12 @@ void EmulatedController::LoadVirtualGamepadParams() {
virtual_button_params[Settings::NativeButton::DUp].Set("button", 13);
virtual_button_params[Settings::NativeButton::DRight].Set("button", 14);
virtual_button_params[Settings::NativeButton::DDown].Set("button", 15);
- virtual_button_params[Settings::NativeButton::SL].Set("button", 16);
- virtual_button_params[Settings::NativeButton::SR].Set("button", 17);
+ virtual_button_params[Settings::NativeButton::SLLeft].Set("button", 16);
+ virtual_button_params[Settings::NativeButton::SRLeft].Set("button", 17);
virtual_button_params[Settings::NativeButton::Home].Set("button", 18);
virtual_button_params[Settings::NativeButton::Screenshot].Set("button", 19);
+ virtual_button_params[Settings::NativeButton::SLRight].Set("button", 20);
+ virtual_button_params[Settings::NativeButton::SRRight].Set("button", 21);
virtual_stick_params[Settings::NativeAnalog::LStick].Set("axis_x", 0);
virtual_stick_params[Settings::NativeAnalog::LStick].Set("axis_y", 1);
@@ -491,9 +509,11 @@ void EmulatedController::ReloadInput() {
});
}
turbo_button_state = 0;
+ is_initalized = true;
}
void EmulatedController::UnloadInput() {
+ is_initalized = false;
for (auto& button : button_devices) {
button.reset();
}
@@ -598,7 +618,7 @@ bool EmulatedController::IsConfiguring() const {
}
void EmulatedController::SaveCurrentConfig() {
- const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
auto& player = Settings::values.players.GetValue()[player_index];
player.connected = is_connected;
player.controller_type = MapNPadToSettingsType(npad_type);
@@ -854,12 +874,16 @@ void EmulatedController::SetButton(const Common::Input::CallbackStatus& callback
controller.npad_button_state.down.Assign(current_status.value);
controller.debug_pad_button_state.d_down.Assign(current_status.value);
break;
- case Settings::NativeButton::SL:
+ case Settings::NativeButton::SLLeft:
controller.npad_button_state.left_sl.Assign(current_status.value);
+ break;
+ case Settings::NativeButton::SLRight:
controller.npad_button_state.right_sl.Assign(current_status.value);
break;
- case Settings::NativeButton::SR:
+ case Settings::NativeButton::SRLeft:
controller.npad_button_state.left_sr.Assign(current_status.value);
+ break;
+ case Settings::NativeButton::SRRight:
controller.npad_button_state.right_sr.Assign(current_status.value);
break;
case Settings::NativeButton::Home:
@@ -1091,30 +1115,30 @@ void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callbac
bool is_charging = false;
bool is_powered = false;
- NpadBatteryLevel battery_level = 0;
+ NpadBatteryLevel battery_level = NpadBatteryLevel::Empty;
switch (controller.battery_values[index]) {
case Common::Input::BatteryLevel::Charging:
is_charging = true;
is_powered = true;
- battery_level = 6;
+ battery_level = NpadBatteryLevel::Full;
break;
case Common::Input::BatteryLevel::Medium:
- battery_level = 6;
+ battery_level = NpadBatteryLevel::High;
break;
case Common::Input::BatteryLevel::Low:
- battery_level = 4;
+ battery_level = NpadBatteryLevel::Low;
break;
case Common::Input::BatteryLevel::Critical:
- battery_level = 2;
+ battery_level = NpadBatteryLevel::Critical;
break;
case Common::Input::BatteryLevel::Empty:
- battery_level = 0;
+ battery_level = NpadBatteryLevel::Empty;
break;
case Common::Input::BatteryLevel::None:
case Common::Input::BatteryLevel::Full:
default:
is_powered = true;
- battery_level = 8;
+ battery_level = NpadBatteryLevel::Full;
break;
}
@@ -1185,13 +1209,16 @@ void EmulatedController::SetNfc(const Common::Input::CallbackStatus& callback) {
}
bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue vibration) {
+ if (!is_initalized) {
+ return false;
+ }
if (device_index >= output_devices.size()) {
return false;
}
if (!output_devices[device_index]) {
return false;
}
- const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];
const f32 strength = static_cast<f32>(player.vibration_strength) / 100.0f;
@@ -1217,9 +1244,13 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
}
bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
- const auto player_index = NpadIdTypeToIndex(npad_id_type);
+ const auto player_index = Service::HID::NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];
+ if (!is_initalized) {
+ return false;
+ }
+
if (!player.vibration_enabled) {
return false;
}
@@ -1239,6 +1270,10 @@ Common::Input::DriverResult EmulatedController::SetPollingMode(
EmulatedDeviceIndex device_index, Common::Input::PollingMode polling_mode) {
LOG_INFO(Service_HID, "Set polling mode {}, device_index={}", polling_mode, device_index);
+ if (!is_initalized) {
+ return Common::Input::DriverResult::InvalidHandle;
+ }
+
auto& left_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Left)];
auto& right_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_output_device = output_devices[3];
@@ -1284,6 +1319,10 @@ bool EmulatedController::SetCameraFormat(
Core::IrSensor::ImageTransferProcessorFormat camera_format) {
LOG_INFO(Service_HID, "Set camera format {}", camera_format);
+ if (!is_initalized) {
+ return false;
+ }
+
auto& right_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& camera_output_device = output_devices[2];
@@ -1307,6 +1346,11 @@ void EmulatedController::SetRingParam(Common::ParamPackage param) {
}
bool EmulatedController::HasNfc() const {
+ if (!is_initalized) {
+ return false;
+ }
+
const auto& nfc_output_device = output_devices[3];
switch (npad_type) {
@@ -1344,6 +1388,10 @@ bool EmulatedController::RemoveNfcHandle() {
}
bool EmulatedController::StartNfcPolling() {
+ if (!is_initalized) {
+ return false;
+ }
+
auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_virtual_output_device = output_devices[3];
@@ -1355,6 +1403,10 @@ bool EmulatedController::StartNfcPolling() {
}
bool EmulatedController::StopNfcPolling() {
+ if (!is_initalized) {
+ return false;
+ }
+
auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_virtual_output_device = output_devices[3];
@@ -1366,6 +1418,10 @@ bool EmulatedController::StopNfcPolling() {
}
bool EmulatedController::ReadAmiiboData(std::vector<u8>& data) {
+ if (!is_initalized) {
+ return false;
+ }
+
auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_virtual_output_device = output_devices[3];
@@ -1378,6 +1434,10 @@ bool EmulatedController::ReadAmiiboData(std::vector<u8>& data) {
bool EmulatedController::ReadMifareData(const Common::Input::MifareRequest& request,
Common::Input::MifareRequest& out_data) {
+ if (!is_initalized) {
+ return false;
+ }
+
auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_virtual_output_device = output_devices[3];
@@ -1390,6 +1450,10 @@ bool EmulatedController::ReadMifareData(const Common::Input::MifareRequest& requ
}
bool EmulatedController::WriteMifareData(const Common::Input::MifareRequest& request) {
+ if (!is_initalized) {
+ return false;
+ }
+
auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_virtual_output_device = output_devices[3];
@@ -1401,6 +1465,10 @@ bool EmulatedController::WriteMifareData(const Common::Input::MifareRequest& req
}
bool EmulatedController::WriteNfc(const std::vector<u8>& data) {
+ if (!is_initalized) {
+ return false;
+ }
+
auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
auto& nfc_virtual_output_device = output_devices[3];
@@ -1412,6 +1480,10 @@ bool EmulatedController::WriteNfc(const std::vector<u8>& data) {
}
void EmulatedController::SetLedPattern() {
+ if (!is_initalized) {
+ return;
+ }
+
for (auto& device : output_devices) {
if (!device) {
continue;
@@ -1627,7 +1699,7 @@ void EmulatedController::SetNpadStyleIndex(NpadStyleIndex npad_type_) {
}
if (is_connected) {
LOG_WARNING(Service_HID, "Controller {} type changed while it's connected",
- NpadIdTypeToIndex(npad_id_type));
+ Service::HID::NpadIdTypeToIndex(npad_id_type));
}
npad_type = npad_type_;
}
@@ -1877,12 +1949,16 @@ NpadButton EmulatedController::GetTurboButtonMask() const {
case Settings::NativeButton::DDown:
button_mask.down.Assign(1);
break;
- case Settings::NativeButton::SL:
+ case Settings::NativeButton::SLLeft:
button_mask.left_sl.Assign(1);
+ break;
+ case Settings::NativeButton::SLRight:
button_mask.right_sl.Assign(1);
break;
- case Settings::NativeButton::SR:
+ case Settings::NativeButton::SRLeft:
button_mask.left_sr.Assign(1);
+ break;
+ case Settings::NativeButton::SRRight:
button_mask.right_sr.Assign(1);
break;
default:
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index d4500583e..d6e20ab66 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -253,6 +253,9 @@ public:
/// Overrides current mapped devices with the stored configuration and reloads all input devices
void ReloadFromSettings();
+ /// Updates current colors with the ones stored in the configuration
+ void ReloadColorsFromSettings();
+
/// Saves the current mapped configuration
void SaveCurrentConfig();
@@ -556,6 +559,7 @@ private:
NpadStyleTag supported_style_tag{NpadStyleSet::All};
bool is_connected{false};
bool is_configuring{false};
+ bool is_initalized{false};
bool system_buttons_enabled{true};
f32 motion_sensitivity{Core::HID::MotionInput::IsAtRestStandard};
u32 turbo_button_state{0};
diff --git a/src/core/hid/hid_core.cpp b/src/core/hid/hid_core.cpp
index cf53c04d9..2cf25a870 100644
--- a/src/core/hid/hid_core.cpp
+++ b/src/core/hid/hid_core.cpp
@@ -6,6 +6,7 @@
#include "core/hid/emulated_controller.h"
#include "core/hid/emulated_devices.h"
#include "core/hid/hid_core.h"
+#include "core/hle/service/hid/hid_util.h"
namespace Core::HID {
@@ -98,11 +99,11 @@ const EmulatedDevices* HIDCore::GetEmulatedDevices() const {
}
EmulatedController* HIDCore::GetEmulatedControllerByIndex(std::size_t index) {
- return GetEmulatedController(IndexToNpadIdType(index));
+ return GetEmulatedController(Service::HID::IndexToNpadIdType(index));
}
const EmulatedController* HIDCore::GetEmulatedControllerByIndex(std::size_t index) const {
- return GetEmulatedController(IndexToNpadIdType(index));
+ return GetEmulatedController(Service::HID::IndexToNpadIdType(index));
}
void HIDCore::SetSupportedStyleTag(NpadStyleTag style_tag) {
diff --git a/src/core/hid/hid_types.h b/src/core/hid/hid_types.h
index 00beb40dd..4bf285f36 100644
--- a/src/core/hid/hid_types.h
+++ b/src/core/hid/hid_types.h
@@ -8,6 +8,7 @@
#include "common/common_types.h"
#include "common/point.h"
#include "common/uuid.h"
+#include "common/vector_math.h"
namespace Core::HID {
@@ -218,6 +219,13 @@ enum class NpadIdType : u32 {
Invalid = 0xFFFFFFFF,
};
+enum class NpadInterfaceType : u8 {
+ Bluetooth = 1,
+ Rail = 2,
+ Usb = 3,
+ Embedded = 4,
+};
+
// This is nn::hid::NpadStyleIndex
enum class NpadStyleIndex : u8 {
None = 0,
@@ -302,6 +310,15 @@ enum class TouchScreenModeForNx : u8 {
Heat2,
};
+// This is nn::hid::system::NpadBatteryLevel
+enum class NpadBatteryLevel : u32 {
+ Empty,
+ Critical,
+ Low,
+ High,
+ Full,
+};
+
// This is nn::hid::NpadStyleTag
struct NpadStyleTag {
union {
@@ -347,6 +364,14 @@ struct TouchState {
};
static_assert(sizeof(TouchState) == 0x28, "Touchstate is an invalid size");
+struct TouchFinger {
+ u64 last_touch{};
+ Common::Point<float> position{};
+ u32 id{};
+ TouchAttribute attribute{};
+ bool pressed{};
+};
+
// This is nn::hid::TouchScreenConfigurationForNx
struct TouchScreenConfigurationForNx {
TouchScreenModeForNx mode{TouchScreenModeForNx::UseSystemSetting};
@@ -385,16 +410,12 @@ struct NpadGcTriggerState {
};
static_assert(sizeof(NpadGcTriggerState) == 0x10, "NpadGcTriggerState is an invalid size");
-// This is nn::hid::system::NpadBatteryLevel
-using NpadBatteryLevel = u32;
-static_assert(sizeof(NpadBatteryLevel) == 0x4, "NpadBatteryLevel is an invalid size");
-
// This is nn::hid::system::NpadPowerInfo
struct NpadPowerInfo {
bool is_powered{};
bool is_charging{};
INSERT_PADDING_BYTES(0x6);
- NpadBatteryLevel battery_level{8};
+ NpadBatteryLevel battery_level{NpadBatteryLevel::Full};
};
static_assert(sizeof(NpadPowerInfo) == 0xC, "NpadPowerInfo is an invalid size");
@@ -578,6 +599,29 @@ struct SixAxisSensorIcInformation {
static_assert(sizeof(SixAxisSensorIcInformation) == 0xC8,
"SixAxisSensorIcInformation is an invalid size");
+// This is nn::hid::SixAxisSensorAttribute
+struct SixAxisSensorAttribute {
+ union {
+ u32 raw{};
+ BitField<0, 1, u32> is_connected;
+ BitField<1, 1, u32> is_interpolated;
+ };
+};
+static_assert(sizeof(SixAxisSensorAttribute) == 4, "SixAxisSensorAttribute is an invalid size");
+
+// This is nn::hid::SixAxisSensorState
+struct SixAxisSensorState {
+ s64 delta_time{};
+ s64 sampling_number{};
+ Common::Vec3f accel{};
+ Common::Vec3f gyro{};
+ Common::Vec3f rotation{};
+ std::array<Common::Vec3f, 3> orientation{};
+ SixAxisSensorAttribute attribute{};
+ INSERT_PADDING_BYTES(4); // Reserved
+};
+static_assert(sizeof(SixAxisSensorState) == 0x60, "SixAxisSensorState is an invalid size");
+
// This is nn::hid::VibrationDeviceHandle
struct VibrationDeviceHandle {
NpadStyleIndex npad_type{NpadStyleIndex::None};
@@ -688,60 +732,4 @@ struct UniquePadId {
};
static_assert(sizeof(UniquePadId) == 0x8, "UniquePadId is an invalid size");
-/// Converts a NpadIdType to an array index.
-constexpr size_t NpadIdTypeToIndex(NpadIdType npad_id_type) {
- switch (npad_id_type) {
- case NpadIdType::Player1:
- return 0;
- case NpadIdType::Player2:
- return 1;
- case NpadIdType::Player3:
- return 2;
- case NpadIdType::Player4:
- return 3;
- case NpadIdType::Player5:
- return 4;
- case NpadIdType::Player6:
- return 5;
- case NpadIdType::Player7:
- return 6;
- case NpadIdType::Player8:
- return 7;
- case NpadIdType::Handheld:
- return 8;
- case NpadIdType::Other:
- return 9;
- default:
- return 0;
- }
-}
-
-/// Converts an array index to a NpadIdType
-constexpr NpadIdType IndexToNpadIdType(size_t index) {
- switch (index) {
- case 0:
- return NpadIdType::Player1;
- case 1:
- return NpadIdType::Player2;
- case 2:
- return NpadIdType::Player3;
- case 3:
- return NpadIdType::Player4;
- case 4:
- return NpadIdType::Player5;
- case 5:
- return NpadIdType::Player6;
- case 6:
- return NpadIdType::Player7;
- case 7:
- return NpadIdType::Player8;
- case 8:
- return NpadIdType::Handheld;
- case 9:
- return NpadIdType::Other;
- default:
- return NpadIdType::Invalid;
- }
-}
-
} // namespace Core::HID
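
With the battery level now a scoped enum rather than a raw u32, call sites can compare against named levels instead of the old magic value 8. A minimal sketch under the assumption that the Core::HID declarations above are in scope; DescribeBattery is a hypothetical helper, not part of this change:

#include <string_view>

namespace Example {
using Core::HID::NpadBatteryLevel;
using Core::HID::NpadPowerInfo;

// Map a power-info snapshot to a human-readable status string.
constexpr std::string_view DescribeBattery(const NpadPowerInfo& info) {
    if (info.is_charging) {
        return "charging";
    }
    switch (info.battery_level) {
    case NpadBatteryLevel::Empty:
    case NpadBatteryLevel::Critical:
        return "needs charge";
    default:
        return "ok";
    }
}
} // namespace Example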
diff --git a/src/core/hid/input_interpreter.cpp b/src/core/hid/input_interpreter.cpp
index 76d6b8ab0..a6bdd28f2 100644
--- a/src/core/hid/input_interpreter.cpp
+++ b/src/core/hid/input_interpreter.cpp
@@ -5,21 +5,22 @@
#include "core/hid/hid_types.h"
#include "core/hid/input_interpreter.h"
#include "core/hle/service/hid/controllers/npad.h"
-#include "core/hle/service/hid/hid.h"
+#include "core/hle/service/hid/hid_server.h"
+#include "core/hle/service/hid/resource_manager.h"
#include "core/hle/service/sm/sm.h"
InputInterpreter::InputInterpreter(Core::System& system)
: npad{system.ServiceManager()
- .GetService<Service::HID::Hid>("hid")
- ->GetAppletResource()
- ->GetController<Service::HID::Controller_NPad>(Service::HID::HidController::NPad)} {
+ .GetService<Service::HID::IHidServer>("hid")
+ ->GetResourceManager()
+ ->GetNpad()} {
ResetButtonStates();
}
InputInterpreter::~InputInterpreter() = default;
void InputInterpreter::PollInput() {
- const auto button_state = npad.GetAndResetPressState();
+ const auto button_state = npad->GetAndResetPressState();
previous_index = current_index;
current_index = (current_index + 1) % button_states.size();
diff --git a/src/core/hid/input_interpreter.h b/src/core/hid/input_interpreter.h
index 8c521b381..3569aac93 100644
--- a/src/core/hid/input_interpreter.h
+++ b/src/core/hid/input_interpreter.h
@@ -16,7 +16,7 @@ enum class NpadButton : u64;
}
namespace Service::HID {
-class Controller_NPad;
+class NPad;
}
/**
@@ -101,7 +101,7 @@ public:
}
private:
- Service::HID::Controller_NPad& npad;
+ std::shared_ptr<Service::HID::NPad> npad;
/// Stores 9 consecutive button states polled from HID.
std::array<Core::HID::NpadButton, 9> button_states{};
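
PollInput above rotates current_index and previous_index through this 9-entry ring, so detecting a fresh button press reduces to comparing two adjacent samples. A sketch of that rising-edge test, assuming NpadButton's u64 underlying type; IsNewlyPressed is illustrative, not the interpreter's real helper:

#include <array>
#include <cstddef>
#include <cstdint>

// Returns true when the button is held in the current sample but not the previous one.
bool IsNewlyPressed(const std::array<Core::HID::NpadButton, 9>& states, std::size_t current,
                    std::size_t previous, Core::HID::NpadButton button) {
    const auto mask = static_cast<std::uint64_t>(button);
    const bool now = (static_cast<std::uint64_t>(states[current]) & mask) != 0;
    const bool before = (static_cast<std::uint64_t>(states[previous]) & mask) != 0;
    return now && !before;
}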
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 4cfdf4558..37fa39a73 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -8,7 +8,11 @@
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_trace.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc_results.h"
namespace Kernel::Board::Nintendo::Nx {
@@ -30,6 +34,8 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;
+constexpr const std::size_t SecureAlignment = 128_KiB;
+
namespace {
using namespace Common::Literals;
@@ -183,4 +189,66 @@ u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
return GenerateUniformRange(min, max, GenerateRandomU64);
}
+size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
+ if (pool == static_cast<u32>(KMemoryManager::Pool::Applet)) {
+ return 0;
+ } else {
+ // return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
+ return size;
+ }
+}
+
+Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
+ u32 pool) {
+ // Applet secure memory is handled separately.
+ UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
+
+ // Ensure the size is aligned.
+ const size_t alignment =
+ (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
+ R_UNLESS(Common::IsAligned(size, alignment), ResultInvalidSize);
+
+ // Allocate the memory.
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress paddr = kernel.MemoryManager().AllocateAndOpenContinuous(
+ num_pages, alignment / PageSize,
+ KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool),
+ KMemoryManager::Direction::FromFront));
+ R_UNLESS(paddr != 0, ResultOutOfMemory);
+
+ // Ensure we don't leak references to the memory on error.
+ ON_RESULT_FAILURE {
+ kernel.MemoryManager().Close(paddr, num_pages);
+ };
+
+ // We succeeded.
+ *out = KPageTable::GetHeapVirtualAddress(kernel, paddr);
+ R_SUCCEED();
+}
+
+void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
+ u32 pool) {
+ // Applet secure memory is handled separately.
+ UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
+
+ // Ensure the size is aligned.
+ const size_t alignment =
+ (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
+ ASSERT(Common::IsAligned(GetInteger(address), alignment));
+ ASSERT(Common::IsAligned(size, alignment));
+
+ // Close the secure region's pages.
+ kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel, address),
+ size / PageSize);
+}
+
+// Insecure Memory.
+KResourceLimit* KSystemControl::GetInsecureMemoryResourceLimit(KernelCore& kernel) {
+ return kernel.GetSystemResourceLimit();
+}
+
+u32 KSystemControl::GetInsecureMemoryPool() {
+ return static_cast<u32>(KMemoryManager::Pool::SystemNonSecure);
+}
+
} // namespace Kernel::Board::Nintendo::Nx
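
AllocateSecureMemory above rejects sizes that are not aligned to 128 KiB (SecureAlignment) unless the System pool is used, so a caller is expected to round its request up first. An illustrative sketch of that rounding, not taken from the diff:

#include <cstddef>

// AlignUpSecure is illustrative only; 128 KiB mirrors SecureAlignment above, while the
// System pool would use page alignment instead.
constexpr std::size_t AlignUpSecure(std::size_t size) {
    constexpr std::size_t alignment = 128 * 1024;
    return (size + alignment - 1) & ~(alignment - 1);
}

static_assert(AlignUpSecure(1) == 128 * 1024);
static_assert(AlignUpSecure(128 * 1024) == 128 * 1024);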
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index b477e8193..60c5e58b7 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -4,6 +4,12 @@
#pragma once
#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+class KernelCore;
+class KResourceLimit;
+} // namespace Kernel
namespace Kernel::Board::Nintendo::Nx {
@@ -25,8 +31,20 @@ public:
static std::size_t GetMinimumNonSecureSystemPoolSize();
};
+ // Randomness.
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
+
+ // Secure Memory.
+ static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
+ static Result AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
+ u32 pool);
+ static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
+ u32 pool);
+
+ // Insecure Memory.
+ static KResourceLimit* GetInsecureMemoryResourceLimit(KernelCore& kernel);
+ static u32 GetInsecureMemoryPool();
};
} // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index af1af2b78..4d2d0098e 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -75,12 +75,26 @@ struct CodeSet final {
return segments[2];
}
+#ifdef HAS_NCE
+ Segment& PatchSegment() {
+ return patch_segment;
+ }
+
+ const Segment& PatchSegment() const {
+ return patch_segment;
+ }
+#endif
+
/// The overall data that backs this code set.
Kernel::PhysicalMemory memory;
/// The segments that comprise this code set.
std::array<Segment, 3> segments;
+#ifdef HAS_NCE
+ Segment patch_segment;
+#endif
+
/// The entry point address for this code set.
KProcessAddress entrypoint = 0;
};
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index 32173e52b..23258071e 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -25,8 +25,8 @@ constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 36, .address = 2_GiB , .size = 64_GiB - 2_GiB , .type = KAddressSpaceInfo::Type::MapLarge, },
{ .bit_width = 36, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Alias, },
-#ifdef ANDROID
- // With Android, we use a 38-bit address space due to memory limitations. This should (safely) truncate ASLR region.
+#ifdef HAS_NCE
+ // With NCE, we use a 38-bit address space due to memory limitations. This should (safely) truncate ASLR region.
{ .bit_width = 39, .address = 128_MiB , .size = 256_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
#else
{ .bit_width = 39, .address = 128_MiB , .size = 512_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
index e7da7a21d..274fee493 100644
--- a/src/core/hle/kernel/k_capabilities.cpp
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -4,14 +4,16 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
+#include "core/hle/kernel/k_trace.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_version.h"
namespace Kernel {
-Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps,
+ KProcessPageTable* page_table) {
// We're initializing an initial process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
@@ -41,7 +43,8 @@ Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTabl
R_RETURN(this->SetCapabilities(kern_caps, page_table));
}
-Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
+Result KCapabilities::InitializeForUser(std::span<const u32> user_caps,
+ KProcessPageTable* page_table) {
// We're initializing a user process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();
@@ -121,7 +124,7 @@ Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
R_SUCCEED();
}
-Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
+Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table) {
const auto range_pack = MapRange{cap};
const auto size_pack = MapRangeSize{size_cap};
@@ -142,16 +145,13 @@ Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* p
? KMemoryPermission::UserRead
: KMemoryPermission::UserReadWrite;
if (MapRangeSize{size_cap}.normal) {
- // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
+ R_RETURN(page_table->MapStatic(phys_addr, size, perm));
} else {
- // R_RETURN(page_table->MapIo(phys_addr, size, perm));
+ R_RETURN(page_table->MapIo(phys_addr, size, perm));
}
-
- UNIMPLEMENTED();
- R_SUCCEED();
}
-Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapIoPage_(const u32 cap, KProcessPageTable* page_table) {
// Get/validate address/size
const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
const size_t num_pages = 1;
@@ -160,10 +160,7 @@ Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
// Do the mapping.
- // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
-
- UNIMPLEMENTED();
- R_SUCCEED();
+ R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission::UserReadWrite));
}
template <typename F>
@@ -200,13 +197,11 @@ Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
R_SUCCEED();
}
-Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
+Result KCapabilities::MapRegion_(const u32 cap, KProcessPageTable* page_table) {
// Map each region into the process's page table.
return ProcessMapRegionCapability(
- cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
- // R_RETURN(page_table->MapRegion(region_type, perm));
- UNIMPLEMENTED();
- R_SUCCEED();
+ cap, [page_table](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
+ R_RETURN(page_table->MapRegion(region_type, perm));
});
}
@@ -280,7 +275,7 @@ Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
}
Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
- KPageTable* page_table) {
+ KProcessPageTable* page_table) {
// Validate this is a capability we can act on.
const auto type = GetCapabilityType(cap);
R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);
@@ -318,7 +313,7 @@ Result KCapabilities::SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
}
}
-Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
+Result KCapabilities::SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table) {
u32 set_flags = 0, set_svc = 0;
for (size_t i = 0; i < caps.size(); i++) {
@@ -335,6 +330,8 @@ Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* pag
// Map the range.
R_TRY(this->MapRange_(cap, size_cap, page_table));
+ } else if (GetCapabilityType(cap) == CapabilityType::MapRegion && !IsKTraceEnabled) {
+ continue;
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
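
MapRange_ takes both the range word and its size word, so SetCapabilities has to consume two consecutive descriptors when it encounters a MapRange capability (the size_cap in the hunk above is presumably read from the following word). A generic sketch of that pairing; the predicate and handlers are hypothetical stand-ins for the real decoding:

#include <cstdint>
#include <functional>
#include <span>

// Walk a capability list, consuming two words for "map range" descriptors and one otherwise.
void WalkCaps(std::span<const std::uint32_t> caps,
              const std::function<bool(std::uint32_t)>& is_map_range,
              const std::function<void(std::uint32_t, std::uint32_t)>& on_map_range,
              const std::function<void(std::uint32_t)>& on_single) {
    for (std::size_t i = 0; i < caps.size(); i++) {
        const std::uint32_t cap = caps[i];
        if (is_map_range(cap) && i + 1 < caps.size()) {
            // The size descriptor follows the range descriptor; consume both together.
            on_map_range(cap, caps[++i]);
        } else {
            on_single(cap);
        }
    }
}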
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index de766c811..013d952ad 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -15,15 +15,15 @@
namespace Kernel {
-class KPageTable;
+class KProcessPageTable;
class KernelCore;
class KCapabilities {
public:
constexpr explicit KCapabilities() = default;
- Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
- Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
+ Result InitializeForKip(std::span<const u32> kern_caps, KProcessPageTable* page_table);
+ Result InitializeForUser(std::span<const u32> user_caps, KProcessPageTable* page_table);
static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
@@ -200,8 +200,8 @@ private:
RawCapabilityValue raw;
BitField<0, 15, CapabilityType> id;
- BitField<15, 4, u32> major_version;
- BitField<19, 13, u32> minor_version;
+ BitField<15, 4, u32> minor_version;
+ BitField<19, 13, u32> major_version;
};
union HandleTable {
@@ -264,9 +264,9 @@ private:
Result SetCorePriorityCapability(const u32 cap);
Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
- Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
- Result MapIoPage_(const u32 cap, KPageTable* page_table);
- Result MapRegion_(const u32 cap, KPageTable* page_table);
+ Result MapRange_(const u32 cap, const u32 size_cap, KProcessPageTable* page_table);
+ Result MapIoPage_(const u32 cap, KProcessPageTable* page_table);
+ Result MapRegion_(const u32 cap, KProcessPageTable* page_table);
Result SetInterruptPairCapability(const u32 cap);
Result SetProgramTypeCapability(const u32 cap);
Result SetKernelVersionCapability(const u32 cap);
@@ -277,8 +277,9 @@ private:
static Result ProcessMapRegionCapability(const u32 cap, F f);
static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
- Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
- Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
+ Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc,
+ KProcessPageTable* page_table);
+ Result SetCapabilities(std::span<const u32> caps, KProcessPageTable* page_table);
private:
Svc::SvcAccessFlagSet m_svc_access_flags{};
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index efbac0e6a..7633a51fb 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -107,12 +107,12 @@ KConditionVariable::KConditionVariable(Core::System& system)
KConditionVariable::~KConditionVariable() = default;
-Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
- KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
+Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress addr) {
+ KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
- KScopedSchedulerLock sl(m_kernel);
+ KScopedSchedulerLock sl(kernel);
// Remove waiter thread.
bool has_waiters{};
@@ -133,7 +133,7 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
// Write the value to userspace.
Result result{ResultSuccess};
- if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
+ if (WriteToUser(kernel, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -148,28 +148,28 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
}
}
-Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
- KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
- ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
+Result KConditionVariable::WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
+ u32 value) {
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
+ ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
KThread* owner_thread{};
{
- KScopedSchedulerLock sl(m_kernel);
+ KScopedSchedulerLock sl(kernel);
// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
// Read the tag from userspace.
u32 test_tag{};
- R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
- ResultInvalidCurrentMemory);
+ R_UNLESS(ReadFromUser(kernel, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
// Get the lock owner thread.
- owner_thread = GetCurrentProcess(m_kernel)
+ owner_thread = GetCurrentProcess(kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 8c2f3ae51..2620c8e39 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -24,11 +24,12 @@ public:
explicit KConditionVariable(Core::System& system);
~KConditionVariable();
- // Arbitration
- Result SignalToAddress(KProcessAddress addr);
- Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
+ // Arbitration.
+ static Result SignalToAddress(KernelCore& kernel, KProcessAddress addr);
+ static Result WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
+ u32 value);
- // Condition variable
+ // Condition variable.
void Signal(u64 cv_key, s32 count);
Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index f48896715..f0703f795 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -54,7 +54,7 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
R_SUCCEED();
}
-Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Map(KProcessPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
@@ -113,7 +113,7 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_
R_SUCCEED();
}
-Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
+Result KDeviceAddressSpace::Unmap(KProcessPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 18556e3cc..ff0ec8152 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -5,7 +5,7 @@
#include <string>
-#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process_page_table.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -31,23 +31,23 @@ public:
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);
- Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result MapByForce(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}
- Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result MapAligned(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}
- Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result Unmap(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address);
static void Initialize();
private:
- Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
+ Result Map(KProcessPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option, bool is_aligned);
private:
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index fe6a20168..22d79569a 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -22,7 +22,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
KScopedSchedulerLock sl{kernel};
// Pin the current thread.
- process->PinCurrentThread(core_id);
+ process->PinCurrentThread();
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index c8122644f..d7adb3169 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -394,6 +394,14 @@ private:
return region.GetEndAddress();
}
+public:
+ static const KMemoryRegion* Find(const KMemoryLayout& layout, KVirtualAddress address) {
+ return Find(address, layout.GetVirtualMemoryRegionTree());
+ }
+ static const KMemoryRegion* Find(const KMemoryLayout& layout, KPhysicalAddress address) {
+ return Find(address, layout.GetPhysicalMemoryRegionTree());
+ }
+
private:
u64 m_linear_phys_to_virt_diff{};
u64 m_linear_virt_to_phys_diff{};
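
The new overloads pick the region tree from the address type: a KVirtualAddress searches the virtual tree, a KPhysicalAddress the physical one. A hypothetical usage sketch, assuming (as the signatures suggest) that they are static members of KMemoryLayout:

// FindPhysicalRegion is a hypothetical wrapper; it only shows that the argument type
// selects which region tree the overload searches.
const Kernel::KMemoryRegion* FindPhysicalRegion(const Kernel::KMemoryLayout& layout,
                                                Kernel::KPhysicalAddress paddr) {
    return Kernel::KMemoryLayout::Find(layout, paddr);
}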
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 637558e10..0a973ec8c 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -11,6 +11,7 @@
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -168,11 +169,37 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
- UNREACHABLE();
+ const u32 pool_index = static_cast<u32>(pool);
+
+ // Lock the pool.
+ KScopedLightLock lk(m_pool_locks[pool_index]);
+
+ // Check that we don't already have an optimized process.
+ R_UNLESS(!m_has_optimized_process[pool_index], ResultBusy);
+
+ // Set the optimized process id.
+ m_optimized_process_ids[pool_index] = process_id;
+ m_has_optimized_process[pool_index] = true;
+
+ // Clear the management area for the optimized process.
+ for (auto* manager = this->GetFirstManager(pool, Direction::FromFront); manager != nullptr;
+ manager = this->GetNextManager(manager, Direction::FromFront)) {
+ manager->InitializeOptimizedMemory(m_system.Kernel());
+ }
+
+ R_SUCCEED();
}
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
- UNREACHABLE();
+ const u32 pool_index = static_cast<u32>(pool);
+
+ // Lock the pool.
+ KScopedLightLock lk(m_pool_locks[pool_index]);
+
+ // If the process was optimized, clear it.
+ if (m_has_optimized_process[pool_index] && m_optimized_process_ids[pool_index] == process_id) {
+ m_has_optimized_process[pool_index] = false;
+ }
}
KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
@@ -207,7 +234,7 @@ KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, siz
// Maintain the optimized memory bitmap, if we should.
if (m_has_optimized_process[static_cast<size_t>(pool)]) {
- UNIMPLEMENTED();
+ chosen_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block, num_pages);
}
// Open the first reference to the pages.
@@ -255,7 +282,8 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
// Maintain the optimized memory bitmap, if we should.
if (unoptimized) {
- UNIMPLEMENTED();
+ cur_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block,
+ pages_per_alloc);
}
num_pages -= pages_per_alloc;
@@ -358,8 +386,8 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
- any_new =
- manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+ any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
+ cur_pages, fill_pattern);
// Advance.
cur_address += cur_pages * PageSize;
@@ -382,7 +410,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Track some or all of the current pages.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
- manager.TrackOptimizedAllocation(cur_address, cur_pages);
+ manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);
// Advance.
cur_address += cur_pages * PageSize;
@@ -427,17 +455,82 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
return total_management_size;
}
-void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
- UNREACHABLE();
+void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+ std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
}
-void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
- UNREACHABLE();
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages) {
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+ // Get the range we're tracking.
+ size_t offset = this->GetPageOffset(block);
+ const size_t last = offset + num_pages - 1;
+
+ // Track.
+ while (offset <= last) {
+ // Mark the page as not being optimized-allocated.
+ optimize_map[offset / Common::BitSize<u64>()] &=
+ ~(u64(1) << (offset % Common::BitSize<u64>()));
+
+ offset++;
+ }
+}
+
+void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages) {
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
+
+ // Get the range we're tracking.
+ size_t offset = this->GetPageOffset(block);
+ const size_t last = offset + num_pages - 1;
+
+ // Track.
+ while (offset <= last) {
+ // Mark the page as being optimized-allocated.
+ optimize_map[offset / Common::BitSize<u64>()] |=
+ (u64(1) << (offset % Common::BitSize<u64>()));
+
+ offset++;
+ }
}
-bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
- u8 fill_pattern) {
- UNREACHABLE();
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages, u8 fill_pattern) {
+ auto& device_memory = kernel.System().DeviceMemory();
+ auto optimize_pa = KPageTable::GetHeapPhysicalAddress(kernel, m_management_region);
+ auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
+
+ // We want to return whether any pages were newly allocated.
+ bool any_new = false;
+
+ // Get the range we're processing.
+ size_t offset = this->GetPageOffset(block);
+ const size_t last = offset + num_pages - 1;
+
+ // Process.
+ while (offset <= last) {
+ // Check if the page has been optimized-allocated before.
+ if ((optimize_map[offset / Common::BitSize<u64>()] &
+ (u64(1) << (offset % Common::BitSize<u64>()))) == 0) {
+ // If not, it's new.
+ any_new = true;
+
+ // Fill the page.
+ auto* ptr = device_memory.GetPointer<u8>(m_heap.GetAddress());
+ std::memset(ptr + offset * PageSize, fill_pattern, PageSize);
+ }
+
+ offset++;
+ }
+
+ // Return whether any pages were newly allocated.
+ return any_new;
}
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
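
The tracking helpers above treat the optimize map as a flat bitmap with one bit per page: TrackOptimizedAllocation sets bits, TrackUnoptimizedAllocation clears them, and ProcessOptimizedAllocation fills any page whose bit is still clear. A stand-alone sketch of the same word/bit indexing, detached from the kernel types:

#include <cstdint>
#include <vector>

class PageBitmap {
public:
    explicit PageBitmap(std::size_t num_pages) : words_((num_pages + 63) / 64) {}

    // Word index is page / 64, bit index is page % 64, mirroring the u64-array layout above.
    void Set(std::size_t page) {
        words_[page / 64] |= (std::uint64_t{1} << (page % 64));
    }
    void Clear(std::size_t page) {
        words_[page / 64] &= ~(std::uint64_t{1} << (page % 64));
    }
    bool Test(std::size_t page) const {
        return (words_[page / 64] & (std::uint64_t{1} << (page % 64))) != 0;
    }

private:
    std::vector<std::uint64_t> words_;
};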
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 7e4b41319..c5a487af9 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -216,14 +216,14 @@ private:
m_heap.SetInitialUsedSize(reserved_size);
}
- void InitializeOptimizedMemory() {
- UNIMPLEMENTED();
- }
+ void InitializeOptimizedMemory(KernelCore& kernel);
- void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
- void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
+ void TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages);
+ void TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages);
- bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
+ bool ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+ size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return m_pool;
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
deleted file mode 100644
index 217ccbae3..000000000
--- a/src/core/hle/kernel/k_page_table.cpp
+++ /dev/null
@@ -1,3519 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/literals.h"
-#include "common/scope_exit.h"
-#include "common/settings.h"
-#include "core/core.h"
-#include "core/hle/kernel/k_address_space_info.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_group.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_system_control.h"
-#include "core/hle/kernel/k_system_resource.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-namespace {
-
-class KScopedLightLockPair {
- YUZU_NON_COPYABLE(KScopedLightLockPair);
- YUZU_NON_MOVEABLE(KScopedLightLockPair);
-
-private:
- KLightLock* m_lower;
- KLightLock* m_upper;
-
-public:
- KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
- // Ensure our locks are in a consistent order.
- if (std::addressof(lhs) <= std::addressof(rhs)) {
- m_lower = std::addressof(lhs);
- m_upper = std::addressof(rhs);
- } else {
- m_lower = std::addressof(rhs);
- m_upper = std::addressof(lhs);
- }
-
- // Acquire both locks.
- m_lower->Lock();
- if (m_lower != m_upper) {
- m_upper->Lock();
- }
- }
-
- ~KScopedLightLockPair() {
- // Unlock the upper lock.
- if (m_upper != nullptr && m_upper != m_lower) {
- m_upper->Unlock();
- }
-
- // Unlock the lower lock.
- if (m_lower != nullptr) {
- m_lower->Unlock();
- }
- }
-
-public:
- // Utility.
- void TryUnlockHalf(KLightLock& lock) {
- // Only allow unlocking if the lock is half the pair.
- if (m_lower != m_upper) {
- // We want to be sure the lock is one we own.
- if (m_lower == std::addressof(lock)) {
- lock.Unlock();
- m_lower = nullptr;
- } else if (m_upper == std::addressof(lock)) {
- lock.Unlock();
- m_upper = nullptr;
- }
- }
- }
-};
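
The pair class above prevents deadlock by always taking the lower-addressed lock first. For comparison, std::scoped_lock reaches the same goal (deadlock-free acquisition of several locks) through std::lock's avoidance algorithm; the sketch below is illustrative and not part of the deleted file:

#include <mutex>

void TransferLocked(std::mutex& a, std::mutex& b) {
    std::scoped_lock lk(a, b); // acquires both locks without deadlocking, regardless of argument order
    // ... operate on state guarded by both mutexes ...
}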
-
-using namespace Common::Literals;
-
-constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
- switch (as_type) {
- case FileSys::ProgramAddressSpaceType::Is32Bit:
- case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
- return 32;
- case FileSys::ProgramAddressSpaceType::Is36Bit:
- return 36;
- case FileSys::ProgramAddressSpaceType::Is39Bit:
- return 39;
- default:
- ASSERT(false);
- return {};
- }
-}
-
-} // namespace
-
-KPageTable::KPageTable(Core::System& system_)
- : m_general_lock{system_.Kernel()},
- m_map_physical_memory_lock{system_.Kernel()}, m_system{system_}, m_kernel{system_.Kernel()} {}
-
-KPageTable::~KPageTable() = default;
-
-Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- bool enable_das_merge, bool from_back,
- KMemoryManager::Pool pool, KProcessAddress code_addr,
- size_t code_size, KSystemResource* system_resource,
- KResourceLimit* resource_limit,
- Core::Memory::Memory& memory) {
-
- const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
- };
- const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
- return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
- };
-
- // Set the tracking memory
- m_memory = std::addressof(memory);
-
- // Set our width and heap/alias sizes
- m_address_space_width = GetAddressSpaceWidthFromType(as_type);
- const KProcessAddress start = 0;
- const KProcessAddress end{1ULL << m_address_space_width};
- size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
- size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
-
- ASSERT(code_addr < code_addr + code_size);
- ASSERT(code_addr + code_size - 1 <= end - 1);
-
- // Adjust heap/alias size if we don't have an alias region
- if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
- heap_region_size += alias_region_size;
- alias_region_size = 0;
- }
-
- // Set code regions and determine remaining
- constexpr size_t RegionAlignment{2_MiB};
- KProcessAddress process_code_start{};
- KProcessAddress process_code_end{};
- size_t stack_region_size{};
- size_t kernel_map_region_size{};
-
- if (m_address_space_width == 39) {
- alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
- heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
- stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
- kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
- m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
- m_alias_code_region_start = m_code_region_start;
- m_alias_code_region_end = m_code_region_end;
- process_code_start = Common::AlignDown(GetInteger(code_addr), RegionAlignment);
- process_code_end = Common::AlignUp(GetInteger(code_addr) + code_size, RegionAlignment);
- } else {
- stack_region_size = 0;
- kernel_map_region_size = 0;
- m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
- m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
- m_stack_region_start = m_code_region_start;
- m_alias_code_region_start = m_code_region_start;
- m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
- GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
- m_stack_region_end = m_code_region_end;
- m_kernel_map_region_start = m_code_region_start;
- m_kernel_map_region_end = m_code_region_end;
- process_code_start = m_code_region_start;
- process_code_end = m_code_region_end;
- }
-
- // Set other basic fields
- m_enable_aslr = enable_aslr;
- m_enable_device_address_space_merge = enable_das_merge;
- m_address_space_start = start;
- m_address_space_end = end;
- m_is_kernel = false;
- m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
- m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
- m_resource_limit = resource_limit;
-
- // Determine the region in which we can place the remaining (alias/heap/stack/kernel map) regions
- KProcessAddress alloc_start{};
- size_t alloc_size{};
- if ((process_code_start - m_code_region_start) >= (end - process_code_end)) {
- alloc_start = m_code_region_start;
- alloc_size = process_code_start - m_code_region_start;
- } else {
- alloc_start = process_code_end;
- alloc_size = end - process_code_end;
- }
- const size_t needed_size =
- (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
- R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
-
- const size_t remaining_size{alloc_size - needed_size};
-
- // Determine random placements for each region
- size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
- if (enable_aslr) {
- alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
- RegionAlignment;
- }
-
- // Setup heap and alias regions
- m_alias_region_start = alloc_start + alias_rnd;
- m_alias_region_end = m_alias_region_start + alias_region_size;
- m_heap_region_start = alloc_start + heap_rnd;
- m_heap_region_end = m_heap_region_start + heap_region_size;
-
- if (alias_rnd <= heap_rnd) {
- m_heap_region_start += alias_region_size;
- m_heap_region_end += alias_region_size;
- } else {
- m_alias_region_start += heap_region_size;
- m_alias_region_end += heap_region_size;
- }
-
- // Setup stack region
- if (stack_region_size) {
- m_stack_region_start = alloc_start + stack_rnd;
- m_stack_region_end = m_stack_region_start + stack_region_size;
-
- if (alias_rnd < stack_rnd) {
- m_stack_region_start += alias_region_size;
- m_stack_region_end += alias_region_size;
- } else {
- m_alias_region_start += stack_region_size;
- m_alias_region_end += stack_region_size;
- }
-
- if (heap_rnd < stack_rnd) {
- m_stack_region_start += heap_region_size;
- m_stack_region_end += heap_region_size;
- } else {
- m_heap_region_start += stack_region_size;
- m_heap_region_end += stack_region_size;
- }
- }
-
- // Setup kernel map region
- if (kernel_map_region_size) {
- m_kernel_map_region_start = alloc_start + kmap_rnd;
- m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
-
- if (alias_rnd < kmap_rnd) {
- m_kernel_map_region_start += alias_region_size;
- m_kernel_map_region_end += alias_region_size;
- } else {
- m_alias_region_start += kernel_map_region_size;
- m_alias_region_end += kernel_map_region_size;
- }
-
- if (heap_rnd < kmap_rnd) {
- m_kernel_map_region_start += heap_region_size;
- m_kernel_map_region_end += heap_region_size;
- } else {
- m_heap_region_start += kernel_map_region_size;
- m_heap_region_end += kernel_map_region_size;
- }
-
- if (stack_region_size) {
- if (stack_rnd < kmap_rnd) {
- m_kernel_map_region_start += stack_region_size;
- m_kernel_map_region_end += stack_region_size;
- } else {
- m_stack_region_start += kernel_map_region_size;
- m_stack_region_end += kernel_map_region_size;
- }
- }
- }
-
- // Set heap and fill members.
- m_current_heap_end = m_heap_region_start;
- m_max_heap_size = 0;
- m_mapped_physical_memory_size = 0;
- m_mapped_unsafe_physical_memory = 0;
- m_mapped_insecure_memory = 0;
- m_mapped_ipc_server_memory = 0;
-
- m_heap_fill_value = 0;
- m_ipc_fill_value = 0;
- m_stack_fill_value = 0;
-
- // Set allocation option.
- m_allocate_option =
- KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
- : KMemoryManager::Direction::FromFront);
-
- // Ensure that our regions are inside our address space
- auto IsInAddressSpace = [&](KProcessAddress addr) {
- return m_address_space_start <= addr && addr <= m_address_space_end;
- };
- ASSERT(IsInAddressSpace(m_alias_region_start));
- ASSERT(IsInAddressSpace(m_alias_region_end));
- ASSERT(IsInAddressSpace(m_heap_region_start));
- ASSERT(IsInAddressSpace(m_heap_region_end));
- ASSERT(IsInAddressSpace(m_stack_region_start));
- ASSERT(IsInAddressSpace(m_stack_region_end));
- ASSERT(IsInAddressSpace(m_kernel_map_region_start));
- ASSERT(IsInAddressSpace(m_kernel_map_region_end));
-
- // Ensure that we selected regions that don't overlap
- const KProcessAddress alias_start{m_alias_region_start};
- const KProcessAddress alias_last{m_alias_region_end - 1};
- const KProcessAddress heap_start{m_heap_region_start};
- const KProcessAddress heap_last{m_heap_region_end - 1};
- const KProcessAddress stack_start{m_stack_region_start};
- const KProcessAddress stack_last{m_stack_region_end - 1};
- const KProcessAddress kmap_start{m_kernel_map_region_start};
- const KProcessAddress kmap_last{m_kernel_map_region_end - 1};
- ASSERT(alias_last < heap_start || heap_last < alias_start);
- ASSERT(alias_last < stack_start || stack_last < alias_start);
- ASSERT(alias_last < kmap_start || kmap_last < alias_start);
- ASSERT(heap_last < stack_start || stack_last < heap_start);
- ASSERT(heap_last < kmap_start || kmap_last < heap_start);
-
- m_current_heap_end = m_heap_region_start;
- m_max_heap_size = 0;
- m_mapped_physical_memory_size = 0;
- m_memory_pool = pool;
-
- m_page_table_impl = std::make_unique<Common::PageTable>();
- m_page_table_impl->Resize(m_address_space_width, PageBits);
-
- // Initialize our memory block manager.
- R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
- m_memory_block_slab_manager));
-}
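
The ASLR offsets computed above stay RegionAlignment-aligned because a random slot count is drawn first and then scaled back up by the alignment. A detached sketch of that arithmetic; PickAlignedOffset is illustrative and uses std::mt19937_64 in place of KSystemControl's generator:

#include <cstddef>
#include <random>

std::size_t PickAlignedOffset(std::size_t remaining_size, std::size_t alignment,
                              std::mt19937_64& rng) {
    const std::size_t slots = remaining_size / alignment;       // number of aligned positions
    std::uniform_int_distribution<std::size_t> dist(0, slots);  // mirrors GenerateRandomRange(0, slots)
    return dist(rng) * alignment;                                // always a multiple of alignment
}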
-
-void KPageTable::Finalize() {
- auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
- if (Settings::IsFastmemEnabled()) {
- m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
- }
- };
-
- // Finalize memory blocks.
- m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
-
- // Release any insecure mapped memory.
- if (m_mapped_insecure_memory) {
- UNIMPLEMENTED();
- }
-
- // Release any ipc server memory.
- if (m_mapped_ipc_server_memory) {
- UNIMPLEMENTED();
- }
-
- // Close the backing page table, as the destructor is not called for guest objects.
- m_page_table_impl.reset();
-}
-
-Result KPageTable::MapProcessCode(KProcessAddress addr, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- const u64 size{num_pages * PageSize};
-
- // Validate the mapping request.
- R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the destination memory is unmapped.
- R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
-
- // Allocate and open.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, num_pages,
- KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option)));
-
- R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Validate the mapping request.
- R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
- ResultInvalidMemoryRegion);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the source memory is normal heap.
- KMemoryState src_state{};
- KMemoryPermission src_perm{};
- size_t num_src_allocator_blocks{};
- R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
- src_address, size, KMemoryState::All, KMemoryState::Normal,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Verify that the destination memory is unmapped.
- size_t num_dst_allocator_blocks{};
- R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Map the code memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create page groups for the memory being mapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
- AddRegionToPages(src_address, num_pages, pg);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reprotect the source as kernel-read/not mapped.
- const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
- KMemoryPermission::NotMapped);
- R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
-
- // Ensure that we unprotect the source pages on failure.
- auto unprot_guard = SCOPE_GUARD({
- ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
- .IsSuccess());
- });
-
- // Map the alias pages.
- const KPageProperties dst_properties = {new_perm, false, false,
- DisableMergeAttribute::DisableHead};
- R_TRY(
- this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
-
- // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
- // failure.
- unprot_guard.Cancel();
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
- src_state, new_perm, KMemoryAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
- KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy) {
- // Validate the mapping request.
- R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
- ResultInvalidMemoryRegion);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify that the source memory is locked normal heap.
- size_t num_src_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
- KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked));
-
- // Verify that the destination memory is aliasable code.
- size_t num_dst_allocator_blocks{};
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
- KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
-
- // Determine whether any pages being unmapped are code.
- bool any_code_pages = false;
- {
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
- while (true) {
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Check if the memory has code flag.
- if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
- any_code_pages = true;
- break;
- }
-
- // Check if we're done.
- if (dst_address + size - 1 <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
- }
-
- // Ensure that we maintain the instruction cache.
- bool reprotected_pages = false;
- SCOPE_EXIT({
- if (reprotected_pages && any_code_pages) {
- if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
- m_system.InvalidateCpuInstructionCacheRange(GetInteger(dst_address), size);
- } else {
- m_system.InvalidateCpuInstructionCaches();
- }
- }
- });
-
- // Unmap.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create an update allocator for the source.
- Result src_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Unmap the aliased copy of the pages.
- R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Try to set the permissions for the source pages back to what they should be.
- R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
- OperationType::ChangePermissions));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
- m_memory_block_manager.Update(
- std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
-
- // Note that we reprotected pages.
- reprotected_pages = true;
- }
-
- R_SUCCEED();
-}
-
-KProcessAddress KPageTable::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages) {
- KProcessAddress address = 0;
-
- if (num_pages <= region_num_pages) {
- if (this->IsAslrEnabled()) {
- UNIMPLEMENTED();
- }
- // Find the first free area.
- if (address == 0) {
- address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
- alignment, offset, guard_pages);
- }
- }
-
- return address;
-}
-
-Result KPageTable::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
- ASSERT(this->IsLockedByCurrentThread());
-
- const size_t size = num_pages * PageSize;
-
- // We're making a new group, not adding to an existing one.
- R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr)),
- ResultInvalidCurrentMemory);
-
- // Prepare tracking variables.
- KPhysicalAddress cur_addr = next_entry.phys_addr;
- size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
- size_t tot_size = cur_size;
-
- // Iterate, adding to group as we go.
- const auto& memory_layout = m_system.Kernel().MemoryLayout();
- while (tot_size < size) {
- R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context),
- ResultInvalidCurrentMemory);
-
- if (next_entry.phys_addr != (cur_addr + cur_size)) {
- const size_t cur_pages = cur_size / PageSize;
-
- R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
- R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
- cur_addr = next_entry.phys_addr;
- cur_size = next_entry.block_size;
- } else {
- cur_size += next_entry.block_size;
- }
-
- tot_size += next_entry.block_size;
- }
-
- // Ensure we add the right amount for the last block.
- if (tot_size > size) {
- cur_size -= (tot_size - size);
- }
-
- // Add the last block.
- const size_t cur_pages = cur_size / PageSize;
- R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
- R_TRY(pg.AddBlock(cur_addr, cur_pages));
-
- R_SUCCEED();
-}
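
MakePageGroup coalesces traversal entries into blocks: it extends the current run while the next entry is physically contiguous and flushes the run on a discontinuity. A detached sketch of that rule, not taken from the deleted file:

#include <cstdint>
#include <utility>
#include <vector>

struct Run {
    std::uint64_t base;
    std::uint64_t size;
};

// Merge (address, size) entries into maximal physically-contiguous runs.
std::vector<Run> Coalesce(const std::vector<std::pair<std::uint64_t, std::uint64_t>>& entries) {
    std::vector<Run> runs;
    for (const auto& [addr, size] : entries) {
        if (!runs.empty() && runs.back().base + runs.back().size == addr) {
            runs.back().size += size;     // contiguous with the previous entry: extend the run
        } else {
            runs.push_back({addr, size}); // discontinuity: start a new run
        }
    }
    return runs;
}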
-
-bool KPageTable::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
- ASSERT(this->IsLockedByCurrentThread());
-
- const size_t size = num_pages * PageSize;
- const auto& memory_layout = m_system.Kernel().MemoryLayout();
-
- // Empty groups are necessarily invalid.
- if (pg.empty()) {
- return false;
- }
-
- // We're going to validate that the group we'd expect is the group we see.
- auto cur_it = pg.begin();
- KPhysicalAddress cur_block_address = cur_it->GetAddress();
- size_t cur_block_pages = cur_it->GetNumPages();
-
- auto UpdateCurrentIterator = [&]() {
- if (cur_block_pages == 0) {
- if ((++cur_it) == pg.end()) {
- return false;
- }
-
- cur_block_address = cur_it->GetAddress();
- cur_block_pages = cur_it->GetNumPages();
- }
- return true;
- };
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- if (!m_page_table_impl->BeginTraversal(next_entry, context, GetInteger(addr))) {
- return false;
- }
-
- // Prepare tracking variables.
- KPhysicalAddress cur_addr = next_entry.phys_addr;
- size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
- size_t tot_size = cur_size;
-
- // Iterate, comparing expected to actual.
- while (tot_size < size) {
- if (!m_page_table_impl->ContinueTraversal(next_entry, context)) {
- return false;
- }
-
- if (next_entry.phys_addr != (cur_addr + cur_size)) {
- const size_t cur_pages = cur_size / PageSize;
-
- if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
- return false;
- }
-
- if (!UpdateCurrentIterator()) {
- return false;
- }
-
- if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
- return false;
- }
-
- cur_block_address += cur_size;
- cur_block_pages -= cur_pages;
- cur_addr = next_entry.phys_addr;
- cur_size = next_entry.block_size;
- } else {
- cur_size += next_entry.block_size;
- }
-
- tot_size += next_entry.block_size;
- }
-
- // Ensure we compare the right amount for the last block.
- if (tot_size > size) {
- cur_size -= (tot_size - size);
- }
-
- if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
- return false;
- }
-
- if (!UpdateCurrentIterator()) {
- return false;
- }
-
- return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
-}
-
-Result KPageTable::UnmapProcessMemory(KProcessAddress dst_addr, size_t size,
- KPageTable& src_page_table, KProcessAddress src_addr) {
- // Acquire the table locks.
- KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
-
- const size_t num_pages{size / PageSize};
-
- // Check that the memory is mapped in the destination process.
- size_t num_allocator_blocks;
- R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
- KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Check that the memory is mapped in the source process.
- R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
- KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- R_TRY(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Apply the memory block update.
- m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- m_system.InvalidateCpuInstructionCaches();
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- KProcessAddress address, size_t size,
- KMemoryPermission test_perm, KMemoryState dst_state) {
- // Validate pre-conditions.
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
- test_perm == KMemoryPermission::UserRead);
-
- // Check that the address is in range.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Get the source permission.
- const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
- ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
- : KMemoryPermission::UserRead;
-
- // Get aligned extents.
- const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
- const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
- const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
-
- const auto aligned_src_last = aligned_src_end - 1;
- const auto mapping_src_last = mapping_src_end - 1;
-
- // Get the test state and attribute mask.
- KMemoryState test_state;
- KMemoryAttribute test_attr_mask;
- switch (dst_state) {
- case KMemoryState::Ipc:
- test_state = KMemoryState::FlagCanUseIpc;
- test_attr_mask =
- KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonSecureIpc:
- test_state = KMemoryState::FlagCanUseNonSecureIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonDeviceIpc:
- test_state = KMemoryState::FlagCanUseNonDeviceIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- default:
- R_THROW(ResultInvalidCombination);
- }
-
- // Ensure that on failure, we roll back appropriately.
- size_t mapped_size = 0;
- ON_RESULT_FAILURE {
- if (mapped_size > 0) {
- this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
- src_perm);
- }
- };
-
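- // Count the block splits the caller will need to reserve in its update allocator.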
- size_t blocks_needed = 0;
-
- // Iterate, mapping as needed.
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
- while (true) {
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Validate the current block.
- R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
- test_attr_mask, KMemoryAttribute::None));
-
- if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
- info.GetAddress() < GetInteger(mapping_src_end)) {
- const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
- ? info.GetAddress()
- : (mapping_src_start);
- const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
- : (mapping_src_end);
- const size_t cur_size = cur_end - cur_start;
-
- if (info.GetAddress() < GetInteger(mapping_src_start)) {
- ++blocks_needed;
- }
- if (mapping_src_last < info.GetLastAddress()) {
- ++blocks_needed;
- }
-
- // Set the permissions on the block, if we need to.
- if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
- R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
- OperationType::ChangePermissions));
- }
-
- // Note that we mapped this part.
- mapped_size += cur_size;
- }
-
- // If the block is at the end, we're done.
- if (aligned_src_last <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- ASSERT(it != m_memory_block_manager.end());
- }
-
- if (out_blocks_needed != nullptr) {
- ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- *out_blocks_needed = blocks_needed;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
- KProcessAddress src_addr, KMemoryPermission test_perm,
- KMemoryState dst_state, KPageTable& src_page_table,
- bool send) {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(src_page_table.IsLockedByCurrentThread());
-
- // Check that we can theoretically map.
- const KProcessAddress region_start = m_alias_region_start;
- const size_t region_size = m_alias_region_end - m_alias_region_start;
- R_UNLESS(size < region_size, ResultOutOfAddressSpace);
-
- // Get aligned source extents.
- const KProcessAddress src_start = src_addr;
- const KProcessAddress src_end = src_addr + size;
- const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
- const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
- const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
- const KProcessAddress mapping_src_end =
- Common::AlignDown(GetInteger(src_start) + size, PageSize);
- const size_t aligned_src_size = aligned_src_end - aligned_src_start;
- const size_t mapping_src_size =
- (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
-
- // Select a random address to map at.
- KProcessAddress dst_addr =
- this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
- PageSize, 0, this->GetNumGuardPages());
-
- R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
-
- // Check that the destination region can contain the mapping we're about to perform.
- ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reserve space for any partial pages we allocate.
- const size_t unmapped_size = aligned_src_size - mapping_src_size;
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Ensure that we manage page references correctly.
- KPhysicalAddress start_partial_page = 0;
- KPhysicalAddress end_partial_page = 0;
- KProcessAddress cur_mapped_addr = dst_addr;
-
- // If the partial pages are mapped, an extra reference will have been opened. Otherwise,
- // they'll be freed on scope exit.
- SCOPE_EXIT({
- if (start_partial_page != 0) {
- m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
- }
- if (end_partial_page != 0) {
- m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
- }
- });
-
- ON_RESULT_FAILURE {
- if (cur_mapped_addr != dst_addr) {
- ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
- KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
- }
- };
-
- // Allocate the start page as needed.
- if (aligned_src_start < mapping_src_start) {
- start_partial_page =
- m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
- R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
- }
-
- // Allocate the end page as needed.
- if (mapping_src_end < aligned_src_end &&
- (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
- end_partial_page =
- m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
- R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
- }
-
- // Get the implementation.
- auto& src_impl = src_page_table.PageTableImpl();
-
- // Get the fill value for partial pages.
- const auto fill_val = m_ipc_fill_value;
-
- // Begin traversal.
- Common::PageTable::TraversalContext context;
- Common::PageTable::TraversalEntry next_entry;
- bool traverse_valid =
- src_impl.BeginTraversal(next_entry, context, GetInteger(aligned_src_start));
- ASSERT(traverse_valid);
-
- // Prepare tracking variables.
- KPhysicalAddress cur_block_addr = next_entry.phys_addr;
- size_t cur_block_size =
- next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
- size_t tot_block_size = cur_block_size;
-
- // Map the start page, if we have one.
- if (start_partial_page != 0) {
- // Ensure the page holds correct data.
- const KVirtualAddress start_partial_virt =
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
- if (send) {
- const size_t partial_offset = src_start - aligned_src_start;
- size_t copy_size, clear_size;
- if (src_end < mapping_src_start) {
- copy_size = size;
- clear_size = mapping_src_start - src_end;
- } else {
- copy_size = mapping_src_start - src_start;
- clear_size = 0;
- }
-
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
- partial_offset);
- std::memcpy(
- m_memory->GetPointer<void>(GetInteger(start_partial_virt) + partial_offset),
- m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr)) +
- partial_offset),
- copy_size);
- if (clear_size > 0) {
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt) +
- partial_offset + copy_size),
- fill_val, clear_size);
- }
- } else {
- std::memset(m_memory->GetPointer<void>(GetInteger(start_partial_virt)), fill_val,
- PageSize);
- }
-
- // Map the page.
- R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
-
- // Update tracking extents.
- cur_mapped_addr += PageSize;
- cur_block_addr += PageSize;
- cur_block_size -= PageSize;
-
- // If the block's size was one page, we may need to continue traversal.
- if (cur_block_size == 0 && aligned_src_size > PageSize) {
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- cur_block_addr = next_entry.phys_addr;
- cur_block_size = next_entry.block_size;
- tot_block_size += next_entry.block_size;
- }
- }
-
- // Map the remaining pages.
- while (aligned_src_start + tot_block_size < mapping_src_end) {
- // Continue the traversal.
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- // Process the block.
- if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
- // Map the block we've been processing so far.
- R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
- cur_block_addr));
-
- // Update tracking extents.
- cur_mapped_addr += cur_block_size;
- cur_block_addr = next_entry.phys_addr;
- cur_block_size = next_entry.block_size;
- } else {
- cur_block_size += next_entry.block_size;
- }
- tot_block_size += next_entry.block_size;
- }
-
- // Handle the last direct-mapped page.
- if (const KProcessAddress mapped_block_end =
- aligned_src_start + tot_block_size - cur_block_size;
- mapped_block_end < mapping_src_end) {
- const size_t last_block_size = mapping_src_end - mapped_block_end;
-
- // Map the last block.
- R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
- cur_block_addr));
-
- // Update tracking extents.
- cur_mapped_addr += last_block_size;
- cur_block_addr += last_block_size;
- if (mapped_block_end + cur_block_size < aligned_src_end &&
- cur_block_size == last_block_size) {
- traverse_valid = src_impl.ContinueTraversal(next_entry, context);
- ASSERT(traverse_valid);
-
- cur_block_addr = next_entry.phys_addr;
- }
- }
-
- // Map the end page, if we have one.
- if (end_partial_page != 0) {
- // Ensure the page holds correct data.
- const KVirtualAddress end_partial_virt =
- GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
- if (send) {
- const size_t copy_size = src_end - mapping_src_end;
- std::memcpy(m_memory->GetPointer<void>(GetInteger(end_partial_virt)),
- m_memory->GetPointer<void>(GetInteger(GetHeapVirtualAddress(
- m_system.Kernel().MemoryLayout(), cur_block_addr))),
- copy_size);
- std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt) + copy_size),
- fill_val, PageSize - copy_size);
- } else {
- std::memset(m_memory->GetPointer<void>(GetInteger(end_partial_virt)), fill_val,
- PageSize);
- }
-
- // Map the page.
- R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
- }
-
- // Update memory blocks to reflect our changes.
- m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
- dst_state, test_perm, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Set the output address.
- *out_addr = dst_addr + (src_start - aligned_src_start);
-
- // We succeeded.
- memory_reservation.Commit();
- R_SUCCEED();
-}
-
-Result KPageTable::SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
- KPageTable& src_page_table, KMemoryPermission test_perm,
- KMemoryState dst_state, bool send) {
- // For convenience, alias this.
- KPageTable& dst_page_table = *this;
-
- // Acquire the table locks.
- KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(std::addressof(src_page_table));
-
- // Perform client setup.
- size_t num_allocator_blocks;
- R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
- std::addressof(num_allocator_blocks), src_addr, size,
- test_perm, dst_state));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- src_page_table.m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Get the mapped extents.
- const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
- const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
- const size_t src_map_size = src_map_end - src_map_start;
-
- // Ensure that we clean up appropriately if we fail after this.
- const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite)
- ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
- : KMemoryPermission::UserRead;
- ON_RESULT_FAILURE {
- if (src_map_end > src_map_start) {
- src_page_table.CleanupForIpcClientOnServerSetupFailure(
- updater.GetPageList(), src_map_start, src_map_size, src_perm);
- }
- };
-
- // Perform server setup.
- R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
- src_page_table, send));
-
- // If anything was mapped, ipc-lock the pages.
- if (src_map_start < src_map_end) {
- // Get the source permission.
- src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
- (src_map_end - src_map_start) / PageSize,
- &KMemoryBlock::LockForIpc, src_perm);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CleanupForIpcServer(KProcessAddress address, size_t size,
- KMemoryState dst_state) {
- // Validate the address.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, dst_state, KMemoryPermission::UserRead,
- KMemoryPermission::UserRead, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Get aligned extents.
- const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
- const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
- const size_t aligned_size = aligned_end - aligned_start;
- const size_t aligned_num_pages = aligned_size / PageSize;
-
- // Unmap the pages.
- R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
-
- // Update memory blocks.
- m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
- KMemoryState::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- // Release from the resource limit as relevant.
- const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
- const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size);
-
- R_SUCCEED();
-}
-
-Result KPageTable::CleanupForIpcClient(KProcessAddress address, size_t size,
- KMemoryState dst_state) {
- // Validate the address.
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Get aligned source extents.
- const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
- const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
- const KProcessAddress mapping_last = mapping_end - 1;
- const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
-
- // If nothing was mapped, we're actually done immediately.
- R_SUCCEED_IF(mapping_size == 0);
-
- // Get the test state and attribute mask.
- KMemoryState test_state;
- KMemoryAttribute test_attr_mask;
- switch (dst_state) {
- case KMemoryState::Ipc:
- test_state = KMemoryState::FlagCanUseIpc;
- test_attr_mask =
- KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonSecureIpc:
- test_state = KMemoryState::FlagCanUseNonSecureIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- case KMemoryState::NonDeviceIpc:
- test_state = KMemoryState::FlagCanUseNonDeviceIpc;
- test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
- break;
- default:
- R_THROW(ResultInvalidCombination);
- }
-
- // Lock the table.
- // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
- // convention elsewhere in KPageTable.
- KScopedLightLock lk(m_general_lock);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Ensure that on failure, we roll back appropriately.
- size_t mapped_size = 0;
- ON_RESULT_FAILURE {
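- // On failure, walk the range we already reprotected and re-apply the blocks'
- // ipc-locked permissions.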
- if (mapped_size > 0) {
- // Determine where the mapping ends.
- const auto mapped_end = (mapping_start) + mapped_size;
- const auto mapped_last = mapped_end - 1;
-
- // Get current and next iterators.
- KMemoryBlockManager::const_iterator start_it =
- m_memory_block_manager.FindIterator(mapping_start);
- KMemoryBlockManager::const_iterator next_it = start_it;
- ++next_it;
-
- // Get the current block info.
- KMemoryInfo cur_info = start_it->GetMemoryInfo();
-
- // Create tracking variables.
- KProcessAddress cur_address = cur_info.GetAddress();
- size_t cur_size = cur_info.GetSize();
- bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
- bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
- bool first =
- cur_info.GetIpcDisableMergeCount() == 1 &&
- (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
- KMemoryBlockDisableMergeAttribute::None;
-
- while (((cur_address) + cur_size - 1) < mapped_last) {
- // Check that we have a next block.
- ASSERT(next_it != m_memory_block_manager.end());
-
- // Get the next info.
- const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
- // Check if we can consolidate the next block's permission set with the current one.
- const bool next_perm_eq =
- next_info.GetPermission() == next_info.GetOriginalPermission();
- const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
- if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
- cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
- // We can consolidate the reprotection for the current and next block into a
- // single call.
- cur_size += next_info.GetSize();
- } else {
- // We have to operate on the current block.
- if ((cur_needs_set_perm || first) && !cur_perm_eq) {
- ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
-
- // Advance.
- cur_address = next_info.GetAddress();
- cur_size = next_info.GetSize();
- first = false;
- }
-
- // Advance.
- cur_info = next_info;
- cur_perm_eq = next_perm_eq;
- cur_needs_set_perm = next_needs_set_perm;
- ++next_it;
- }
-
- // Process the last block.
- if ((first || cur_needs_set_perm) && !cur_perm_eq) {
- ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
- }
- };
-
- // Iterate, reprotecting as needed.
- {
- // Get current and next iterators.
- KMemoryBlockManager::const_iterator start_it =
- m_memory_block_manager.FindIterator(mapping_start);
- KMemoryBlockManager::const_iterator next_it = start_it;
- ++next_it;
-
- // Validate the current block.
- KMemoryInfo cur_info = start_it->GetMemoryInfo();
- ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
- KMemoryPermission::None,
- test_attr_mask | KMemoryAttribute::IpcLocked,
- KMemoryAttribute::IpcLocked)
- .IsSuccess());
-
- // Create tracking variables.
- KProcessAddress cur_address = cur_info.GetAddress();
- size_t cur_size = cur_info.GetSize();
- bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
- bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
- bool first =
- cur_info.GetIpcDisableMergeCount() == 1 &&
- (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
- KMemoryBlockDisableMergeAttribute::None;
-
- while ((cur_address + cur_size - 1) < mapping_last) {
- // Check that we have a next block.
- ASSERT(next_it != m_memory_block_manager.end());
-
- // Get the next info.
- const KMemoryInfo next_info = next_it->GetMemoryInfo();
-
- // Validate the next block.
- ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
- KMemoryPermission::None, KMemoryPermission::None,
- test_attr_mask | KMemoryAttribute::IpcLocked,
- KMemoryAttribute::IpcLocked)
- .IsSuccess());
-
- // Check if we can consolidate the next block's permission set with the current one.
- const bool next_perm_eq =
- next_info.GetPermission() == next_info.GetOriginalPermission();
- const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
- if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
- cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
- // We can consolidate the reprotection for the current and next block into a single
- // call.
- cur_size += next_info.GetSize();
- } else {
- // We have to operate on the current block.
- if ((cur_needs_set_perm || first) && !cur_perm_eq) {
- R_TRY(Operate(cur_address, cur_size / PageSize,
- cur_needs_set_perm ? cur_info.GetOriginalPermission()
- : cur_info.GetPermission(),
- OperationType::ChangePermissions));
- }
-
- // Mark that we mapped the block.
- mapped_size += cur_size;
-
- // Advance.
- cur_address = next_info.GetAddress();
- cur_size = next_info.GetSize();
- first = false;
- }
-
- // Advance.
- cur_info = next_info;
- cur_perm_eq = next_perm_eq;
- cur_needs_set_perm = next_needs_set_perm;
- ++next_it;
- }
-
- // Process the last block.
- const auto lock_count =
- cur_info.GetIpcLockCount() +
- (next_it != m_memory_block_manager.end()
- ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
- : 0);
- if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
- R_TRY(Operate(cur_address, cur_size / PageSize,
- cur_needs_set_perm ? cur_info.GetOriginalPermission()
- : cur_info.GetPermission(),
- OperationType::ChangePermissions));
- }
- }
-
- // Create an update allocator.
- // NOTE: Guaranteed zero blocks needed here.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, 0);
- R_TRY(allocator_result);
-
- // Unlock the pages.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
- mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
- KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
- KProcessAddress address, size_t size,
- KMemoryPermission prot_perm) {
- ASSERT(this->IsLockedByCurrentThread());
- ASSERT(Common::IsAligned(GetInteger(address), PageSize));
- ASSERT(Common::IsAligned(size, PageSize));
-
- // Get the mapped extents.
- const KProcessAddress src_map_start = address;
- const KProcessAddress src_map_end = address + size;
- const KProcessAddress src_map_last = src_map_end - 1;
-
- // This function is only invoked when there's something to do.
- ASSERT(src_map_end > src_map_start);
-
- // Iterate over blocks, fixing permissions.
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
- while (true) {
- const KMemoryInfo info = it->GetMemoryInfo();
-
- const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
- ? info.GetAddress()
- : GetInteger(src_map_start);
- const auto cur_end =
- src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
-
- // If we can, fix the protections on the block.
- if ((info.GetIpcLockCount() == 0 &&
- (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
- (info.GetIpcLockCount() != 0 &&
- (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
- // Check if we actually need to fix the protections on the block.
- if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
- (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
- ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
- OperationType::ChangePermissions)
- .IsSuccess());
- }
- }
-
- // If we're past the end of the region, we're done.
- if (src_map_last <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- ASSERT(it != m_memory_block_manager.end());
- }
-}
-
-Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
- // Calculate the last address for convenience.
- const KProcessAddress last_address = address + size - 1;
-
- // Define iteration variables.
- KProcessAddress cur_address;
- size_t mapped_size;
-
- // The entire mapping process can be retried.
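- // (If a concurrent map/unmap is detected below, we retry from this point.)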
- while (true) {
- // Check if the memory is already mapped.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Iterate over the memory.
- cur_address = address;
- mapped_size = 0;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- if (info.GetState() != KMemoryState::Free) {
- mapped_size += (last_address + 1 - cur_address);
- }
- break;
- }
-
- // Track the memory if it's mapped.
- if (info.GetState() != KMemoryState::Free) {
- mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If the size mapped is the size requested, we've nothing to do.
- R_SUCCEED_IF(size == mapped_size);
- }
-
- // Allocate and map the memory.
- {
- // Reserve the memory from the process resource limit.
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Allocate pages for the new memory.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
- &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
-
- // If we fail in the next bit (or retry), we need to clean up the pages.
- // auto pg_guard = SCOPE_GUARD {
- // pg.OpenFirst();
- // pg.Close();
- //};
-
- // Map the memory.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- size_t num_allocator_blocks = 0;
-
- // Verify that nobody has mapped memory since we first checked.
- {
- // Iterate over the memory.
- size_t checked_mapped_size = 0;
- cur_address = address;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- const bool is_free = info.GetState() == KMemoryState::Free;
- if (is_free) {
- if (info.GetAddress() < GetInteger(address)) {
- ++num_allocator_blocks;
- }
- if (last_address < info.GetLastAddress()) {
- ++num_allocator_blocks;
- }
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- if (!is_free) {
- checked_mapped_size += (last_address + 1 - cur_address);
- }
- break;
- }
-
- // Track the memory if it's mapped.
- if (!is_free) {
- checked_mapped_size +=
- KProcessAddress(info.GetEndAddress()) - cur_address;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If the size now isn't what it was before, somebody mapped or unmapped
- // concurrently. If this happened, retry.
- if (mapped_size != checked_mapped_size) {
- continue;
- }
- }
-
- // Create an update allocator.
- ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Prepare to iterate over the memory.
- auto pg_it = pg.begin();
- KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- // Reset the current tracking address, and make sure we clean up on failure.
- // pg_guard.Cancel();
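- // NOTE: The commented-out scope guard above is superseded by the ON_RESULT_FAILURE handler
- // below, which unmaps anything we managed to map and releases the still-unmapped pages.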
- cur_address = address;
- ON_RESULT_FAILURE {
- if (cur_address > address) {
- const KProcessAddress last_unmap_address = cur_address - 1;
-
- // Iterate, unmapping the pages.
- cur_address = address;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory state is free, we mapped it and need to unmap it.
- if (info.GetState() == KMemoryState::Free) {
- // Determine the range to unmap.
- const size_t cur_pages =
- std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_unmap_address + 1 - cur_address) /
- PageSize;
-
- // Unmap.
- ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
- OperationType::Unmap)
- .IsSuccess());
- }
-
- // Check if we're done.
- if (last_unmap_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
- }
-
- // Release any remaining unmapped memory.
- m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
- m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
- for (++pg_it; pg_it != pg.end(); ++pg_it) {
- m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
- pg_it->GetNumPages());
- m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
- pg_it->GetNumPages());
- }
- };
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If it's unmapped, we need to map it.
- if (info.GetState() == KMemoryState::Free) {
- // Determine the range to map.
- size_t map_pages =
- std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
-
- // While we have pages to map, map them.
- {
- // Create a page group for the current mapping range.
- KPageGroup cur_pg(m_kernel, m_block_info_manager);
- {
- ON_RESULT_FAILURE_2 {
- cur_pg.OpenFirst();
- cur_pg.Close();
- };
-
- size_t remain_pages = map_pages;
- while (remain_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Add whatever we can to the current block.
- const size_t cur_pages = std::min(pg_pages, remain_pages);
- R_TRY(cur_pg.AddBlock(pg_phys_addr +
- ((pg_pages - cur_pages) * PageSize),
- cur_pages));
-
- // Advance.
- remain_pages -= cur_pages;
- pg_pages -= cur_pages;
- }
- }
-
- // Map the pages.
- R_TRY(this->Operate(cur_address, map_pages, cur_pg,
- OperationType::MapFirstGroup));
- }
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // We succeeded, so commit the memory reservation.
- memory_reservation.Commit();
-
- // Increase our tracked mapped size.
- m_mapped_physical_memory_size += (size - mapped_size);
-
- // Update the relevant memory blocks.
- m_memory_block_manager.UpdateIfMatch(
- std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- address == this->GetAliasRegionStart()
- ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
- }
- }
- }
-}
-
-Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock phys_lk(m_map_physical_memory_lock);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Calculate the last address for convenience.
- const KProcessAddress last_address = address + size - 1;
-
- // Define iteration variables.
- KProcessAddress map_start_address = 0;
- KProcessAddress map_last_address = 0;
-
- KProcessAddress cur_address;
- size_t mapped_size;
- size_t num_allocator_blocks = 0;
-
- // Check if the memory is mapped.
- {
- // Iterate over the memory.
- cur_address = address;
- mapped_size = 0;
-
- auto it = m_memory_block_manager.FindIterator(cur_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Verify the memory's state.
- const bool is_normal = info.GetState() == KMemoryState::Normal &&
- info.GetAttribute() == KMemoryAttribute::None;
- const bool is_free = info.GetState() == KMemoryState::Free;
- R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
-
- if (is_normal) {
- R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
-
- if (map_start_address == 0) {
- map_start_address = cur_address;
- }
- map_last_address =
- (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
-
- if (info.GetAddress() < GetInteger(address)) {
- ++num_allocator_blocks;
- }
- if (last_address < info.GetLastAddress()) {
- ++num_allocator_blocks;
- }
-
- mapped_size += (map_last_address + 1 - cur_address);
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // If there's nothing mapped, we've nothing to do.
- R_SUCCEED_IF(mapped_size == 0);
- }
-
- // Create an update allocator.
- ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Separate the mapping.
- R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
- KMemoryPermission::None, OperationType::Separate));
-
- // Reset the current tracking address, and make sure we clean up on failure.
- cur_address = address;
-
- // Iterate over the memory, unmapping as we go.
- auto it = m_memory_block_manager.FindIterator(cur_address);
-
- const auto clear_merge_attr =
- (it->GetState() == KMemoryState::Normal &&
- it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
- ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None;
-
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // If the memory state is normal, we need to unmap it.
- if (info.GetState() == KMemoryState::Normal) {
- // Determine the range to unmap.
- const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
- last_address + 1 - cur_address) /
- PageSize;
-
- // Unmap.
- ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
- .IsSuccess());
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- cur_address = info.GetEndAddress();
- ++it;
- }
-
- // Release the memory resource.
- m_mapped_physical_memory_size -= mapped_size;
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size);
-
- // Update memory blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
- KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- clear_merge_attr);
-
- // We succeeded.
- R_SUCCEED();
-}
-
-Result KPageTable::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that the source address's state is valid.
- KMemoryState src_state;
- size_t num_src_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
- std::addressof(num_src_allocator_blocks), src_address, size,
- KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Validate that the dst address's state is valid.
- size_t num_dst_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result;
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result;
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Map the memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create a page group for the memory being mapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Create the page group representing the source.
- R_TRY(this->MakePageGroup(pg, src_address, num_pages));
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Reprotect the source as kernel-read/not mapped.
- const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
- KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
- const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
- const KPageProperties src_properties = {new_src_perm, false, false,
- DisableMergeAttribute::DisableHeadBodyTail};
- R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
- OperationType::ChangePermissions));
-
- // Ensure that we unprotect the source pages on failure.
- ON_RESULT_FAILURE {
- const KPageProperties unprotect_properties = {
- KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::EnableHeadBodyTail};
- ASSERT(this->Operate(src_address, num_pages, unprotect_properties.perm,
- OperationType::ChangePermissions) == ResultSuccess);
- };
-
- // Map the alias pages.
- const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
- false));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
- src_state, new_src_perm, new_src_attr,
- KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
- size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that the source address's state is valid.
- KMemoryState src_state;
- size_t num_src_allocator_blocks;
- R_TRY(this->CheckMemoryState(
- std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
- src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
- KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
- KMemoryAttribute::All, KMemoryAttribute::Locked));
-
- // Validate that the dst address's state is valid.
- KMemoryPermission dst_perm;
- size_t num_dst_allocator_blocks;
- R_TRY(this->CheckMemoryState(
- nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
- dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Create an update allocator for the source.
- Result src_allocator_result;
- KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
- m_memory_block_slab_manager,
- num_src_allocator_blocks);
- R_TRY(src_allocator_result);
-
- // Create an update allocator for the destination.
- Result dst_allocator_result;
- KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
- m_memory_block_slab_manager,
- num_dst_allocator_blocks);
- R_TRY(dst_allocator_result);
-
- // Unmap the memory.
- {
- // Determine the number of pages being operated on.
- const size_t num_pages = size / PageSize;
-
- // Create page groups for the memory being unmapped.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Create the page group representing the destination.
- R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
-
- // Ensure the page group is valid for the source.
- R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Unmap the aliased copy of the pages.
- const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(
- this->Operate(dst_address, num_pages, dst_unmap_properties.perm, OperationType::Unmap));
-
- // Ensure that we re-map the aliased pages on failure.
- ON_RESULT_FAILURE {
- this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
- };
-
- // Try to set the permissions for the source pages back to what they should be.
- const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
- DisableMergeAttribute::EnableAndMergeHeadBodyTail};
- R_TRY(this->Operate(src_address, num_pages, src_properties.perm,
- OperationType::ChangePermissions));
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(
- std::addressof(src_allocator), src_address, num_pages, src_state,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
- m_memory_block_manager.Update(
- std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
- size_t num_pages, KMemoryPermission perm) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Create a page group to hold the pages we allocate.
- KPageGroup pg{m_kernel, m_block_info_manager};
-
- // Allocate the pages.
- R_TRY(
- m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
-
- // Ensure that the page group is closed when we're done working with it.
- SCOPE_EXIT({ pg.Close(); });
-
- // Clear all pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
- it.GetSize());
- }
-
- // Map the pages.
- R_RETURN(this->Operate(address, num_pages, pg, OperationType::MapGroup));
-}
-
-Result KPageTable::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
- const KPageGroup& pg, const KPageProperties properties,
- bool reuse_ll) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Note the current address, so that we can iterate.
- const KProcessAddress start_address = address;
- KProcessAddress cur_address = address;
-
- // Ensure that we clean up on failure.
- ON_RESULT_FAILURE {
- ASSERT(!reuse_ll);
- if (cur_address != start_address) {
- const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- ASSERT(this->Operate(start_address, (cur_address - start_address) / PageSize,
- unmap_properties.perm, OperationType::Unmap) == ResultSuccess);
- }
- };
-
- // Iterate, mapping all pages in the group.
- for (const auto& block : pg) {
- // Map and advance.
- const KPageProperties cur_properties =
- (cur_address == start_address)
- ? properties
- : KPageProperties{properties.perm, properties.io, properties.uncached,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(cur_address, block.GetNumPages(), cur_properties.perm, OperationType::Map,
- block.GetAddress()));
- cur_address += block.GetSize();
- }
-
- // We succeeded!
- R_SUCCEED();
-}
-
-void KPageTable::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
- const KPageGroup& pg) {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Note the current address, so that we can iterate.
- const KProcessAddress start_address = address;
- const KProcessAddress last_address = start_address + size - 1;
- const KProcessAddress end_address = last_address + 1;
-
- // Iterate over the memory.
- auto pg_it = pg.begin();
- ASSERT(pg_it != pg.end());
-
- KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
- size_t pg_pages = pg_it->GetNumPages();
-
- auto it = m_memory_block_manager.FindIterator(start_address);
- while (true) {
- // Check that the iterator is valid.
- ASSERT(it != m_memory_block_manager.end());
-
- // Get the memory info.
- const KMemoryInfo info = it->GetMemoryInfo();
-
- // Determine the range to map.
- KProcessAddress map_address = std::max<KProcessAddress>(info.GetAddress(), start_address);
- const KProcessAddress map_end_address =
- std::min<KProcessAddress>(info.GetEndAddress(), end_address);
- ASSERT(map_end_address != map_address);
-
- // Determine if we should disable head merge.
- const bool disable_head_merge =
- info.GetAddress() >= GetInteger(start_address) &&
- True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
- const KPageProperties map_properties = {
- info.GetPermission(), false, false,
- disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
-
- // While we have pages to map, map them.
- size_t map_pages = (map_end_address - map_address) / PageSize;
- while (map_pages > 0) {
- // Check if we're at the end of the physical block.
- if (pg_pages == 0) {
- // Ensure there are more pages to map.
- ASSERT(pg_it != pg.end());
-
- // Advance our physical block.
- ++pg_it;
- pg_phys_addr = pg_it->GetAddress();
- pg_pages = pg_it->GetNumPages();
- }
-
- // Map whatever we can.
- const size_t cur_pages = std::min(pg_pages, map_pages);
- ASSERT(this->Operate(map_address, cur_pages, map_properties.perm, OperationType::Map,
- pg_phys_addr) == ResultSuccess);
-
- // Advance.
- map_address += cur_pages * PageSize;
- map_pages -= cur_pages;
-
- pg_phys_addr += cur_pages * PageSize;
- pg_pages -= cur_pages;
- }
-
- // Check if we're done.
- if (last_address <= info.GetLastAddress()) {
- break;
- }
-
- // Advance.
- ++it;
- }
-
- // Check that we re-mapped precisely the page group.
- ASSERT((++pg_it) == pg.end());
-}
-
-Result KPageTable::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, bool is_pa_valid,
- KProcessAddress region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm) {
- ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
-
- // Ensure this is a valid map request.
- R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Find a random address to map at.
- KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
- 0, this->GetNumGuardPages());
- R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(Common::IsAligned(GetInteger(addr), alignment));
- ASSERT(this->CanContain(addr, num_pages * PageSize, state));
- ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- if (is_pa_valid) {
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->Operate(addr, num_pages, properties.perm, OperationType::Map, phys_addr));
- } else {
- R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
- }
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- *out_addr = addr;
- R_SUCCEED();
-}
-
-Result KPageTable::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- // Check that the map is in range.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Map the pages.
- R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
- // Check that the unmap is in range.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform the unmap.
- const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(address, num_pages, unmap_properties.perm, OperationType::Unmap));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
- KProcessAddress region_start, size_t region_num_pages,
- KMemoryState state, KMemoryPermission perm) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid map request.
- const size_t num_pages = pg.GetNumPages();
- R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
- ResultInvalidCurrentMemory);
- R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Find a random address to map at.
- KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
- 0, this->GetNumGuardPages());
- R_UNLESS(addr != 0, ResultOutOfMemory);
- ASSERT(this->CanContain(addr, num_pages * PageSize, state));
- ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None) == ResultSuccess);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- *out_addr = addr;
- R_SUCCEED();
-}
-
-Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
- KMemoryPermission perm) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid map request.
- const size_t num_pages = pg.GetNumPages();
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to map.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform mapping operation.
- const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
- R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
- KMemoryBlockDisableMergeAttribute::None);
-
- // We successfully mapped the pages.
- R_SUCCEED();
-}
-
-Result KPageTable::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
- KMemoryState state) {
- ASSERT(!this->IsLockedByCurrentThread());
-
- // Ensure this is a valid unmap request.
- const size_t num_pages = pg.GetNumPages();
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to unmap.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
- KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::None));
-
- // Check that the page group is valid.
- R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // We're going to perform an update, so create a helper.
- KScopedPageTableUpdater updater(this);
-
- // Perform unmapping operation.
- const KPageProperties properties = {KMemoryPermission::None, false, false,
- DisableMergeAttribute::None};
- R_TRY(this->Operate(address, num_pages, properties.perm, OperationType::Unmap));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
- KMemoryPermission::None, KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Normal);
-
- R_SUCCEED();
-}
-
-Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) {
- // Ensure that the page group isn't null.
- ASSERT(out != nullptr);
-
- // Make sure that the region we're mapping is valid for the table.
- const size_t size = num_pages * PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check if state allows us to create the group.
- R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Create a new page group for the region.
- R_TRY(this->MakePageGroup(*out, address, num_pages));
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm) {
- const size_t num_pages = size / PageSize;
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory permission.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
- std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::FlagCode, KMemoryState::FlagCode,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Determine new perm/state.
- const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
- KMemoryState new_state = old_state;
- const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
- const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
- const bool was_x =
- (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
- ASSERT(!(is_w && is_x));
-
- if (is_w) {
- switch (old_state) {
- case KMemoryState::Code:
- new_state = KMemoryState::CodeData;
- break;
- case KMemoryState::AliasCode:
- new_state = KMemoryState::AliasCodeData;
- break;
- default:
- ASSERT(false);
- break;
- }
- }
-
- // Succeed if there's nothing to do.
- R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Perform mapping operation.
- const auto operation =
- was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
- R_TRY(Operate(addr, num_pages, new_perm, operation));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Ensure cache coherency, if we're setting pages as executable.
- if (is_x) {
- m_system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
- }
-
- R_SUCCEED();
-}
-
-KMemoryInfo KPageTable::QueryInfoImpl(KProcessAddress addr) {
- KScopedLightLock lk(m_general_lock);
-
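- // Return the memory info of the block that contains the address.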
- return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo();
-}
-
-KMemoryInfo KPageTable::QueryInfo(KProcessAddress addr) {
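- // If the address is outside the table, report everything past the end of the address
- // space as a single inaccessible block.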
- if (!Contains(addr, 1)) {
- return {
- .m_address = GetInteger(m_address_space_end),
- .m_size = 0 - GetInteger(m_address_space_end),
- .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
- .m_device_disable_merge_left_count = 0,
- .m_device_disable_merge_right_count = 0,
- .m_ipc_lock_count = 0,
- .m_device_use_count = 0,
- .m_ipc_disable_merge_count = 0,
- .m_permission = KMemoryPermission::None,
- .m_attribute = KMemoryAttribute::None,
- .m_original_permission = KMemoryPermission::None,
- .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
- };
- }
-
- return QueryInfoImpl(addr);
-}
-
-Result KPageTable::SetMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm) {
- const size_t num_pages = size / PageSize;
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory permission.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
- std::addressof(num_allocator_blocks), addr, size,
- KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Determine new perm.
- const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
- R_SUCCEED_IF(old_perm == new_perm);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Perform mapping operation.
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
-
- // Update the blocks.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
- const size_t num_pages = size / PageSize;
- ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
- KMemoryAttribute::SetMask);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Verify we can change the memory attribute.
- KMemoryState old_state;
- KMemoryPermission old_perm;
- KMemoryAttribute old_attr;
- size_t num_allocator_blocks;
- constexpr auto AttributeTestMask =
- ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
- const KMemoryState state_test_mask =
- static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
- ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
- : 0) |
- ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
- ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
- : 0));
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_test_mask, state_test_mask,
- KMemoryPermission::None, KMemoryPermission::None,
- AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // If we need to, perform a change attribute operation.
- if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
- // Perform operation.
- R_TRY(this->Operate(addr, num_pages, old_perm,
- OperationType::ChangePermissionsAndRefreshAndFlush, 0));
- }
-
- // Update the blocks.
- m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
- static_cast<KMemoryAttribute>(mask),
- static_cast<KMemoryAttribute>(attr));
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetMaxHeapSize(size_t size) {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Only process page tables are allowed to set heap size.
- ASSERT(!this->IsKernel());
-
- m_max_heap_size = size;
-
- R_SUCCEED();
-}
-
-Result KPageTable::SetHeapSize(u64* out, size_t size) {
- // Lock the physical memory mutex.
- KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
-
- // Try to perform a reduction in heap, instead of an extension.
- KProcessAddress cur_address{};
- size_t allocation_size{};
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Validate that setting heap size is possible at all.
- R_UNLESS(!m_is_kernel, ResultOutOfMemory);
- R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
- ResultOutOfMemory);
- R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
-
- if (size < GetHeapSize()) {
- // The size being requested is less than the current size, so we need to free the end of
- // the heap.
-
- // Validate memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
- m_heap_region_start + size, GetHeapSize() - size,
- KMemoryState::All, KMemoryState::Normal,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::All, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager,
- num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Unmap the end of the heap.
- const auto num_pages = (GetHeapSize() - size) / PageSize;
- R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None,
- OperationType::Unmap));
-
- // Release the memory from the resource limit.
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize);
-
- // Apply the memory block update.
- m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
- num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None,
- KMemoryBlockDisableMergeAttribute::None,
- size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None);
-
- // Update the current heap end.
- m_current_heap_end = m_heap_region_start + size;
-
- // Set the output.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- } else if (size == GetHeapSize()) {
- // The size requested is exactly the current size.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- } else {
- // We have to allocate memory. Determine how much to allocate and where while the table
- // is locked.
- cur_address = m_current_heap_end;
- allocation_size = size - GetHeapSize();
- }
- }
-
- // Reserve memory for the heap extension.
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size);
- R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
-
- // Allocate pages for the heap extension.
- KPageGroup pg{m_kernel, m_block_info_manager};
- R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen(
- &pg, allocation_size / PageSize,
- KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option)));
-
- // Clear all the newly allocated pages.
- for (const auto& it : pg) {
- std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value,
- it.GetSize());
- }
-
- // Map the pages.
- {
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Ensure that the heap hasn't changed since we began executing.
- ASSERT(cur_address == m_current_heap_end);
-
- // Check the memory state.
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
- allocation_size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryAttribute::None));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(
- std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Map the pages.
- const auto num_pages = allocation_size / PageSize;
- R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup));
-
- // Clear all the newly allocated pages.
- for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
- std::memset(m_memory->GetPointer(m_current_heap_end + (cur_page * PageSize)), 0,
- PageSize);
- }
-
- // We succeeded, so commit our memory reservation.
- memory_reservation.Commit();
-
- // Apply the memory block update.
- m_memory_block_manager.Update(
- std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
- m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
- : KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::None);
-
- // Update the current heap end.
- m_current_heap_end = m_heap_region_start + size;
-
- // Set the output.
- *out = GetInteger(m_heap_region_start);
- R_SUCCEED();
- }
-}
-
-Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
- size_t size, KMemoryPermission perm,
- bool is_aligned, bool check_heap) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- const auto test_state =
- (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
- (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
- size_t num_allocator_blocks;
- KMemoryState old_state;
- R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
- std::addressof(num_allocator_blocks), address, size, test_state,
- test_state, perm, perm,
- KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
- KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
- &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
-
- // Set whether the locked memory was io.
- *out_is_io =
- static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
- bool check_heap) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- const auto test_state = KMemoryState::FlagCanDeviceMap |
- (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size, test_state, test_state,
- KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result;
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
- m_enable_device_address_space_merge
- ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
- : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
- KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
- // Lightly validate the range before doing anything else.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the memory state.
- size_t num_allocator_blocks;
- R_TRY(this->CheckMemoryStateContiguous(
- std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
- KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update the memory blocks.
- m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
- &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
- size_t size) {
- R_RETURN(this->LockMemoryAndOpen(
- nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
- KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
- KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
- KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
- R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
- KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Locked, nullptr));
-}
-
-Result KPageTable::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
- KMemoryPermission perm) {
- R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
- KMemoryState::FlagCanTransfer, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForTransferMemory(KProcessAddress address, size_t size,
- const KPageGroup& pg) {
- R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
- KMemoryState::FlagCanTransfer, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
- KMemoryAttribute::Locked, std::addressof(pg)));
-}
-
-Result KPageTable::LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size) {
- R_RETURN(this->LockMemoryAndOpen(
- out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
- KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
- KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite,
- KMemoryAttribute::Locked));
-}
-
-Result KPageTable::UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg) {
- R_RETURN(this->UnlockMemory(
- addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
- KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
- KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
-}
-
-bool KPageTable::IsRegionContiguous(KProcessAddress addr, u64 size) const {
- auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr));
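- // Walk the region one page at a time; the backing pointer must advance by exactly
- // PageSize each step.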
- for (u64 offset{}; offset < size; offset += PageSize) {
- if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(GetInteger(addr) + offset)) {
- return false;
- }
- start_ptr += PageSize;
- }
- return true;
-}
-
-void KPageTable::AddRegionToPages(KProcessAddress start, size_t num_pages,
- KPageGroup& page_linked_list) {
- KProcessAddress addr{start};
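- // Append each page's physical address to the group as a single-page block.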
- while (addr < start + (num_pages * PageSize)) {
- const KPhysicalAddress paddr{GetPhysicalAddr(addr)};
- ASSERT(paddr != 0);
- page_linked_list.AddBlock(paddr, 1);
- addr += PageSize;
- }
-}
-
-KProcessAddress KPageTable::AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
- u64 needed_num_pages, size_t align) {
- if (m_enable_aslr) {
- UNIMPLEMENTED();
- }
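- // Find a free area in the block manager, reserving guard pages (1 for kernel, 4 for user).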
- return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
- IsKernel() ? 1 : 4);
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
- OperationType operation) {
- ASSERT(this->IsLockedByCurrentThread());
-
- ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
- ASSERT(num_pages > 0);
- ASSERT(num_pages == page_group.GetNumPages());
-
- switch (operation) {
- case OperationType::MapGroup:
- case OperationType::MapFirstGroup: {
- // We want to maintain a new reference to every page in the group.
- KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
-
- for (const auto& node : page_group) {
- const size_t size{node.GetNumPages() * PageSize};
-
- // Map the pages.
- m_memory->MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress());
-
- addr += size;
- }
-
- // We succeeded! We want to persist the reference to the pages.
- spg.CancelClose();
-
- break;
- }
- default:
- ASSERT(false);
- break;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, KPhysicalAddress map_addr) {
- ASSERT(this->IsLockedByCurrentThread());
-
- ASSERT(num_pages > 0);
- ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
- ASSERT(ContainsPages(addr, num_pages));
-
- switch (operation) {
- case OperationType::Unmap: {
- // Ensure that any pages we track close on exit.
- KPageGroup pages_to_close{m_kernel, this->GetBlockInfoManager()};
- SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
-
- this->AddRegionToPages(addr, num_pages, pages_to_close);
- m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
- break;
- }
- case OperationType::Map: {
- ASSERT(map_addr);
- ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
- m_memory->MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
-
- // Open references to pages, if we should.
- if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
- m_kernel.MemoryManager().Open(map_addr, num_pages);
- }
- break;
- }
- case OperationType::Separate: {
- // HACK: Unimplemented.
- break;
- }
- case OperationType::ChangePermissions:
- case OperationType::ChangePermissionsAndRefresh:
- case OperationType::ChangePermissionsAndRefreshAndFlush:
- break;
- default:
- ASSERT(false);
- break;
- }
- R_SUCCEED();
-}
-
-void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
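- // Drain the pages queued during the update; actually freeing them remains a TODO (see below).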
- while (page_list->Peek()) {
- [[maybe_unused]] auto page = page_list->Pop();
-
- // TODO(bunnei): Free pages once they are allocated in guest memory
- // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
- // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
- // this->GetPageTableManager().Free(page);
- }
-}
-
-KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return m_address_space_start;
- case Svc::MemoryState::Normal:
- return m_heap_region_start;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- return m_alias_region_start;
- case Svc::MemoryState::Stack:
- return m_stack_region_start;
- case Svc::MemoryState::Static:
- case Svc::MemoryState::ThreadLocal:
- return m_kernel_map_region_start;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return m_alias_code_region_start;
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- return m_code_region_start;
- default:
- UNREACHABLE();
- }
-}
-
-size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return m_address_space_end - m_address_space_start;
- case Svc::MemoryState::Normal:
- return m_heap_region_end - m_heap_region_start;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- return m_alias_region_end - m_alias_region_start;
- case Svc::MemoryState::Stack:
- return m_stack_region_end - m_stack_region_start;
- case Svc::MemoryState::Static:
- case Svc::MemoryState::ThreadLocal:
- return m_kernel_map_region_end - m_kernel_map_region_start;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return m_alias_code_region_end - m_alias_code_region_start;
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- return m_code_region_end - m_code_region_start;
- default:
- UNREACHABLE();
- }
-}
-
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
- const KProcessAddress end = addr + size;
- const KProcessAddress last = end - 1;
-
- const KProcessAddress region_start = this->GetRegionAddress(state);
- const size_t region_size = this->GetRegionSize(state);
-
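- // Determine whether the range lies in the state's region, and whether it overlaps
- // the heap or alias regions.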
- const bool is_in_region =
- region_start <= addr && addr < end && last <= region_start + region_size - 1;
- const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
- m_heap_region_start == m_heap_region_end);
- const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
- m_alias_region_start == m_alias_region_end);
- switch (state) {
- case Svc::MemoryState::Free:
- case Svc::MemoryState::Kernel:
- return is_in_region;
- case Svc::MemoryState::Io:
- case Svc::MemoryState::Static:
- case Svc::MemoryState::Code:
- case Svc::MemoryState::CodeData:
- case Svc::MemoryState::Shared:
- case Svc::MemoryState::AliasCode:
- case Svc::MemoryState::AliasCodeData:
- case Svc::MemoryState::Stack:
- case Svc::MemoryState::ThreadLocal:
- case Svc::MemoryState::Transfered:
- case Svc::MemoryState::SharedTransfered:
- case Svc::MemoryState::SharedCode:
- case Svc::MemoryState::GeneratedCode:
- case Svc::MemoryState::CodeOut:
- case Svc::MemoryState::Coverage:
- case Svc::MemoryState::Insecure:
- return is_in_region && !is_in_heap && !is_in_alias;
- case Svc::MemoryState::Normal:
- ASSERT(is_in_heap);
- return is_in_region && !is_in_alias;
- case Svc::MemoryState::Ipc:
- case Svc::MemoryState::NonSecureIpc:
- case Svc::MemoryState::NonDeviceIpc:
- ASSERT(is_in_alias);
- return is_in_region && !is_in_heap;
- default:
- return false;
- }
-}
-
-Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- // Validate the states match expectation.
- R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
- size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Get information about the first block.
- const KProcessAddress last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
- KMemoryInfo info = it->GetMemoryInfo();
-
- // If the start address isn't aligned, we need a block.
- const size_t blocks_for_start_align =
- (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
- while (true) {
- // Validate against the provided masks.
- R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
- // Break once we're done.
- if (last_addr <= info.GetLastAddress()) {
- break;
- }
-
- // Advance our iterator.
- it++;
- ASSERT(it != m_memory_block_manager.cend());
- info = it->GetMemoryInfo();
- }
-
- // If the end address isn't aligned, we need a block.
- const size_t blocks_for_end_align =
- (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
-
- if (out_blocks_needed != nullptr) {
- *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KMemoryBlockManager::const_iterator it,
- KProcessAddress last_addr, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Get information about the first block.
- KMemoryInfo info = it->GetMemoryInfo();
-
- // Validate all blocks in the range have correct state.
- const KMemoryState first_state = info.m_state;
- const KMemoryPermission first_perm = info.m_permission;
- const KMemoryAttribute first_attr = info.m_attribute;
- while (true) {
- // Validate the current block.
- R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
- R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
- R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
- ResultInvalidCurrentMemory);
-
- // Validate against the provided masks.
- R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
-
- // Break once we're done.
- if (last_addr <= info.GetLastAddress()) {
- break;
- }
-
- // Advance our iterator.
- it++;
- ASSERT(it != m_memory_block_manager.cend());
- info = it->GetMemoryInfo();
- }
-
- // Write output state.
- if (out_state != nullptr) {
- *out_state = first_state;
- }
- if (out_perm != nullptr) {
- *out_perm = first_perm;
- }
- if (out_attr != nullptr) {
- *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
- }
-
- // If the end address isn't aligned, we need a block.
- if (out_blocks_needed != nullptr) {
- const size_t blocks_for_end_align =
- (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
- ? 1
- : 0;
- *out_blocks_needed = blocks_for_end_align;
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- // Check memory state.
- const KProcessAddress last_addr = addr + size - 1;
- KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
- R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
- state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
-
- // If the start address isn't aligned, we need a block.
- if (out_blocks_needed != nullptr &&
- Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
- ++(*out_blocks_needed);
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr) {
- // Validate basic preconditions.
- ASSERT((lock_attr & attr) == KMemoryAttribute::None);
- ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
- KMemoryAttribute::None);
-
- // Validate the lock request.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check that the output page group is empty, if it exists.
- if (out_pg) {
- ASSERT(out_pg->GetNumPages() == 0);
- }
-
- // Check the state.
- KMemoryState old_state{};
- KMemoryPermission old_perm{};
- KMemoryAttribute old_attr{};
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Get the physical address, if we're supposed to.
- if (out_KPhysicalAddress != nullptr) {
- ASSERT(this->GetPhysicalAddressLocked(out_KPhysicalAddress, addr));
- }
-
- // Make the page group, if we're supposed to.
- if (out_pg != nullptr) {
- R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
- }
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Decide on new perm and attr.
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
- KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
-
- // Update permission, if we need to.
- if (new_perm != old_perm) {
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
- }
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- new_attr, KMemoryBlockDisableMergeAttribute::Locked,
- KMemoryBlockDisableMergeAttribute::None);
-
- // If we have an output page group, open.
- if (out_pg) {
- out_pg->Open();
- }
-
- R_SUCCEED();
-}
-
-Result KPageTable::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr, const KPageGroup* pg) {
- // Validate basic preconditions.
- ASSERT((attr_mask & lock_attr) == lock_attr);
- ASSERT((attr & lock_attr) == lock_attr);
-
- // Validate the unlock request.
- const size_t num_pages = size / PageSize;
- R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
-
- // Lock the table.
- KScopedLightLock lk(m_general_lock);
-
- // Check the state.
- KMemoryState old_state{};
- KMemoryPermission old_perm{};
- KMemoryAttribute old_attr{};
- size_t num_allocator_blocks{};
- R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
- std::addressof(old_attr), std::addressof(num_allocator_blocks),
- addr, size, state_mask | KMemoryState::FlagReferenceCounted,
- state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
- attr_mask, attr));
-
- // Check the page group.
- if (pg != nullptr) {
- R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
- }
-
- // Decide on new perm and attr.
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
- KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
-
- // Create an update allocator.
- Result allocator_result{ResultSuccess};
- KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
- m_memory_block_slab_manager, num_allocator_blocks);
- R_TRY(allocator_result);
-
- // Update permission, if we need to.
- if (new_perm != old_perm) {
- R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
- }
-
- // Apply the memory block updates.
- m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
- new_attr, KMemoryBlockDisableMergeAttribute::None,
- KMemoryBlockDisableMergeAttribute::Locked);
-
- R_SUCCEED();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 3d64b6fb0..5541bc13f 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -3,548 +3,14 @@
#pragma once
-#include <memory>
-
-#include "common/common_funcs.h"
-#include "common/page_table.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
-#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_memory_layout.h"
-#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Core {
-class System;
-}
+#include "core/hle/kernel/k_page_table_base.h"
namespace Kernel {
-enum class DisableMergeAttribute : u8 {
- None = (0U << 0),
- DisableHead = (1U << 0),
- DisableHeadAndBody = (1U << 1),
- EnableHeadAndBody = (1U << 2),
- DisableTail = (1U << 3),
- EnableTail = (1U << 4),
- EnableAndMergeHeadBodyTail = (1U << 5),
- EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
- DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
-};
-
-struct KPageProperties {
- KMemoryPermission perm;
- bool io;
- bool uncached;
- DisableMergeAttribute disable_merge_attributes;
-};
-static_assert(std::is_trivial_v<KPageProperties>);
-static_assert(sizeof(KPageProperties) == sizeof(u32));
-
-class KBlockInfoManager;
-class KMemoryBlockManager;
-class KResourceLimit;
-class KSystemResource;
-
-class KPageTable final {
-protected:
- struct PageLinkedList;
-
-public:
- enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
-
- YUZU_NON_COPYABLE(KPageTable);
- YUZU_NON_MOVEABLE(KPageTable);
-
- explicit KPageTable(Core::System& system_);
- ~KPageTable();
-
- Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
- KProcessAddress code_addr, size_t code_size,
- KSystemResource* system_resource, KResourceLimit* resource_limit,
- Core::Memory::Memory& memory);
-
- void Finalize();
-
- Result MapProcessCode(KProcessAddress addr, size_t pages_count, KMemoryState state,
- KMemoryPermission perm);
- Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
- Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
- ICacheInvalidationStrategy icache_invalidation_strategy);
- Result UnmapProcessMemory(KProcessAddress dst_addr, size_t size, KPageTable& src_page_table,
- KProcessAddress src_addr);
- Result MapPhysicalMemory(KProcessAddress addr, size_t size);
- Result UnmapPhysicalMemory(KProcessAddress addr, size_t size);
- Result MapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
- Result UnmapMemory(KProcessAddress dst_addr, KProcessAddress src_addr, size_t size);
- Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
- Svc::MemoryPermission svc_perm);
- KMemoryInfo QueryInfo(KProcessAddress addr);
- Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
- Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
- Result SetMaxHeapSize(size_t size);
- Result SetHeapSize(u64* out, size_t size);
- Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
- KMemoryPermission perm, bool is_aligned, bool check_heap);
- Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
-
- Result UnlockForDeviceAddressSpace(KProcessAddress addr, size_t size);
-
- Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
- Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
-
- Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
- KPageTable& src_page_table, KMemoryPermission test_perm,
- KMemoryState dst_state, bool send);
- Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
- Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
-
- Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
- KMemoryPermission perm);
- Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
- Result LockForCodeMemory(KPageGroup* out, KProcessAddress addr, size_t size);
- Result UnlockForCodeMemory(KProcessAddress addr, size_t size, const KPageGroup& pg);
- Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr);
-
- Common::PageTable& PageTableImpl() {
- return *m_page_table_impl;
- }
-
- const Common::PageTable& PageTableImpl() const {
- return *m_page_table_impl;
- }
-
- KBlockInfoManager* GetBlockInfoManager() {
- return m_block_info_manager;
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, KProcessAddress region_start,
- size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
- region_num_pages, state, perm));
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
- R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
- this->GetRegionAddress(state),
- this->GetRegionSize(state) / PageSize, state, perm));
- }
-
- Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
- KMemoryPermission perm);
- Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
-
- Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
- KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
- KMemoryPermission perm);
- Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
- KMemoryPermission perm);
- Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
- void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
- const KPageGroup& pg);
-
- KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
- size_t GetRegionSize(Svc::MemoryState state) const;
- bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
-
- KProcessAddress GetRegionAddress(KMemoryState state) const {
- return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
- size_t GetRegionSize(KMemoryState state) const {
- return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
- bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
- return this->CanContain(addr, size,
- static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
- }
-
-protected:
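- // Intrusive list of page-sized nodes, used to collect page-table pages during an
- // update (processed by FinalizeUpdate).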
- struct PageLinkedList {
- private:
- struct Node {
- Node* m_next;
- std::array<u8, PageSize - sizeof(Node*)> m_buffer;
- };
-
- public:
- constexpr PageLinkedList() = default;
-
- void Push(Node* n) {
- ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
- n->m_next = m_root;
- m_root = n;
- }
-
- void Push(Core::Memory::Memory& memory, KVirtualAddress addr) {
- this->Push(memory.GetPointer<Node>(GetInteger(addr)));
- }
-
- Node* Peek() const {
- return m_root;
- }
-
- Node* Pop() {
- Node* const r = m_root;
-
- m_root = r->m_next;
- r->m_next = nullptr;
-
- return r;
- }
-
- private:
- Node* m_root{};
- };
- static_assert(std::is_trivially_destructible<PageLinkedList>::value);
-
-private:
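- // Low-level operations performed by Operate().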
- enum class OperationType : u32 {
- Map = 0,
- MapGroup = 1,
- MapFirstGroup = 2,
- Unmap = 3,
- ChangePermissions = 4,
- ChangePermissionsAndRefresh = 5,
- ChangePermissionsAndRefreshAndFlush = 6,
- Separate = 7,
- };
-
- static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
- KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-
- Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
- KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
- size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
- bool IsRegionContiguous(KProcessAddress addr, u64 size) const;
- void AddRegionToPages(KProcessAddress start, size_t num_pages, KPageGroup& page_linked_list);
- KMemoryInfo QueryInfoImpl(KProcessAddress addr);
- KProcessAddress AllocateVirtualMemory(KProcessAddress start, size_t region_num_pages,
- u64 needed_num_pages, size_t align);
- Result Operate(KProcessAddress addr, size_t num_pages, const KPageGroup& page_group,
- OperationType operation);
- Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
- OperationType operation, KPhysicalAddress map_addr = 0);
- void FinalizeUpdate(PageLinkedList* page_list);
-
- KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
- size_t num_pages, size_t alignment, size_t offset,
- size_t guard_pages);
-
- Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
- R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
- perm, attr_mask, attr));
- }
-
- Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
- Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, size_t* out_blocks_needed,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
- state_mask, state, perm_mask, perm, attr_mask, attr,
- ignore_attr));
- }
- Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
- R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
- attr_mask, attr, ignore_attr));
- }
-
- Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_KPhysicalAddress,
- KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryPermission new_perm,
- KMemoryAttribute lock_attr);
- Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryPermission new_perm, KMemoryAttribute lock_attr,
- const KPageGroup* pg);
-
- Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
- bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
-
- bool IsLockedByCurrentThread() const {
- return m_general_lock.IsLockedByCurrentThread();
- }
-
- bool IsHeapPhysicalAddress(const KMemoryLayout& layout, KPhysicalAddress phys_addr) {
- ASSERT(this->IsLockedByCurrentThread());
-
- return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
- }
-
- bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
- ASSERT(this->IsLockedByCurrentThread());
-
- *out = GetPhysicalAddr(virt_addr);
-
- return *out != 0;
- }
-
- Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
- KProcessAddress address, size_t size, KMemoryPermission test_perm,
- KMemoryState dst_state);
- Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
- KMemoryPermission test_perm, KMemoryState dst_state,
- KPageTable& src_page_table, bool send);
- void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
- size_t size, KMemoryPermission prot_perm);
-
- Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
- size_t num_pages, KMemoryPermission perm);
- Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
- const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
-
- mutable KLightLock m_general_lock;
- mutable KLightLock m_map_physical_memory_lock;
-
-public:
- constexpr KProcessAddress GetAddressSpaceStart() const {
- return m_address_space_start;
- }
- constexpr KProcessAddress GetAddressSpaceEnd() const {
- return m_address_space_end;
- }
- constexpr size_t GetAddressSpaceSize() const {
- return m_address_space_end - m_address_space_start;
- }
- constexpr KProcessAddress GetHeapRegionStart() const {
- return m_heap_region_start;
- }
- constexpr KProcessAddress GetHeapRegionEnd() const {
- return m_heap_region_end;
- }
- constexpr size_t GetHeapRegionSize() const {
- return m_heap_region_end - m_heap_region_start;
- }
- constexpr KProcessAddress GetAliasRegionStart() const {
- return m_alias_region_start;
- }
- constexpr KProcessAddress GetAliasRegionEnd() const {
- return m_alias_region_end;
- }
- constexpr size_t GetAliasRegionSize() const {
- return m_alias_region_end - m_alias_region_start;
- }
- constexpr KProcessAddress GetStackRegionStart() const {
- return m_stack_region_start;
- }
- constexpr KProcessAddress GetStackRegionEnd() const {
- return m_stack_region_end;
- }
- constexpr size_t GetStackRegionSize() const {
- return m_stack_region_end - m_stack_region_start;
- }
- constexpr KProcessAddress GetKernelMapRegionStart() const {
- return m_kernel_map_region_start;
- }
- constexpr KProcessAddress GetKernelMapRegionEnd() const {
- return m_kernel_map_region_end;
- }
- constexpr KProcessAddress GetCodeRegionStart() const {
- return m_code_region_start;
- }
- constexpr KProcessAddress GetCodeRegionEnd() const {
- return m_code_region_end;
- }
- constexpr KProcessAddress GetAliasCodeRegionStart() const {
- return m_alias_code_region_start;
- }
- constexpr KProcessAddress GetAliasCodeRegionEnd() const {
- return m_alias_code_region_end;
- }
- constexpr size_t GetAliasCodeRegionSize() const {
- return m_alias_code_region_end - m_alias_code_region_start;
- }
- size_t GetNormalMemorySize() {
- KScopedLightLock lk(m_general_lock);
- return GetHeapSize() + m_mapped_physical_memory_size;
- }
- constexpr size_t GetAddressSpaceWidth() const {
- return m_address_space_width;
- }
- constexpr size_t GetHeapSize() const {
- return m_current_heap_end - m_heap_region_start;
- }
- constexpr size_t GetNumGuardPages() const {
- return IsKernel() ? 1 : 4;
- }
- KPhysicalAddress GetPhysicalAddr(KProcessAddress addr) const {
- const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
- ASSERT(backing_addr);
- return backing_addr + GetInteger(addr);
- }
- constexpr bool Contains(KProcessAddress addr) const {
- return m_address_space_start <= addr && addr <= m_address_space_end - 1;
- }
- constexpr bool Contains(KProcessAddress addr, size_t size) const {
- return m_address_space_start <= addr && addr < addr + size &&
- addr + size - 1 <= m_address_space_end - 1;
- }
- constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
- return this->Contains(addr, size) && m_alias_region_start <= addr &&
- addr + size - 1 <= m_alias_region_end - 1;
- }
- constexpr bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
- return this->Contains(addr, size) && m_heap_region_start <= addr &&
- addr + size - 1 <= m_heap_region_end - 1;
- }
-
+class KPageTable final : public KPageTableBase {
public:
- static KVirtualAddress GetLinearMappedVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return layout.GetLinearVirtualAddress(addr);
- }
-
- static KPhysicalAddress GetLinearMappedPhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return layout.GetLinearPhysicalAddress(addr);
- }
-
- static KVirtualAddress GetHeapVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return GetLinearMappedVirtualAddress(layout, addr);
- }
-
- static KPhysicalAddress GetHeapPhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return GetLinearMappedPhysicalAddress(layout, addr);
- }
-
- static KVirtualAddress GetPageTableVirtualAddress(const KMemoryLayout& layout,
- KPhysicalAddress addr) {
- return GetLinearMappedVirtualAddress(layout, addr);
- }
-
- static KPhysicalAddress GetPageTablePhysicalAddress(const KMemoryLayout& layout,
- KVirtualAddress addr) {
- return GetLinearMappedPhysicalAddress(layout, addr);
- }
-
-private:
- constexpr bool IsKernel() const {
- return m_is_kernel;
- }
- constexpr bool IsAslrEnabled() const {
- return m_enable_aslr;
- }
-
- constexpr bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
- return (m_address_space_start <= addr) &&
- (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
- (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
- }
-
-private:
- class KScopedPageTableUpdater {
- private:
- KPageTable* m_pt{};
- PageLinkedList m_ll;
-
- public:
- explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
- explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
- ~KScopedPageTableUpdater() {
- m_pt->FinalizeUpdate(this->GetPageList());
- }
-
- PageLinkedList* GetPageList() {
- return std::addressof(m_ll);
- }
- };
-
-private:
- KProcessAddress m_address_space_start{};
- KProcessAddress m_address_space_end{};
- KProcessAddress m_heap_region_start{};
- KProcessAddress m_heap_region_end{};
- KProcessAddress m_current_heap_end{};
- KProcessAddress m_alias_region_start{};
- KProcessAddress m_alias_region_end{};
- KProcessAddress m_stack_region_start{};
- KProcessAddress m_stack_region_end{};
- KProcessAddress m_kernel_map_region_start{};
- KProcessAddress m_kernel_map_region_end{};
- KProcessAddress m_code_region_start{};
- KProcessAddress m_code_region_end{};
- KProcessAddress m_alias_code_region_start{};
- KProcessAddress m_alias_code_region_end{};
-
- size_t m_max_heap_size{};
- size_t m_mapped_physical_memory_size{};
- size_t m_mapped_unsafe_physical_memory{};
- size_t m_mapped_insecure_memory{};
- size_t m_mapped_ipc_server_memory{};
- size_t m_address_space_width{};
-
- KMemoryBlockManager m_memory_block_manager;
- u32 m_allocate_option{};
-
- bool m_is_kernel{};
- bool m_enable_aslr{};
- bool m_enable_device_address_space_merge{};
-
- KMemoryBlockSlabManager* m_memory_block_slab_manager{};
- KBlockInfoManager* m_block_info_manager{};
- KResourceLimit* m_resource_limit{};
-
- u32 m_heap_fill_value{};
- u32 m_ipc_fill_value{};
- u32 m_stack_fill_value{};
- const KMemoryRegion* m_cached_physical_heap_region{};
-
- KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
- KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
-
- std::unique_ptr<Common::PageTable> m_page_table_impl;
-
- Core::System& m_system;
- KernelCore& m_kernel;
- Core::Memory::Memory* m_memory{};
+ explicit KPageTable(KernelCore& kernel) : KPageTableBase(kernel) {}
+ ~KPageTable() = default;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
new file mode 100644
index 000000000..6691586ed
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -0,0 +1,5739 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_address_space_info.h"
+#include "core/hle/kernel/k_page_table_base.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
+#include "core/hle/kernel/k_system_resource.h"
+
+namespace Kernel {
+
+namespace {
+
+class KScopedLightLockPair {
+ YUZU_NON_COPYABLE(KScopedLightLockPair);
+ YUZU_NON_MOVEABLE(KScopedLightLockPair);
+
+private:
+ KLightLock* m_lower;
+ KLightLock* m_upper;
+
+public:
+ KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
+ // Ensure our locks are in a consistent order.
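+        // Taking them in address order means two page tables locking against each other
+        // cannot deadlock.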
+ if (std::addressof(lhs) <= std::addressof(rhs)) {
+ m_lower = std::addressof(lhs);
+ m_upper = std::addressof(rhs);
+ } else {
+ m_lower = std::addressof(rhs);
+ m_upper = std::addressof(lhs);
+ }
+
+ // Acquire both locks.
+ m_lower->Lock();
+ if (m_lower != m_upper) {
+ m_upper->Lock();
+ }
+ }
+
+ ~KScopedLightLockPair() {
+ // Unlock the upper lock.
+ if (m_upper != nullptr && m_upper != m_lower) {
+ m_upper->Unlock();
+ }
+
+ // Unlock the lower lock.
+ if (m_lower != nullptr) {
+ m_lower->Unlock();
+ }
+ }
+
+public:
+ // Utility.
+ void TryUnlockHalf(KLightLock& lock) {
+        // Only allow unlocking if the lock is one half of the pair.
+ if (m_lower != m_upper) {
+ // We want to be sure the lock is one we own.
+ if (m_lower == std::addressof(lock)) {
+ lock.Unlock();
+ m_lower = nullptr;
+ } else if (m_upper == std::addressof(lock)) {
+ lock.Unlock();
+ m_upper = nullptr;
+ }
+ }
+ }
+};
+
+template <typename AddressType>
+void InvalidateInstructionCache(Core::System& system, AddressType addr, u64 size) {
+ system.InvalidateCpuInstructionCacheRange(GetInteger(addr), size);
+}
+
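+// Guest data cache maintenance is treated as a no-op here; these helpers simply report success.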
+template <typename AddressType>
+Result InvalidateDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+template <typename AddressType>
+Result StoreDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+template <typename AddressType>
+Result FlushDataCache(AddressType addr, u64 size) {
+ R_SUCCEED();
+}
+
+constexpr Common::MemoryPermission ConvertToMemoryPermission(KMemoryPermission perm) {
+ Common::MemoryPermission perms{};
+ if (True(perm & KMemoryPermission::UserRead)) {
+ perms |= Common::MemoryPermission::Read;
+ }
+ if (True(perm & KMemoryPermission::UserWrite)) {
+ perms |= Common::MemoryPermission::Write;
+ }
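+    // Execute permission is only forwarded to the host mapping when native code execution
+    // (NCE) support is compiled in.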
+#ifdef HAS_NCE
+ if (True(perm & KMemoryPermission::UserExecute)) {
+ perms |= Common::MemoryPermission::Execute;
+ }
+#endif
+ return perms;
+}
+
+} // namespace
+
+void KPageTableBase::MemoryRange::Open() {
+ // If the range contains heap pages, open them.
+ if (this->IsHeap()) {
+ m_kernel.MemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
+ }
+}
+
+void KPageTableBase::MemoryRange::Close() {
+ // If the range contains heap pages, close them.
+ if (this->IsHeap()) {
+ m_kernel.MemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
+ }
+}
+
+KPageTableBase::KPageTableBase(KernelCore& kernel)
+ : m_kernel(kernel), m_system(kernel.System()), m_general_lock(kernel),
+ m_map_physical_memory_lock(kernel), m_device_map_lock(kernel) {}
+KPageTableBase::~KPageTableBase() = default;
+
+Result KPageTableBase::InitializeForKernel(bool is_64_bit, KVirtualAddress start,
+ KVirtualAddress end, Core::Memory::Memory& memory) {
+ // Initialize our members.
+ m_address_space_width =
+ static_cast<u32>(is_64_bit ? Common::BitSize<u64>() : Common::BitSize<u32>());
+ m_address_space_start = KProcessAddress(GetInteger(start));
+ m_address_space_end = KProcessAddress(GetInteger(end));
+ m_is_kernel = true;
+ m_enable_aslr = true;
+ m_enable_device_address_space_merge = false;
+
+ m_heap_region_start = 0;
+ m_heap_region_end = 0;
+ m_current_heap_end = 0;
+ m_alias_region_start = 0;
+ m_alias_region_end = 0;
+ m_stack_region_start = 0;
+ m_stack_region_end = 0;
+ m_kernel_map_region_start = 0;
+ m_kernel_map_region_end = 0;
+ m_alias_code_region_start = 0;
+ m_alias_code_region_end = 0;
+ m_code_region_start = 0;
+ m_code_region_end = 0;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+ m_memory_block_slab_manager =
+ m_kernel.GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = m_kernel.GetSystemSystemResource().GetBlockInfoManagerPointer();
+ m_resource_limit = m_kernel.GetSystemResourceLimit();
+
+ m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool::System,
+ KMemoryManager::Direction::FromFront);
+ m_heap_fill_value = MemoryFillValue_Zero;
+ m_ipc_fill_value = MemoryFillValue_Zero;
+ m_stack_fill_value = MemoryFillValue_Zero;
+
+ m_cached_physical_linear_region = nullptr;
+ m_cached_physical_heap_region = nullptr;
+
+ // Initialize our implementation.
+ m_impl = std::make_unique<Common::PageTable>();
+ m_impl->Resize(m_address_space_width, PageBits);
+
+ // Set the tracking memory.
+ m_memory = std::addressof(memory);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
+
+Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+ bool enable_das_merge, bool from_back,
+ KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit,
+ Core::Memory::Memory& memory,
+ KProcessAddress aslr_space_start) {
+ // Calculate region extents.
+ const size_t as_width = GetAddressSpaceWidth(as_type);
+ const KProcessAddress start = 0;
+ const KProcessAddress end = (1ULL << as_width);
+
+ // Validate the region.
+ ASSERT(start <= code_address);
+ ASSERT(code_address < code_address + code_size);
+ ASSERT(code_address + code_size - 1 <= end - 1);
+
+ // Define helpers.
+ auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) {
+ return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
+ };
+ auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) {
+ return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
+ };
+
+ // Set our bit width and heap/alias sizes.
+ m_address_space_width = static_cast<u32>(GetAddressSpaceWidth(as_type));
+ size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+ size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+
+ // Adjust heap/alias size if we don't have an alias region.
+ if ((as_type & Svc::CreateProcessFlag::AddressSpaceMask) ==
+ Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
+ heap_region_size += alias_region_size;
+ alias_region_size = 0;
+ }
+
+ // Set code regions and determine remaining sizes.
+ KProcessAddress process_code_start;
+ KProcessAddress process_code_end;
+ size_t stack_region_size;
+ size_t kernel_map_region_size;
+ if (m_address_space_width == 39) {
+ alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
+ heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
+ stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
+ kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_start = m_address_space_start + aslr_space_start +
+ GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = m_code_region_end;
+ process_code_start = Common::AlignDown(GetInteger(code_address), RegionAlignment);
+ process_code_end = Common::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
+ } else {
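+        // Without a 39-bit address space, the stack and kernel-map regions alias the small
+        // map region that also holds code.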
+ stack_region_size = 0;
+ kernel_map_region_size = 0;
+ m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
+ m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
+ m_stack_region_start = m_code_region_start;
+ m_alias_code_region_start = m_code_region_start;
+ m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
+ GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
+ m_stack_region_end = m_code_region_end;
+ m_kernel_map_region_start = m_code_region_start;
+ m_kernel_map_region_end = m_code_region_end;
+ process_code_start = m_code_region_start;
+ process_code_end = m_code_region_end;
+ }
+
+ // Set other basic fields.
+ m_enable_aslr = enable_aslr;
+ m_enable_device_address_space_merge = enable_das_merge;
+ m_address_space_start = start;
+ m_address_space_end = end;
+ m_is_kernel = false;
+ m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
+ m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
+ m_resource_limit = resource_limit;
+
+    // Determine the region in which to place the remaining (alias/heap/stack/kernel-map) regions,
+    // preferring the larger gap on either side of the process code region.
+ KProcessAddress alloc_start;
+ size_t alloc_size;
+ if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >=
+ (GetInteger(end) - GetInteger(process_code_end))) {
+ alloc_start = m_code_region_start;
+ alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
+ } else {
+ alloc_start = process_code_end;
+ alloc_size = GetInteger(end) - GetInteger(process_code_end);
+ }
+ const size_t needed_size =
+ (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+ R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
+
+ const size_t remaining_size = alloc_size - needed_size;
+
+ // Determine random placements for each region.
+ size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
+ if (enable_aslr) {
+ alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+ RegionAlignment;
+ }
+
+ // Setup heap and alias regions.
+ m_alias_region_start = alloc_start + alias_rnd;
+ m_alias_region_end = m_alias_region_start + alias_region_size;
+ m_heap_region_start = alloc_start + heap_rnd;
+ m_heap_region_end = m_heap_region_start + heap_region_size;
+
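+    // The regions were placed independently; shift whichever one landed later past the other
+    // so the alias and heap regions do not overlap.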
+ if (alias_rnd <= heap_rnd) {
+ m_heap_region_start += alias_region_size;
+ m_heap_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += heap_region_size;
+ m_alias_region_end += heap_region_size;
+ }
+
+ // Setup stack region.
+ if (stack_region_size) {
+ m_stack_region_start = alloc_start + stack_rnd;
+ m_stack_region_end = m_stack_region_start + stack_region_size;
+
+ if (alias_rnd < stack_rnd) {
+ m_stack_region_start += alias_region_size;
+ m_stack_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += stack_region_size;
+ m_alias_region_end += stack_region_size;
+ }
+
+ if (heap_rnd < stack_rnd) {
+ m_stack_region_start += heap_region_size;
+ m_stack_region_end += heap_region_size;
+ } else {
+ m_heap_region_start += stack_region_size;
+ m_heap_region_end += stack_region_size;
+ }
+ }
+
+ // Setup kernel map region.
+ if (kernel_map_region_size) {
+ m_kernel_map_region_start = alloc_start + kmap_rnd;
+ m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;
+
+ if (alias_rnd < kmap_rnd) {
+ m_kernel_map_region_start += alias_region_size;
+ m_kernel_map_region_end += alias_region_size;
+ } else {
+ m_alias_region_start += kernel_map_region_size;
+ m_alias_region_end += kernel_map_region_size;
+ }
+
+ if (heap_rnd < kmap_rnd) {
+ m_kernel_map_region_start += heap_region_size;
+ m_kernel_map_region_end += heap_region_size;
+ } else {
+ m_heap_region_start += kernel_map_region_size;
+ m_heap_region_end += kernel_map_region_size;
+ }
+
+ if (stack_region_size) {
+ if (stack_rnd < kmap_rnd) {
+ m_kernel_map_region_start += stack_region_size;
+ m_kernel_map_region_end += stack_region_size;
+ } else {
+ m_stack_region_start += kernel_map_region_size;
+ m_stack_region_end += kernel_map_region_size;
+ }
+ }
+ }
+
+ // Set heap and fill members.
+ m_current_heap_end = m_heap_region_start;
+ m_max_heap_size = 0;
+ m_mapped_physical_memory_size = 0;
+ m_mapped_unsafe_physical_memory = 0;
+ m_mapped_insecure_memory = 0;
+ m_mapped_ipc_server_memory = 0;
+
+ // const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
+ const bool fill_memory = false;
+ m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
+ m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
+ m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
+
+ // Set allocation option.
+ m_allocate_option =
+ KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
+ : KMemoryManager::Direction::FromFront);
+
+    // Ensure that the regions we chose are inside our address space.
+ auto IsInAddressSpace = [&](KProcessAddress addr) {
+ return m_address_space_start <= addr && addr <= m_address_space_end;
+ };
+ ASSERT(IsInAddressSpace(m_alias_region_start));
+ ASSERT(IsInAddressSpace(m_alias_region_end));
+ ASSERT(IsInAddressSpace(m_heap_region_start));
+ ASSERT(IsInAddressSpace(m_heap_region_end));
+ ASSERT(IsInAddressSpace(m_stack_region_start));
+ ASSERT(IsInAddressSpace(m_stack_region_end));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_start));
+ ASSERT(IsInAddressSpace(m_kernel_map_region_end));
+
+ // Ensure that we selected regions that don't overlap.
+ const KProcessAddress alias_start = m_alias_region_start;
+ const KProcessAddress alias_last = m_alias_region_end - 1;
+ const KProcessAddress heap_start = m_heap_region_start;
+ const KProcessAddress heap_last = m_heap_region_end - 1;
+ const KProcessAddress stack_start = m_stack_region_start;
+ const KProcessAddress stack_last = m_stack_region_end - 1;
+ const KProcessAddress kmap_start = m_kernel_map_region_start;
+ const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
+ ASSERT(alias_last < heap_start || heap_last < alias_start);
+ ASSERT(alias_last < stack_start || stack_last < alias_start);
+ ASSERT(alias_last < kmap_start || kmap_last < alias_start);
+ ASSERT(heap_last < stack_start || stack_last < heap_start);
+ ASSERT(heap_last < kmap_start || kmap_last < heap_start);
+
+ // Initialize our implementation.
+ m_impl = std::make_unique<Common::PageTable>();
+ m_impl->Resize(m_address_space_width, PageBits);
+
+ // Set the tracking memory.
+ m_memory = std::addressof(memory);
+
+ // Initialize our memory block manager.
+ R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+ m_memory_block_slab_manager));
+}
+
+void KPageTableBase::Finalize() {
+ auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
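+        // When fastmem is enabled, also unmap the corresponding range from the host backing buffer.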
+ if (Settings::IsFastmemEnabled()) {
+ m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+ }
+ };
+
+ // Finalize memory blocks.
+ m_memory_block_manager.Finalize(m_memory_block_slab_manager, std::move(HostUnmapCallback));
+
+ // Free any unsafe mapped memory.
+ if (m_mapped_unsafe_physical_memory) {
+ UNIMPLEMENTED();
+ }
+
+ // Release any insecure mapped memory.
+ if (m_mapped_insecure_memory) {
+ if (auto* const insecure_resource_limit =
+ KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ insecure_resource_limit != nullptr) {
+ insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ m_mapped_insecure_memory);
+ }
+ }
+
+ // Release any ipc server memory.
+ if (m_mapped_ipc_server_memory) {
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ m_mapped_ipc_server_memory);
+ }
+
+ // Close the backing page table, as the destructor is not called for guest objects.
+ m_impl.reset();
+}
+
+KProcessAddress KPageTableBase::GetRegionAddress(Svc::MemoryState state) const {
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return m_address_space_start;
+ case Svc::MemoryState::Normal:
+ return m_heap_region_start;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ return m_alias_region_start;
+ case Svc::MemoryState::Stack:
+ return m_stack_region_start;
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::ThreadLocal:
+ return m_kernel_map_region_start;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return m_alias_code_region_start;
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ return m_code_region_start;
+ default:
+ UNREACHABLE();
+ }
+}
+
+size_t KPageTableBase::GetRegionSize(Svc::MemoryState state) const {
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return m_address_space_end - m_address_space_start;
+ case Svc::MemoryState::Normal:
+ return m_heap_region_end - m_heap_region_start;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ return m_alias_region_end - m_alias_region_start;
+ case Svc::MemoryState::Stack:
+ return m_stack_region_end - m_stack_region_start;
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::ThreadLocal:
+ return m_kernel_map_region_end - m_kernel_map_region_start;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return m_alias_code_region_end - m_alias_code_region_start;
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ return m_code_region_end - m_code_region_start;
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
+ const KProcessAddress end = addr + size;
+ const KProcessAddress last = end - 1;
+
+ const KProcessAddress region_start = this->GetRegionAddress(state);
+ const size_t region_size = this->GetRegionSize(state);
+
+ const bool is_in_region =
+ region_start <= addr && addr < end && last <= region_start + region_size - 1;
+ const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr ||
+ m_heap_region_start == m_heap_region_end);
+ const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
+ m_alias_region_start == m_alias_region_end);
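+    // Normal memory must come from the heap region and IPC memory from the alias region;
+    // most other states must additionally avoid both of those regions.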
+ switch (state) {
+ case Svc::MemoryState::Free:
+ case Svc::MemoryState::Kernel:
+ return is_in_region;
+ case Svc::MemoryState::Io:
+ case Svc::MemoryState::Static:
+ case Svc::MemoryState::Code:
+ case Svc::MemoryState::CodeData:
+ case Svc::MemoryState::Shared:
+ case Svc::MemoryState::AliasCode:
+ case Svc::MemoryState::AliasCodeData:
+ case Svc::MemoryState::Stack:
+ case Svc::MemoryState::ThreadLocal:
+ case Svc::MemoryState::Transfered:
+ case Svc::MemoryState::SharedTransfered:
+ case Svc::MemoryState::SharedCode:
+ case Svc::MemoryState::GeneratedCode:
+ case Svc::MemoryState::CodeOut:
+ case Svc::MemoryState::Coverage:
+ case Svc::MemoryState::Insecure:
+ return is_in_region && !is_in_heap && !is_in_alias;
+ case Svc::MemoryState::Normal:
+ ASSERT(is_in_heap);
+ return is_in_region && !is_in_alias;
+ case Svc::MemoryState::Ipc:
+ case Svc::MemoryState::NonSecureIpc:
+ case Svc::MemoryState::NonDeviceIpc:
+ ASSERT(is_in_alias);
+ return is_in_region && !is_in_heap;
+ default:
+ return false;
+ }
+}
+
+Result KPageTableBase::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ // Validate the states match expectation.
+ R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
+ R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr,
+ size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm,
+ KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Get information about the first block.
+ const KProcessAddress last_addr = addr + size - 1;
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+ KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the start address isn't aligned, we need a block.
+ const size_t blocks_for_start_align =
+ (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
+
+ while (true) {
+ // Validate against the provided masks.
+ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+ // Break once we're done.
+ if (last_addr <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance our iterator.
+ it++;
+ ASSERT(it != m_memory_block_manager.cend());
+ info = it->GetMemoryInfo();
+ }
+
+ // If the end address isn't aligned, we need a block.
+ const size_t blocks_for_end_align =
+ (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
+
+ if (out_blocks_needed != nullptr) {
+ *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KMemoryBlockManager::const_iterator it,
+ KProcessAddress last_addr, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Get information about the first block.
+ KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate all blocks in the range have correct state.
+ const KMemoryState first_state = info.m_state;
+ const KMemoryPermission first_perm = info.m_permission;
+ const KMemoryAttribute first_attr = info.m_attribute;
+ while (true) {
+ // Validate the current block.
+ R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
+ R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
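+        // Bits in ignore_attr are effectively masked out of the attribute comparison.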
+ R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
+ ResultInvalidCurrentMemory);
+
+ // Validate against the provided masks.
+ R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+ // Break once we're done.
+ if (last_addr <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance our iterator.
+ it++;
+ ASSERT(it != m_memory_block_manager.cend());
+ info = it->GetMemoryInfo();
+ }
+
+ // Write output state.
+ if (out_state != nullptr) {
+ *out_state = first_state;
+ }
+ if (out_perm != nullptr) {
+ *out_perm = first_perm;
+ }
+ if (out_attr != nullptr) {
+ *out_attr = first_attr & ~ignore_attr;
+ }
+
+ // If the end address isn't aligned, we need a block.
+ if (out_blocks_needed != nullptr) {
+ const size_t blocks_for_end_align =
+ (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+ ? 1
+ : 0;
+ *out_blocks_needed = blocks_for_end_align;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Check memory state.
+ const KProcessAddress last_addr = addr + size - 1;
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+ R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+ state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
+
+ // If the start address isn't aligned, we need a block.
+ if (out_blocks_needed != nullptr &&
+ Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+ ++(*out_blocks_needed);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr) {
+ // Validate basic preconditions.
+ ASSERT(False(lock_attr & attr));
+ ASSERT(False(lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+
+ // Validate the lock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check that the output page group is empty, if it exists.
+ if (out_pg) {
+ ASSERT(out_pg->GetNumPages() == 0);
+ }
+
+ // Check the state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Get the physical address, if we're supposed to.
+ if (out_paddr != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ }
+
+ // Make the page group, if we're supposed to.
+ if (out_pg != nullptr) {
+ R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+ }
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = old_attr | static_cast<KMemoryAttribute>(lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ const KPageProperties properties = {new_perm, false,
+ True(old_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // If we have an output group, open.
+ if (out_pg) {
+ out_pg->Open();
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageGroup* pg) {
+ // Validate basic preconditions.
+ ASSERT((attr_mask & lock_attr) == lock_attr);
+ ASSERT((attr & lock_attr) == lock_attr);
+
+ // Validate the unlock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Check the page group.
+ if (pg != nullptr) {
+ R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = old_attr & ~static_cast<KMemoryAttribute>(lock_attr);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ const KPageProperties properties = {new_perm, false,
+ True(old_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ new_attr, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Locked);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+ KProcessAddress address) const {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(out_info != nullptr);
+ ASSERT(out_page != nullptr);
+
+ const KMemoryBlock* block = m_memory_block_manager.FindBlock(address);
+ R_UNLESS(block != nullptr, ResultInvalidCurrentMemory);
+
+ *out_info = block->GetMemoryInfo();
+ out_page->flags = 0;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+ Svc::MemoryState state) const {
+ ASSERT(!this->IsLockedByCurrentThread());
+ ASSERT(out != nullptr);
+
+ const KProcessAddress region_start = this->GetRegionAddress(state);
+ const size_t region_size = this->GetRegionSize(state);
+
+ // Check that the address/size are potentially valid.
+ R_UNLESS((address < address + size), ResultNotFound);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ bool cur_valid = false;
+ TraversalEntry next_entry;
+ bool next_valid;
+ size_t tot_size = 0;
+
+ next_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
+ next_entry.block_size =
+ (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));
+
+ // Iterate, looking for entry.
+ while (true) {
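+        // Coalesce runs of physically contiguous entries (or runs of unmapped entries) so the
+        // containment check below sees the full extent of each run.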
+ if ((!next_valid && !cur_valid) ||
+ (next_valid && cur_valid &&
+ next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+ cur_entry.block_size += next_entry.block_size;
+ } else {
+ if (cur_valid && cur_entry.phys_addr <= address &&
+ address + size <= cur_entry.phys_addr + cur_entry.block_size) {
+ // Check if this region is valid.
+ const KProcessAddress mapped_address =
+ (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+ if (R_SUCCEEDED(this->CheckMemoryState(
+ mapped_address, size, KMemoryState::Mask, static_cast<KMemoryState>(state),
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None))) {
+ // It is!
+ *out = mapped_address;
+ R_SUCCEED();
+ }
+ }
+
+ // Update tracking variables.
+ tot_size += cur_entry.block_size;
+ cur_entry = next_entry;
+ cur_valid = next_valid;
+ }
+
+ if (cur_entry.block_size + tot_size >= region_size) {
+ break;
+ }
+
+ next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ }
+
+ // Check the last entry.
+ R_UNLESS(cur_valid, ResultNotFound);
+ R_UNLESS(cur_entry.phys_addr <= address, ResultNotFound);
+ R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, ResultNotFound);
+
+ // Check if the last region is valid.
+ const KProcessAddress mapped_address =
+ (region_start + tot_size) + GetInteger(address - cur_entry.phys_addr);
+ R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState::All,
+ static_cast<KMemoryState>(state),
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None)) {
+ R_CONVERT_ALL(ResultNotFound);
+ }
+ R_END_TRY_CATCH;
+
+ // We found the region.
+ *out = mapped_address;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the source address's state is valid.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Validate that the dst address's state is valid.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the memory.
+ {
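+        // The source pages stay allocated but become inaccessible to the process, while the
+        // destination receives a Stack-state alias of the same physical pages.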
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;
+ const KPageProperties src_properties = {new_src_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableHeadBodyTail};
+ R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+ unprotect_properties, OperationType::ChangePermissions, true));
+ };
+
+ // Map the alias pages.
+ const KPageProperties dst_map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties,
+ false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_src_perm, new_src_attr,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::Stack,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the source address's state is valid.
+ KMemoryState src_state;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
+ src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
+ KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
+ KMemoryAttribute::All, KMemoryAttribute::Locked));
+
+ // Validate that the dst address's state is valid.
+ KMemoryPermission dst_perm;
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
+ dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Unmap the memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+        // Ensure the page group is valid for the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ dst_unmap_properties, OperationType::Unmap, false));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
+
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, src_state,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify that the source memory is normal heap.
+ KMemoryState src_state;
+ KMemoryPermission src_perm;
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
+ std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Verify that the destination memory is unmapped.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // Map the code memory.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the source.
+ R_TRY(this->MakePageGroup(pg, src_address, num_pages));
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+ const KPageProperties src_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Ensure that we unprotect the source pages on failure.
+ ON_RESULT_FAILURE {
+ const KPageProperties unprotect_properties = {
+ src_perm, false, false, DisableMergeAttribute::EnableHeadBodyTail};
+ R_ASSERT(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false,
+ unprotect_properties, OperationType::ChangePermissions, true));
+ };
+
+ // Map the alias pages.
+ const KPageProperties dst_properties = {new_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(
+ this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
+ src_state, new_perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::None);
+ m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
+ KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify that the source memory is locked normal heap.
+ size_t num_src_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Verify that the destination memory is aliasable code.
+ size_t num_dst_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+ KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
+
+ // Determine whether any pages being unmapped are code.
+ bool any_code_pages = false;
+ {
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
+ while (true) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if the memory has code flag.
+ if (True(info.GetState() & KMemoryState::FlagCode)) {
+ any_code_pages = true;
+ break;
+ }
+
+ // Check if we're done.
+ if (dst_address + size - 1 <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+ }
+
+ // Ensure that we maintain the instruction cache.
+ bool reprotected_pages = false;
+ SCOPE_EXIT({
+ if (reprotected_pages && any_code_pages) {
+ InvalidateInstructionCache(m_system, dst_address, size);
+ }
+ });
+
+ // Unmap.
+ {
+ // Determine the number of pages being operated on.
+ const size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being unmapped.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Create the page group representing the destination.
+ R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
+
+ // Verify that the page group contains the same pages as the source.
+ R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), ResultInvalidMemoryRegion);
+
+ // Create an update allocator for the source.
+ Result src_allocator_result;
+ KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
+ m_memory_block_slab_manager,
+ num_src_allocator_blocks);
+ R_TRY(src_allocator_result);
+
+ // Create an update allocator for the destination.
+ Result dst_allocator_result;
+ KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
+ m_memory_block_slab_manager,
+ num_dst_allocator_blocks);
+ R_TRY(dst_allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the aliased copy of the pages.
+ const KPageProperties dst_unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ dst_unmap_properties, OperationType::Unmap, false));
+
+ // Ensure that we re-map the aliased pages on failure.
+ ON_RESULT_FAILURE {
+ this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
+ };
+
+ // Try to set the permissions for the source pages back to what they should be.
+ const KPageProperties src_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::EnableAndMergeHeadBodyTail};
+ R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, 0, false, src_properties,
+ OperationType::ChangePermissions, false));
+
+ // Apply the memory block updates.
+ m_memory_block_manager.Update(
+ std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal);
+ m_memory_block_manager.Update(
+ std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked);
+
+ // Note that we reprotected pages.
+ reprotected_pages = true;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
+ // Get the insecure memory resource limit and pool.
+ auto* const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ const auto insecure_pool =
+ static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());
+
+ // Reserve the insecure memory.
+ // NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached.
+ KScopedResourceReservation memory_reservation(insecure_resource_limit,
+ Svc::LimitableResource::PhysicalMemoryMax, size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultOutOfMemory);
+
+ // Allocate pages for the insecure memory.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateAndOpen(
+ std::addressof(pg), size / PageSize,
+ KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction::FromFront)));
+
+ // Close the opened pages when we're done with them.
+    // If the mapping succeeds, each page will gain an extra reference; otherwise, they will be
+    // freed automatically.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+ static_cast<u32>(m_heap_fill_value), it.GetSize());
+ }
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that the address's state is valid.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties,
+ OperationType::MapGroup, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages,
+ KMemoryState::Insecure, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Update our mapped insecure size.
+ m_mapped_insecure_memory += size;
+
+ // Commit the memory reservation.
+ memory_reservation.Commit();
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Insecure, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the memory.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Update our mapped insecure size.
+ m_mapped_insecure_memory -= size;
+
+ // Release the insecure memory from the insecure limit.
+ if (auto* const insecure_resource_limit =
+ KSystemControl::GetInsecureMemoryResourceLimit(m_kernel);
+ insecure_resource_limit != nullptr) {
+ insecure_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, size);
+ }
+
+ R_SUCCEED();
+}
+
+KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const {
+ KProcessAddress address = 0;
+
+ if (num_pages <= region_num_pages) {
+ if (this->IsAslrEnabled()) {
+ // Try to directly find a free area up to 8 times.
+ for (size_t i = 0; i < 8; i++) {
+ const size_t random_offset =
+ KSystemControl::GenerateRandomRange(
+ 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
+ alignment;
+ const KProcessAddress candidate =
+ Common::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
+
+ KMemoryInfo info;
+ Svc::PageInfo page_info;
+ R_ASSERT(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info),
+ candidate));
+
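+ // Reject the candidate unless it lies in a free block large enough to hold the mapping
+ // and its guard pages within the region.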
+ if (info.m_state != KMemoryState::Free) {
+ continue;
+ }
+ if (!(region_start <= candidate)) {
+ continue;
+ }
+ if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) {
+ continue;
+ }
+ if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+ info.GetLastAddress())) {
+ continue;
+ }
+ if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <=
+ region_start + region_num_pages * PageSize - 1)) {
+ continue;
+ }
+
+ address = candidate;
+ break;
+ }
+ // Fall back to finding the first free area with a random offset.
+ if (address == 0) {
+ // NOTE: Nintendo does not account for guard pages here.
+ // This may theoretically cause an offset to be chosen that cannot be mapped.
+ // We will account for guard pages.
+ const size_t offset_pages = KSystemControl::GenerateRandomRange(
+ 0, region_num_pages - num_pages - guard_pages);
+ address = m_memory_block_manager.FindFreeArea(
+ region_start + offset_pages * PageSize, region_num_pages - offset_pages,
+ num_pages, alignment, offset, guard_pages);
+ }
+ }
+ // Find the first free area.
+ if (address == 0) {
+ address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
+ alignment, offset, guard_pages);
+ }
+ }
+
+ return address;
+}
+
+size_t KPageTableBase::GetSize(KMemoryState state) const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Iterate, counting blocks with the desired state.
+ size_t total_size = 0;
+ for (KMemoryBlockManager::const_iterator it =
+ m_memory_block_manager.FindIterator(m_address_space_start);
+ it != m_memory_block_manager.end(); ++it) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+ if (info.GetState() == state) {
+ total_size += info.GetSize();
+ }
+ }
+
+ return total_size;
+}
+
+size_t KPageTableBase::GetCodeSize() const {
+ return this->GetSize(KMemoryState::Code);
+}
+
+size_t KPageTableBase::GetCodeDataSize() const {
+ return this->GetSize(KMemoryState::CodeData);
+}
+
+size_t KPageTableBase::GetAliasCodeSize() const {
+ return this->GetSize(KMemoryState::AliasCode);
+}
+
+size_t KPageTableBase::GetAliasCodeDataSize() const {
+ return this->GetSize(KMemoryState::AliasCodeData);
+}
+
+Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Create a page group to hold the pages we allocate.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Allocate the pages.
+ R_TRY(
+ m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+
+ // Ensure that the page group is closed when we're done working with it.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()),
+ static_cast<u32>(m_heap_fill_value), it.GetSize());
+ }
+
+ // Map the pages.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::None};
+ R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType::MapGroup,
+ false));
+}
+
+Result KPageTableBase::MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ KProcessAddress cur_address = address;
+
+ // Ensure that we clean up on failure.
+ ON_RESULT_FAILURE {
+ ASSERT(!reuse_ll);
+ if (cur_address != start_address) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(page_list, start_address,
+ (cur_address - start_address) / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ }
+ };
+
+ // Iterate, mapping all pages in the group.
+ for (const auto& block : pg) {
+ // Map and advance.
+ const KPageProperties cur_properties =
+ (cur_address == start_address)
+ ? properties
+ : KPageProperties{properties.perm, properties.io, properties.uncached,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true,
+ cur_properties, OperationType::Map, reuse_ll));
+ cur_address += block.GetSize();
+ }
+
+ // We succeeded!
+ R_SUCCEED();
+}
+
+void KPageTableBase::RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Note the current address, so that we can iterate.
+ const KProcessAddress start_address = address;
+ const KProcessAddress last_address = start_address + size - 1;
+ const KProcessAddress end_address = last_address + 1;
+
+ // Iterate over the memory.
+ auto pg_it = pg.begin();
+ ASSERT(pg_it != pg.end());
+
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ auto it = m_memory_block_manager.FindIterator(start_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Determine the range to map.
+ KProcessAddress map_address = std::max<u64>(info.GetAddress(), GetInteger(start_address));
+ const KProcessAddress map_end_address =
+ std::min<u64>(info.GetEndAddress(), GetInteger(end_address));
+ ASSERT(map_end_address != map_address);
+
+ // Determine if we should disable head merge.
+ const bool disable_head_merge =
+ info.GetAddress() >= GetInteger(start_address) &&
+ True(info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Normal);
+ const KPageProperties map_properties = {
+ info.GetPermission(), false, false,
+ disable_head_merge ? DisableMergeAttribute::DisableHead : DisableMergeAttribute::None};
+
+ // While we have pages to map, map them.
+ size_t map_pages = (map_end_address - map_address) / PageSize;
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ R_ASSERT(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true,
+ map_properties, OperationType::Map, true));
+
+ // Advance.
+ map_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ }
+
+ // Check that we re-mapped precisely the page group.
+ ASSERT((++pg_it) == pg.end());
+}
+
+Result KPageTableBase::MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // We're making a new group, not adding to an existing one.
+ R_UNLESS(pg.empty(), ResultInvalidCurrentMemory);
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr),
+ ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
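+ // The traversal may begin mid-block, so only count the portion of the first block at or
+ // after the target address.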
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, adding to group as we go.
+ while (tot_size < size) {
+ R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)),
+ ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we add the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Add the last block.
+ const size_t cur_pages = cur_size / PageSize;
+ R_UNLESS(IsHeapPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ R_SUCCEED();
+}
+
+bool KPageTableBase::IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr,
+ size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // Empty groups are necessarily invalid.
+ if (pg.empty()) {
+ return false;
+ }
+
+ auto& impl = this->GetImpl();
+
+ // We're going to validate that the group we'd expect is the group we see.
+ auto cur_it = pg.begin();
+ KPhysicalAddress cur_block_address = cur_it->GetAddress();
+ size_t cur_block_pages = cur_it->GetNumPages();
+
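+ // Helper to advance to the next block of the expected page group once the current block
+ // is exhausted.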
+ auto UpdateCurrentIterator = [&]() {
+ if (cur_block_pages == 0) {
+ if ((++cur_it) == pg.end()) {
+ return false;
+ }
+
+ cur_block_address = cur_it->GetAddress();
+ cur_block_pages = cur_it->GetNumPages();
+ }
+ return true;
+ };
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
+ return false;
+ }
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, comparing expected to actual.
+ while (tot_size < size) {
+ if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
+ return false;
+ }
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ if (!IsHeapPhysicalAddress(cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+ return false;
+ }
+
+ cur_block_address += cur_size;
+ cur_block_pages -= cur_pages;
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we compare the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ if (!IsHeapPhysicalAddress(cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
+Result KPageTableBase::GetContiguousMemoryRangeWithState(
+ MemoryRange* out, KProcessAddress address, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ auto& impl = this->GetImpl();
+
+ // Begin a traversal.
+ TraversalContext context;
+ TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address),
+ ResultInvalidCurrentMemory);
+
+ // Traverse until we have enough size or we are no longer physically contiguous.
+ const KPhysicalAddress phys_address = cur_entry.phys_addr;
+ size_t contig_size;
+ for (contig_size =
+ cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1));
+ contig_size < size; contig_size += cur_entry.block_size) {
+ if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
+ break;
+ }
+ if (cur_entry.phys_addr != phys_address + contig_size) {
+ break;
+ }
+ }
+
+ // Take the minimum size for our region.
+ size = std::min(size, contig_size);
+
+ // Check that the memory is contiguous (modulo the reference count bit).
+ const KMemoryState test_state_mask = state_mask | KMemoryState::FlagReferenceCounted;
+ const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ address, size, test_state_mask, state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+ if (!is_heap) {
+ R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask,
+ perm, attr_mask, attr));
+ }
+
+ // The memory is contiguous, so set the output range.
+ out->Set(phys_address, size, is_heap);
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Determine new perm.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ R_SUCCEED_IF(old_perm == new_perm);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissions, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
+ std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::FlagCode, KMemoryState::FlagCode,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Make a new page group for the region.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+
+ // Determine new perm/state.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ KMemoryState new_state = old_state;
+ const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
+ const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+ const bool was_x =
+ (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
+ ASSERT(!(is_w && is_x));
+
+ if (is_w) {
+ switch (old_state) {
+ case KMemoryState::Code:
+ new_state = KMemoryState::CodeData;
+ break;
+ case KMemoryState::AliasCode:
+ new_state = KMemoryState::AliasCodeData;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // Create a page group, if we're setting execute permissions.
+ if (is_x) {
+ R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
+ }
+
+ // Succeed if there's nothing to do.
+ R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {new_perm, false, false, DisableMergeAttribute::None};
+ const auto operation = was_x ? OperationType::ChangePermissionsAndRefreshAndFlush
+ : OperationType::ChangePermissions;
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties, operation,
+ false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Ensure cache coherency, if we're setting pages as executable.
+ if (is_x) {
+ for (const auto& block : pg) {
+ StoreDataCache(GetHeapVirtualPointer(m_kernel, block.GetAddress()), block.GetSize());
+ }
+ InvalidateInstructionCache(m_system, addr, size);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr) {
+ const size_t num_pages = size / PageSize;
+ ASSERT((mask | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Verify we can change the memory attribute.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ constexpr KMemoryAttribute AttributeTestMask =
+ ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
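+ // Require FlagCanChangeAttribute when changing Uncached, and FlagCanPermissionLock when
+ // changing PermissionLocked.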
+ const KMemoryState state_test_mask =
+ (True(mask & KMemoryAttribute::Uncached) ? KMemoryState::FlagCanChangeAttribute
+ : KMemoryState::None) |
+ (True(mask & KMemoryAttribute::PermissionLocked) ? KMemoryState::FlagCanPermissionLock
+ : KMemoryState::None);
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_test_mask, state_test_mask,
+ KMemoryPermission::None, KMemoryPermission::None,
+ AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // If we need to, perform a change attribute operation.
+ if (True(mask & KMemoryAttribute::Uncached)) {
+ // Determine the new attribute.
+ const KMemoryAttribute new_attr =
+ static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
+
+ // Perform operation.
+ const KPageProperties properties = {old_perm, false,
+ True(new_attr & KMemoryAttribute::Uncached),
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, 0, false, properties,
+ OperationType::ChangePermissionsAndRefreshAndFlush, false));
+ }
+
+ // Update the blocks.
+ m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
+ // Lock the physical memory mutex.
+ KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+
+ // Try to perform a reduction in heap, instead of an extension.
+ KProcessAddress cur_address;
+ size_t allocation_size;
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate that setting heap size is possible at all.
+ R_UNLESS(!m_is_kernel, ResultOutOfMemory);
+ R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start),
+ ResultOutOfMemory);
+ R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory);
+
+ if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+ // The size being requested is less than the current size, so we need to free the end of
+ // the heap.
+
+ // Validate memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(num_allocator_blocks), m_heap_region_start + size,
+ (m_current_heap_end - m_heap_region_start) - size, KMemoryState::All,
+ KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the end of the heap.
+ const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, 0,
+ false, unmap_properties, OperationType::Unmap, false));
+
+ // Release the memory from the resource limit.
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ num_pages * PageSize);
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
+ num_pages, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None);
+
+ // Update the current heap end.
+ m_current_heap_end = m_heap_region_start + size;
+
+ // Set the output.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
+ // The size requested is exactly the current size.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ } else {
+ // We have to allocate memory. Determine how much to allocate and where while the table
+ // is locked.
+ cur_address = m_current_heap_end;
+ allocation_size = size - (m_current_heap_end - m_heap_region_start);
+ }
+ }
+
+ // Reserve memory for the heap extension.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, allocation_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the heap extension.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize,
+ m_allocate_option));
+
+ // Close the opened pages when we're done with them.
+ // If the mapping succeeds, each page will gain an extra reference; otherwise, they will be
+ // freed automatically.
+ SCOPE_EXIT({ pg.Close(); });
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg) {
+ std::memset(GetHeapVirtualPointer(m_kernel, it.GetAddress()), m_heap_fill_value,
+ it.GetSize());
+ }
+
+ // Map the pages.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Ensure that the heap hasn't changed since we began executing.
+ ASSERT(cur_address == m_current_heap_end);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end,
+ allocation_size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(
+ std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ const size_t num_pages = allocation_size / PageSize;
+ const KPageProperties map_properties = {KMemoryPermission::UserReadWrite, false, false,
+ (m_current_heap_end == m_heap_region_start)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg,
+ map_properties, OperationType::MapGroup, false));
+
+ // We succeeded, so commit our memory reservation.
+ memory_reservation.Commit();
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(
+ std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Update the current heap end.
+ m_current_heap_end = m_heap_region_start + size;
+
+ // Set the output.
+ *out = m_heap_region_start;
+ R_SUCCEED();
+ }
+}
+
+Result KPageTableBase::SetMaxHeapSize(size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Only process page tables are allowed to set heap size.
+ ASSERT(!this->IsKernel());
+
+ m_max_heap_size = size;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const {
+ // If the address is invalid, create a fake block.
+ if (!this->Contains(addr, 1)) {
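+ // Report an Inaccessible block spanning from the end of the address space to the top of
+ // the addressable range.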
+ *out_info = {
+ .m_address = GetInteger(m_address_space_end),
+ .m_size = 0 - GetInteger(m_address_space_end),
+ .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
+ .m_device_disable_merge_left_count = 0,
+ .m_device_disable_merge_right_count = 0,
+ .m_ipc_lock_count = 0,
+ .m_device_use_count = 0,
+ .m_ipc_disable_merge_count = 0,
+ .m_permission = KMemoryPermission::None,
+ .m_attribute = KMemoryAttribute::None,
+ .m_original_permission = KMemoryPermission::None,
+ .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
+ };
+ out_page_info->flags = 0;
+
+ R_SUCCEED();
+ }
+
+ // Otherwise, lock the table and query.
+ KScopedLightLock lk(m_general_lock);
+ R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
+}
+
+Result KPageTableBase::QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out,
+ KProcessAddress address) const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Align the address down to page size.
+ address = Common::AlignDown(GetInteger(address), PageSize);
+
+ // Verify that we can query the address.
+ KMemoryInfo info;
+ Svc::PageInfo page_info;
+ R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryState(info, KMemoryState::FlagCanQueryPhysical,
+ KMemoryState::FlagCanQueryPhysical,
+ KMemoryPermission::UserReadExecute, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Prepare to traverse.
+ KPhysicalAddress phys_addr;
+ size_t phys_size;
+
+ KProcessAddress virt_addr = info.GetAddress();
+ KProcessAddress end_addr = info.GetEndAddress();
+
+ // Perform traversal.
+ {
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ m_impl->BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Set tracking variables.
+ phys_addr = next_entry.phys_addr;
+ phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+
+ // Iterate.
+ while (true) {
+ // Continue the traversal.
+ traverse_valid =
+ m_impl->ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ if (!traverse_valid) {
+ break;
+ }
+
+ if (next_entry.phys_addr != (phys_addr + phys_size)) {
+ // Check if we're done.
+ if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
+ break;
+ }
+
+ // Advance.
+ phys_addr = next_entry.phys_addr;
+ virt_addr += next_entry.block_size;
+ phys_size =
+ next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+ } else {
+ phys_size += next_entry.block_size;
+ }
+
+ // Check if we're done.
+ if (end_addr < virt_addr + phys_size) {
+ break;
+ }
+ }
+ ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);
+
+ // Ensure we use the right size.
+ if (end_addr < virt_addr + phys_size) {
+ phys_size = end_addr - virt_addr;
+ }
+ }
+
+ // Set the output.
+ out->physical_address = GetInteger(phys_addr);
+ out->virtual_address = GetInteger(virt_addr);
+ out->size = phys_size;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoImpl(KProcessAddress* out, PageLinkedList* page_list,
+ KPhysicalAddress phys_addr, size_t size, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(size > 0);
+
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress last = phys_addr + size - 1;
+
+ // Get region extents.
+ const KProcessAddress region_start = m_kernel_map_region_start;
+ const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
+ const size_t region_num_pages = region_size / PageSize;
+
+ ASSERT(this->CanContain(region_start, region_size, state));
+
+ // Locate the memory region.
+ const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ ASSERT(region->Contains(GetInteger(phys_addr)));
+
+ // Ensure that the region is mappable.
+ const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+ while (true) {
+ // Check that the region exists.
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ // Check the region attributes.
+ R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+ ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+
+ // Check if we're done.
+ if (GetInteger(last) <= region->GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ region = region->GetNext();
+ }
+
+ // Select an address to map at.
+ KProcessAddress addr = 0;
+ {
+ const size_t alignment = 4_KiB;
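+ // Verify that at least one fully-aligned block fits within the physical range, guarding
+ // against overflow in the alignment math.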
+ const KPhysicalAddress aligned_phys =
+ Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+ R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+ const KPhysicalAddress last_aligned_paddr =
+ Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+ R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+ ResultInvalidAddress);
+
+ addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ }
+
+ // Check that we can map IO here.
+ ASSERT(this->CanContain(addr, size, state));
+ R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, state == KMemoryState::IoRegister, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType::Map,
+ false));
+
+ // Set the output address.
+ *out = addr;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the io memory.
+ KProcessAddress addr;
+ R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size,
+ KMemoryState::IoRegister, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize,
+ KMemoryState::IoRegister, perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+ size_t size, Svc::MemoryMapping mapping,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size,
+ KMemoryState::All, KMemoryState::None, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
+ const KPageProperties properties = {perm, mapping == Svc::MemoryMapping::IoRegister,
+ mapping == Svc::MemoryMapping::Uncached,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+
+ // Update the blocks.
+ const auto state =
+ mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister;
+ m_memory_block_manager.Update(
+ std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute::Locked,
+ KMemoryBlockDisableMergeAttribute::Normal, KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr,
+ size_t size, Svc::MemoryMapping mapping) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ KMemoryAttribute old_attr;
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
+ std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+ mapping == Svc::MemoryMapping::Memory ? KMemoryState::IoMemory : KMemoryState::IoRegister,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Validate that the region being unmapped corresponds to the physical range described.
+ {
+ // Get the impl.
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ ASSERT(
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));
+
+ // Check that the physical region matches.
+ R_UNLESS(next_entry.phys_addr == phys_addr, ResultInvalidMemoryRegion);
+
+ // Iterate.
+ for (size_t checked_size =
+ next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
+ checked_size < size; checked_size += next_entry.block_size) {
+ // Continue the traversal.
+ ASSERT(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
+
+ // Check that the physical region matches.
+ R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, ResultInvalidMemoryRegion);
+ }
+ }
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // If the region being unmapped is Memory, synchronize.
+ if (mapping == Svc::MemoryMapping::Memory) {
+ // Change the region to be uncached.
+ const KPageProperties properties = {old_perm, false, true, DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, properties,
+ OperationType::ChangePermissionsAndRefresh, false));
+
+ // Temporarily unlock ourselves, so that other operations can occur while we flush the
+ // region.
+ m_general_lock.Unlock();
+ SCOPE_EXIT({ m_general_lock.Lock(); });
+
+ // Flush the region.
+ R_ASSERT(FlushDataCache(dst_address, size));
+ }
+
+ // Perform the unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(GetInteger(phys_addr), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(size > 0);
+ R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress last = phys_addr + size - 1;
+
+ // Get region extents.
+ const KProcessAddress region_start = this->GetRegionAddress(KMemoryState::Static);
+ const size_t region_size = this->GetRegionSize(KMemoryState::Static);
+ const size_t region_num_pages = region_size / PageSize;
+
+ // Locate the memory region.
+ const KMemoryRegion* region = KMemoryLayout::Find(m_kernel.MemoryLayout(), phys_addr);
+ R_UNLESS(region != nullptr, ResultInvalidAddress);
+
+ ASSERT(region->Contains(GetInteger(phys_addr)));
+ R_UNLESS(GetInteger(last) <= region->GetLastAddress(), ResultInvalidAddress);
+
+ // Check the region attributes.
+ const bool is_rw = perm == KMemoryPermission::UserReadWrite;
+ R_UNLESS(region->IsDerivedFrom(KMemoryRegionType_Dram), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), ResultInvalidAddress);
+ R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw,
+ ResultInvalidAddress);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Select an address to map at.
+ KProcessAddress addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const KPhysicalAddress aligned_phys =
+ Common::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
+ R_UNLESS(aligned_phys > phys_addr, ResultInvalidAddress);
+
+ const KPhysicalAddress last_aligned_paddr =
+ Common::AlignDown(GetInteger(last) + 1, alignment) - 1;
+ R_UNLESS((last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr),
+ ResultInvalidAddress);
+
+ addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ }
+
+ // Check that we can map static here.
+ ASSERT(this->CanContain(addr, size, KMemoryState::Static));
+ R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState::Static,
+ perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+ // Get the memory region.
+ const KMemoryRegion* region =
+ m_kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
+ R_UNLESS(region != nullptr, ResultOutOfRange);
+
+ // Check that the region is valid.
+ ASSERT(region->GetEndAddress() != 0);
+
+ // Map the region.
+ R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) {
+ R_CONVERT(ResultInvalidAddress, ResultOutOfRange)
+ } R_END_TRY_CATCH;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+ // Ensure this is a valid map request.
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(Common::IsAligned(GetInteger(addr), alignment));
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ R_ASSERT(this->CheckMemoryState(
+ addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ if (is_pa_valid) {
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties,
+ OperationType::Map, false));
+ } else {
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
+ }
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ // Check that the map is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Map the pages.
+ R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+ // Check that the unmap is in range.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform the unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Find a random address to map at.
+ KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize,
+ 0, this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ R_ASSERT(this->CheckMemoryState(
+ addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid map request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to map.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size,
+ KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform mapping operation.
+ const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
+ R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // We successfully mapped the pages.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup& pg,
+ KMemoryState state) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Ensure this is a valid unmap request.
+ const size_t num_pages = pg.GetNumPages();
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to unmap.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Check that the page group is valid.
+ R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), ResultInvalidCurrentMemory);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Perform unmapping operation.
+ const KPageProperties properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), address, num_pages, 0, false, properties,
+ OperationType::Unmap, false));
+
+ // Update the blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address,
+ size_t num_pages, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) {
+ // Ensure that the page group isn't null.
+ ASSERT(out != nullptr);
+
+ // Make sure that the region we're mapping is valid for the table.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check if state allows us to create the group.
+ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Create a new page group for the region.
+ R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+ // Open a new reference to the pages in the group.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+ // Check that the region is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Check that the pages are linearly mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Invalidate the block.
+ if (cur_size > 0) {
+ // NOTE: Nintendo does not check the result of invalidation.
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ // Advance.
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Check that the last block is linearly mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Invalidate the last block.
+ if (cur_size > 0) {
+ // NOTE: Nintendo does not check the result of invalidation.
+ InvalidateDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
+ // Check pre-condition: this is being called on the current process.
+ ASSERT(this == std::addressof(GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable()));
+
+ // Check that the region is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserReadWrite, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Uncached, KMemoryAttribute::None));
+
+ // Invalidate the data cache.
+ R_RETURN(InvalidateDataCache(address, size));
+}
+
+Result KPageTableBase::ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lightly validate the region is in range.
+ R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Require that the memory either be user readable or debuggable.
+ const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ src_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::None, KMemoryAttribute::None));
+ if (!can_read) {
+ const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ src_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+ }
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+ auto& dst_memory = GetCurrentMemory(m_system.Kernel());
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ FlushDataCache(copy_src, copy_size);
+ R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, copy_size), ResultInvalidPointer);
+
+ dst_address += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ const void* copy_src = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ FlushDataCache(copy_src, cur_size);
+ R_UNLESS(dst_memory.WriteBlock(dst_address, copy_src, cur_size), ResultInvalidPointer);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ dst_address += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size) {
+ // Lightly validate the region is in range.
+ R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Require that the memory either be user writable or debuggable.
+ const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::None, KMemoryState::None, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+ if (!can_write) {
+ const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::FlagCanDebug, KMemoryState::FlagCanDebug,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
+ R_UNLESS(can_debug, ResultInvalidCurrentMemory);
+ }
+
+ // Get the impl.
+ auto& impl = this->GetImpl();
+ auto& src_memory = GetCurrentMemory(m_system.Kernel());
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address);
+ R_UNLESS(traverse_valid, ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, copy_size),
+ ResultInvalidCurrentMemory);
+
+ StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), copy_size);
+
+ src_address += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ void* copy_dst = GetLinearMappedVirtualPointer(m_kernel, cur_addr);
+ R_UNLESS(src_memory.ReadBlock(src_address, copy_dst, cur_size),
+ ResultInvalidCurrentMemory);
+
+ StoreDataCache(GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ src_address += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+
+ // Invalidate the instruction cache, as this svc allows modifying executable pages.
+ InvalidateInstructionCache(m_system, dst_address, size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr,
+ size_t size, KMemoryState state) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Determine the mapping extents.
+ const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+ const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+ const size_t map_size = map_end - map_start;
+
+ // Get the memory reference to write into.
+ auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Temporarily map the io memory.
+ KProcessAddress io_addr;
+ R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+ state, KMemoryPermission::UserRead));
+
+ // Ensure we unmap the io memory when we're done with it.
+ const KPageProperties unmap_properties =
+ KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+ SCOPE_EXIT({
+ R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ });
+
+ // Read the memory.
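+ // The temporary mapping was aligned down to map_start, so re-apply phys_addr's offset within
+ // its page.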
+ const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+ dst_memory.CopyBlock(dst_addr, read_addr, size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr,
+ size_t size, KMemoryState state) {
+ // Check pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ // Determine the mapping extents.
+ const KPhysicalAddress map_start = Common::AlignDown(GetInteger(phys_addr), PageSize);
+ const KPhysicalAddress map_end = Common::AlignUp(GetInteger(phys_addr) + size, PageSize);
+ const size_t map_size = map_end - map_start;
+
+ // Get the memory reference to read from.
+ auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Temporarily map the io memory.
+ KProcessAddress io_addr;
+ R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size,
+ state, KMemoryPermission::UserReadWrite));
+
+ // Ensure we unmap the io memory when we're done with it.
+ const KPageProperties unmap_properties =
+ KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
+ SCOPE_EXIT({
+ R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ });
+
+ // Write the memory.
+ const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
+ R_UNLESS(src_memory.CopyBlock(write_addr, src_addr, size), ResultInvalidPointer);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size, KMemoryState state) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_address, size), ResultInvalidCurrentMemory);
+
+ // We need to lock both this table, and the current process's table, so set up some aliases.
+ KPageTableBase& src_page_table = *this;
+ KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the desired range is readable io memory.
+ R_TRY(this->CheckMemoryStateContiguous(src_address, size, KMemoryState::All, state,
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Read the memory.
+ KProcessAddress dst = dst_address;
+ const KProcessAddress last_address = src_address + size - 1;
+ while (src_address <= last_address) {
+ // Get the current physical address.
+ KPhysicalAddress phys_addr;
+ ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), src_address));
+
+ // Determine the current read size.
+ const size_t cur_size =
+ std::min<size_t>(last_address - src_address + 1,
+ Common::AlignDown(GetInteger(src_address) + PageSize, PageSize) -
+ GetInteger(src_address));
+
+ // Read.
+ R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
+
+ // Advance.
+ src_address += cur_size;
+ dst += cur_size;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address,
+ size_t size, KMemoryState state) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_address, size), ResultInvalidCurrentMemory);
+
+ // We need to lock both this table, and the current process's table, so set up some aliases.
+ KPageTableBase& src_page_table = *this;
+ KPageTableBase& dst_page_table = GetCurrentProcess(m_kernel).GetPageTable().GetBasePageTable();
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the desired range is writable io memory.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_address, size, KMemoryState::All, state, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Write the memory.
+ KProcessAddress src = src_address;
+ const KProcessAddress last_address = dst_address + size - 1;
+ while (dst_address <= last_address) {
+ // Get the current physical address.
+ KPhysicalAddress phys_addr;
+ ASSERT(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), dst_address));
+
+ // Determine the current write size.
+ const size_t cur_size =
+ std::min<size_t>(last_address - dst_address + 1,
+ Common::AlignDown(GetInteger(dst_address) + PageSize, PageSize) -
+ GetInteger(dst_address));
+
+ // Write.
+ R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
+
+ // Advance.
+ dst_address += cur_size;
+ src += cur_size;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address,
+ size_t size, KMemoryPermission perm,
+ bool is_aligned, bool check_heap) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ const KMemoryState test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+ size_t num_allocator_blocks;
+ KMemoryState old_state;
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
+ std::addressof(num_allocator_blocks), address, size, test_state,
+ test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
+ KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
+
+ // Set whether the locked memory was io.
+ *out_is_io =
+ static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size,
+ bool check_heap) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ const KMemoryState test_state =
+ KMemoryState::FlagCanDeviceMap |
+ (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, test_state, test_state,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ const KMemoryBlockManager::MemoryBlockLockFunction lock_func =
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
+ &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+ // Lightly validate the range before doing anything else.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ size_t allocator_num_blocks = 0;
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(allocator_num_blocks), address, size, KMemoryState::FlagCanDeviceMap,
+ KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // Create an update allocator for the region.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, allocator_num_blocks);
+ R_TRY(allocator_result);
+
+ // Update the memory blocks.
+ m_memory_block_manager.UpdateLock(
+ std::addressof(allocator), address, num_pages,
+ m_enable_device_address_space_merge
+ ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare
+ : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm,
+ bool is_aligned) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ const KMemoryState test_state =
+ (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, test_state, test_state, perm, perm,
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, KMemoryAttribute::None));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out,
+ KProcessAddress address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, KMemoryState::FlagCanDeviceMap, KMemoryState::FlagCanDeviceMap,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address,
+ size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
+ KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, nullptr));
+}
+
+Result KPageTableBase::LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm) {
+ R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size, KMemoryState::FlagCanTransfer,
+ KMemoryState::FlagCanTransfer, KMemoryPermission::All,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None, perm, KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanTransfer,
+ KMemoryState::FlagCanTransfer, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+ R_RETURN(this->LockMemoryAndOpen(
+ out, nullptr, address, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked));
+}
+
+Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size,
+ const KPageGroup& pg) {
+ R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, std::addressof(pg)));
+}
+
+Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange* out,
+ KProcessAddress address,
+ size_t size) {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Get the range.
+ R_TRY(this->GetContiguousMemoryRangeWithState(
+ out, address, size, KMemoryState::FlagReferenceCounted, KMemoryState::FlagReferenceCounted,
+ KMemoryPermission::UserRead, KMemoryPermission::UserRead, KMemoryAttribute::Uncached,
+ KMemoryAttribute::None));
+
+ // We got the range, so open it.
+ out->Open();
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToUser(
+ KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+ KMemoryAttribute src_attr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+ // Get the destination memory reference.
+ auto& dst_memory = GetCurrentMemory(m_kernel);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ R_UNLESS(dst_memory.WriteBlock(dst_addr,
+ GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+ copy_size),
+ ResultInvalidCurrentMemory);
+
+ dst_addr += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ R_UNLESS(dst_memory.WriteBlock(
+ dst_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+ ResultInvalidCurrentMemory);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ dst_addr += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromLinearToKernel(
+ void* buffer, size_t size, KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm, KMemoryAttribute src_attr_mask,
+ KMemoryAttribute src_attr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(src_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(buffer, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size);
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromUserToLinear(
+ KProcessAddress dst_addr, size_t size, KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm, KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Get the source memory reference.
+ auto& src_memory = GetCurrentMemory(m_kernel);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy as much aligned data as we can.
+ if (cur_size >= sizeof(u32)) {
+ const size_t copy_size = Common::AlignDown(cur_size, sizeof(u32));
+ R_UNLESS(src_memory.ReadBlock(src_addr,
+ GetLinearMappedVirtualPointer(m_kernel, cur_addr),
+ copy_size),
+ ResultInvalidCurrentMemory);
+ src_addr += copy_size;
+ cur_addr += copy_size;
+ cur_size -= copy_size;
+ }
+
+ // Copy remaining data.
+ if (cur_size > 0) {
+ R_UNLESS(src_memory.ReadBlock(
+ src_addr, GetLinearMappedVirtualPointer(m_kernel, cur_addr), cur_size),
+ ResultInvalidCurrentMemory);
+ }
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ src_addr += cur_size;
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask,
+ KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask,
+ KMemoryAttribute dst_attr, void* buffer) {
+ // Lightly validate the range before doing anything else.
+ R_UNLESS(this->Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Check memory state.
+ R_TRY(this->CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ auto& impl = this->GetImpl();
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid =
+ impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_addr = next_entry.phys_addr;
+ size_t cur_size =
+ next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ auto PerformCopy = [&]() -> Result {
+ // Ensure the address is linear mapped.
+ R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetLinearMappedVirtualPointer(m_kernel, cur_addr), buffer, cur_size);
+
+ R_SUCCEED();
+ };
+
+ // Iterate.
+ while (tot_size < size) {
+ // Continue the traversal.
+ traverse_valid =
+ impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ // Perform copy.
+ R_TRY(PerformCopy());
+
+ // Advance.
+ buffer = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we use the right size for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Perform copy for the last block.
+ R_TRY(PerformCopy());
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeap(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ // For convenience, alias this.
+ KPageTableBase& src_page_table = *this;
+
+ // Lightly validate the ranges before doing anything else.
+ R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check memory state.
+ R_TRY(src_page_table.CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+ R_TRY(dst_page_table.CheckMemoryStateContiguous(
+ dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm,
+ dst_attr_mask | KMemoryAttribute::Uncached, dst_attr));
+
+ // Get implementations.
+ auto& src_impl = src_page_table.GetImpl();
+ auto& dst_impl = dst_page_table.GetImpl();
+
+ // Prepare for traversal.
+ TraversalContext src_context;
+ TraversalContext dst_context;
+ TraversalEntry src_next_entry;
+ TraversalEntry dst_next_entry;
+ bool traverse_valid;
+
+ // Begin traversal.
+ traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context), src_addr);
+ ASSERT(traverse_valid);
+ traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+ KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+ size_t cur_src_size = src_next_entry.block_size -
+ (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+ size_t cur_dst_size = dst_next_entry.block_size -
+ (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+ // Adjust the initial block sizes.
+ src_next_entry.block_size = cur_src_size;
+ dst_next_entry.block_size = cur_dst_size;
+
+ // Before we get any crazier, succeed if there's nothing to do.
+ R_SUCCEED_IF(size == 0);
+
+ // We're going to manage dual traversal via an offset against the total size.
+ KPhysicalAddress cur_src_addr = cur_src_block_addr;
+ KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+ size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
+
+ // Iterate.
+ size_t ofs = 0;
+ while (ofs < size) {
+ // Determine how much we can copy this iteration.
+ const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+ // If we need to advance the traversals, do so.
+ bool updated_src = false, updated_dst = false, skip_copy = false;
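+ // updated_src/updated_dst mean the next block is not physically contiguous with the current
+ // copy position; if both sides remain contiguous, the copy is skipped so the contiguous runs
+ // can be merged and copied in one larger chunk.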
+ if (ofs + cur_copy_size != size) {
+ if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+ // Continue the src traversal.
+ traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context));
+ ASSERT(traverse_valid);
+
+ // Update source.
+ updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+ }
+
+ if (cur_dst_addr + cur_min_size ==
+ dst_next_entry.phys_addr + dst_next_entry.block_size) {
+ // Continue the dst traversal.
+ traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context));
+ ASSERT(traverse_valid);
+
+ // Update destination.
+ updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+ }
+
+ // If we didn't update either of source/destination, skip the copy this iteration.
+ if (!updated_src && !updated_dst) {
+ skip_copy = true;
+
+ // Update the source block address.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ }
+ }
+
+ // Do the copy, unless we're skipping it.
+ if (!skip_copy) {
+ // We need both ends of the copy to be heap blocks.
+ R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+ R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+ GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+ // Update.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+ cur_dst_block_addr = dst_next_entry.phys_addr;
+ cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+ // Advance offset.
+ ofs += cur_copy_size;
+ }
+
+ // Update min size.
+ cur_src_size = src_next_entry.block_size;
+ cur_dst_size = dst_next_entry.block_size;
+ cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+ cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ // For convenience, alias this.
+ KPageTableBase& src_page_table = *this;
+
+ // Lightly validate the ranges before doing anything else.
+ R_UNLESS(src_page_table.Contains(src_addr, size), ResultInvalidCurrentMemory);
+ R_UNLESS(dst_page_table.Contains(dst_addr, size), ResultInvalidCurrentMemory);
+
+ // Copy the memory.
+ {
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check memory state for source.
+ R_TRY(src_page_table.CheckMemoryStateContiguous(
+ src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm,
+ src_attr_mask | KMemoryAttribute::Uncached, src_attr));
+
+ // Destination state is intentionally unchecked.
+
+ // Get implementations.
+ auto& src_impl = src_page_table.GetImpl();
+ auto& dst_impl = dst_page_table.GetImpl();
+
+ // Prepare for traversal.
+ TraversalContext src_context;
+ TraversalContext dst_context;
+ TraversalEntry src_next_entry;
+ TraversalEntry dst_next_entry;
+ bool traverse_valid;
+
+ // Begin traversal.
+ traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context), src_addr);
+ ASSERT(traverse_valid);
+ traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context), dst_addr);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
+ KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
+ size_t cur_src_size = src_next_entry.block_size -
+ (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
+ size_t cur_dst_size = dst_next_entry.block_size -
+ (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
+
+ // Adjust the initial block sizes.
+ src_next_entry.block_size = cur_src_size;
+ dst_next_entry.block_size = cur_dst_size;
+
+ // Before we get any crazier, succeed if there's nothing to do.
+ R_SUCCEED_IF(size == 0);
+
+ // We're going to manage dual traversal via an offset against the total size.
+ KPhysicalAddress cur_src_addr = cur_src_block_addr;
+ KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
+ size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
+
+ // Iterate.
+ size_t ofs = 0;
+ while (ofs < size) {
+ // Determine how much we can copy this iteration.
+ const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
+
+ // If we need to advance the traversals, do so.
+ bool updated_src = false, updated_dst = false, skip_copy = false;
+ if (ofs + cur_copy_size != size) {
+ if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
+ // Continue the src traversal.
+ traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry),
+ std::addressof(src_context));
+ ASSERT(traverse_valid);
+
+ // Update source.
+ updated_src = cur_src_addr + cur_min_size != src_next_entry.phys_addr;
+ }
+
+ if (cur_dst_addr + cur_min_size ==
+ dst_next_entry.phys_addr + dst_next_entry.block_size) {
+ // Continue the dst traversal.
+ traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry),
+ std::addressof(dst_context));
+ ASSERT(traverse_valid);
+
+ // Update destination.
+ updated_dst = cur_dst_addr + cur_min_size != dst_next_entry.phys_addr;
+ }
+
+ // If we didn't update either of source/destination, skip the copy this iteration.
+ if (!updated_src && !updated_dst) {
+ skip_copy = true;
+
+ // Update the source block address.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ }
+ }
+
+ // Do the copy, unless we're skipping it.
+ if (!skip_copy) {
+ // We need both ends of the copy to be heap blocks.
+ R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), ResultInvalidCurrentMemory);
+ R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), ResultInvalidCurrentMemory);
+
+ // Copy the data.
+ std::memcpy(GetHeapVirtualPointer(m_kernel, cur_dst_addr),
+ GetHeapVirtualPointer(m_kernel, cur_src_addr), cur_copy_size);
+
+ // Update.
+ cur_src_block_addr = src_next_entry.phys_addr;
+ cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
+ cur_dst_block_addr = dst_next_entry.phys_addr;
+ cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
+
+ // Advance offset.
+ ofs += cur_copy_size;
+ }
+
+ // Update min size.
+ cur_src_size = src_next_entry.block_size;
+ cur_dst_size = dst_next_entry.block_size;
+ cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size,
+ cur_dst_block_addr - cur_dst_addr + cur_dst_size);
+ }
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size,
+ KMemoryPermission test_perm, KMemoryState dst_state) {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
+ test_perm == KMemoryPermission::UserRead);
+
+ // Check that the address is in range.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get the source permission.
+ const auto src_perm = static_cast<KMemoryPermission>(
+ (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead);
+
+ // Get aligned extents.
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_src_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+
+ const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
+ const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
+ src_perm);
+ }
+ };
+
+ size_t blocks_needed = 0;
+
+ // Iterate, mapping as needed.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Validate the current block.
+ R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
+ test_attr_mask, KMemoryAttribute::None));
+
+ if (mapping_src_start < mapping_src_end &&
+ GetInteger(mapping_src_start) < info.GetEndAddress() &&
+ info.GetAddress() < GetInteger(mapping_src_end)) {
+ const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start)
+ ? info.GetAddress()
+ : GetInteger(mapping_src_start);
+ const auto cur_end = mapping_src_last >= info.GetLastAddress()
+ ? info.GetEndAddress()
+ : GetInteger(mapping_src_end);
+ const size_t cur_size = cur_end - cur_start;
+
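+ // A block that straddles the mapping start or end will be split by the permission change,
+ // so reserve an extra memory block for each boundary it crosses.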
+ if (info.GetAddress() < GetInteger(mapping_src_start)) {
+ ++blocks_needed;
+ }
+ if (mapping_src_last < info.GetLastAddress()) {
+ ++blocks_needed;
+ }
+
+ // Set the permissions on the block, if we need to.
+ if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
+ const DisableMergeAttribute head_body_attr =
+ (GetInteger(mapping_src_start) >= info.GetAddress())
+ ? DisableMergeAttribute::DisableHeadAndBody
+ : DisableMergeAttribute::None;
+ const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end))
+ ? DisableMergeAttribute::DisableTail
+ : DisableMergeAttribute::None;
+ const KPageProperties properties = {
+ src_perm, false, false,
+ static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, 0, false, properties,
+ OperationType::ChangePermissions, false));
+ }
+
+ // Note that we mapped this part.
+ mapped_size += cur_size;
+ }
+
+ // If the block is at the end, we're done.
+ if (aligned_src_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+
+ if (out_blocks_needed != nullptr) {
+ ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ *out_blocks_needed = blocks_needed;
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
+ KProcessAddress src_addr, KMemoryPermission test_perm,
+ KMemoryState dst_state, KPageTableBase& src_page_table,
+ bool send) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(src_page_table.IsLockedByCurrentThread());
+
+ // Check that we can theoretically map.
+ const KProcessAddress region_start = m_alias_region_start;
+ const size_t region_size = m_alias_region_end - m_alias_region_start;
+ R_UNLESS(size < region_size, ResultOutOfAddressSpace);
+
+ // Get aligned source extents.
+ const KProcessAddress src_start = src_addr;
+ const KProcessAddress src_end = src_addr + size;
+ const KProcessAddress aligned_src_start = Common::AlignDown(GetInteger(src_start), PageSize);
+ const KProcessAddress aligned_src_end = Common::AlignUp(GetInteger(src_start) + size, PageSize);
+ const KProcessAddress mapping_src_start = Common::AlignUp(GetInteger(src_start), PageSize);
+ const KProcessAddress mapping_src_end =
+ Common::AlignDown(GetInteger(src_start) + size, PageSize);
+ const size_t aligned_src_size = aligned_src_end - aligned_src_start;
+ const size_t mapping_src_size =
+ (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
+
+ // Select a random address to map at.
+ KProcessAddress dst_addr = 0;
+ {
+ const size_t alignment = 4_KiB;
+ const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
+
+ dst_addr =
+ this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
+ alignment, offset, this->GetNumGuardPages());
+ R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
+ }
+
+ // Check that we can perform the operation we're about to perform.
+ ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Reserve space for any partial pages we allocate.
+ const size_t unmapped_size = aligned_src_size - mapping_src_size;
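+ // unmapped_size covers the unaligned head/tail of the buffer, which will be backed by newly
+ // allocated partial pages rather than the client's own pages.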
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, unmapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Ensure that we manage page references correctly.
+ KPhysicalAddress start_partial_page = 0;
+ KPhysicalAddress end_partial_page = 0;
+ KProcessAddress cur_mapped_addr = dst_addr;
+
+ // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
+ // be freed on scope exit.
+ SCOPE_EXIT({
+ if (start_partial_page != 0) {
+ m_kernel.MemoryManager().Close(start_partial_page, 1);
+ }
+ if (end_partial_page != 0) {
+ m_kernel.MemoryManager().Close(end_partial_page, 1);
+ }
+ });
+
+ ON_RESULT_FAILURE {
+ if (cur_mapped_addr != dst_addr) {
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), dst_addr,
+ (cur_mapped_addr - dst_addr) / PageSize, 0, false,
+ unmap_properties, OperationType::Unmap, true));
+ }
+ };
+
+ // Allocate the start page as needed.
+ if (aligned_src_start < mapping_src_start) {
+ start_partial_page =
+ m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Allocate the end page as needed.
+ if (mapping_src_end < aligned_src_end &&
+ (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
+ end_partial_page =
+ m_kernel.MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
+ R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
+ }
+
+ // Get the implementation.
+ auto& src_impl = src_page_table.GetImpl();
+
+ // Get the fill value for partial pages.
+ const auto fill_val = m_ipc_fill_value;
+
+ // Begin traversal.
+ TraversalContext context;
+ TraversalEntry next_entry;
+ bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry),
+ std::addressof(context), aligned_src_start);
+ ASSERT(traverse_valid);
+
+ // Prepare tracking variables.
+ KPhysicalAddress cur_block_addr = next_entry.phys_addr;
+ size_t cur_block_size =
+ next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
+ size_t tot_block_size = cur_block_size;
+
+ // Map the start page, if we have one.
+ if (start_partial_page != 0) {
+ // Ensure the page holds correct data.
+ u8* const start_partial_virt = GetHeapVirtualPointer(m_kernel, start_partial_page);
+ if (send) {
+ const size_t partial_offset = src_start - aligned_src_start;
+ size_t copy_size, clear_size;
+ if (src_end < mapping_src_start) {
+ copy_size = size;
+ clear_size = mapping_src_start - src_end;
+ } else {
+ copy_size = mapping_src_start - src_start;
+ clear_size = 0;
+ }
+
+ std::memset(start_partial_virt, fill_val, partial_offset);
+ std::memcpy(start_partial_virt + partial_offset,
+ GetHeapVirtualPointer(m_kernel, cur_block_addr) + partial_offset,
+ copy_size);
+ if (clear_size > 0) {
+ std::memset(start_partial_virt + partial_offset + copy_size, fill_val, clear_size);
+ }
+ } else {
+ std::memset(start_partial_virt, fill_val, PageSize);
+ }
+
+ // Map the page.
+ const KPageProperties start_map_properties = {test_perm, false, false,
+ DisableMergeAttribute::DisableHead};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true,
+ start_map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += PageSize;
+ cur_block_addr += PageSize;
+ cur_block_size -= PageSize;
+
+ // If the block's size was one page, we may need to continue traversal.
+ if (cur_block_size == 0 && aligned_src_size > PageSize) {
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ tot_block_size += next_entry.block_size;
+ }
+ }
+
+ // Map the remaining pages.
+ while (aligned_src_start + tot_block_size < mapping_src_end) {
+ // Continue the traversal.
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ // Process the block.
+ if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
+ // Map the block we've been processing so far.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize,
+ cur_block_addr, true, map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += cur_block_size;
+ cur_block_addr = next_entry.phys_addr;
+ cur_block_size = next_entry.block_size;
+ } else {
+ cur_block_size += next_entry.block_size;
+ }
+ tot_block_size += next_entry.block_size;
+ }
+
+ // Handle the last direct-mapped page.
+ if (const KProcessAddress mapped_block_end =
+ aligned_src_start + tot_block_size - cur_block_size;
+ mapped_block_end < mapping_src_end) {
+ const size_t last_block_size = mapping_src_end - mapped_block_end;
+
+ // Map the last block.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize,
+ cur_block_addr, true, map_properties, OperationType::Map, false));
+
+ // Update tracking extents.
+ cur_mapped_addr += last_block_size;
+ cur_block_addr += last_block_size;
+ if (mapped_block_end + cur_block_size < aligned_src_end &&
+ cur_block_size == last_block_size) {
+ traverse_valid =
+ src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+ ASSERT(traverse_valid);
+
+ cur_block_addr = next_entry.phys_addr;
+ }
+ }
+
+ // Map the end page, if we have one.
+ if (end_partial_page != 0) {
+ // Ensure the page holds correct data.
+ u8* const end_partial_virt = GetHeapVirtualPointer(m_kernel, end_partial_page);
+ if (send) {
+ const size_t copy_size = src_end - mapping_src_end;
+ std::memcpy(end_partial_virt, GetHeapVirtualPointer(m_kernel, cur_block_addr),
+ copy_size);
+ std::memset(end_partial_virt + copy_size, fill_val, PageSize - copy_size);
+ } else {
+ std::memset(end_partial_virt, fill_val, PageSize);
+ }
+
+ // Map the page.
+ const KPageProperties map_properties = {test_perm, false, false,
+ (cur_mapped_addr == dst_addr)
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true,
+ map_properties, OperationType::Map, false));
+ }
+
+    // Update memory blocks to reflect our changes.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
+ dst_state, test_perm, KMemoryAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ // Set the output address.
+ *out_addr = dst_addr + (src_start - aligned_src_start);
+
+ // We succeeded.
+ memory_reservation.Commit();
+ R_SUCCEED();
+}
+
+Result KPageTableBase::SetupForIpc(KProcessAddress* out_dst_addr, size_t size,
+ KProcessAddress src_addr, KPageTableBase& src_page_table,
+ KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
+ // For convenience, alias this.
+ KPageTableBase& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(std::addressof(src_page_table));
+
+ // Perform client setup.
+ size_t num_allocator_blocks;
+ R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
+ std::addressof(num_allocator_blocks), src_addr, size,
+ test_perm, dst_state));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ src_page_table.m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // Get the mapped extents.
+ const KProcessAddress src_map_start = Common::AlignUp(GetInteger(src_addr), PageSize);
+ const KProcessAddress src_map_end = Common::AlignDown(GetInteger(src_addr) + size, PageSize);
+ const size_t src_map_size = src_map_end - src_map_start;
+
+ // Ensure that we clean up appropriately if we fail after this.
+ const auto src_perm = static_cast<KMemoryPermission>(
+ (test_perm == KMemoryPermission::UserReadWrite)
+ ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
+ : KMemoryPermission::UserRead);
+ ON_RESULT_FAILURE {
+ if (src_map_end > src_map_start) {
+ src_page_table.CleanupForIpcClientOnServerSetupFailure(
+ updater.GetPageList(), src_map_start, src_map_size, src_perm);
+ }
+ };
+
+ // Perform server setup.
+ R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
+ src_page_table, send));
+
+ // If anything was mapped, ipc-lock the pages.
+ if (src_map_start < src_map_end) {
+ // Get the source permission.
+ src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
+ (src_map_end - src_map_start) / PageSize,
+ &KMemoryBlock::LockForIpc, src_perm);
+ }
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Validate the memory state.
+ size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, dst_state, KMemoryPermission::UserRead,
+ KMemoryPermission::UserRead, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Get aligned extents.
+ const KProcessAddress aligned_start = Common::AlignDown(GetInteger(address), PageSize);
+ const KProcessAddress aligned_end = Common::AlignUp(GetInteger(address) + size, PageSize);
+ const size_t aligned_size = aligned_end - aligned_start;
+ const size_t aligned_num_pages = aligned_size / PageSize;
+
+ // Unmap the pages.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
+ KMemoryState::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ // Release from the resource limit as relevant.
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ aligned_size - mapping_size);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size,
+ KMemoryState dst_state) {
+ // Validate the address.
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Get aligned source extents.
+ const KProcessAddress mapping_start = Common::AlignUp(GetInteger(address), PageSize);
+ const KProcessAddress mapping_end = Common::AlignDown(GetInteger(address) + size, PageSize);
+ const KProcessAddress mapping_last = mapping_end - 1;
+ const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
+
+ // If nothing was mapped, we're actually done immediately.
+ R_SUCCEED_IF(mapping_size == 0);
+
+ // Get the test state and attribute mask.
+ KMemoryState test_state;
+ KMemoryAttribute test_attr_mask;
+ switch (dst_state) {
+ case KMemoryState::Ipc:
+ test_state = KMemoryState::FlagCanUseIpc;
+ test_attr_mask =
+ KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonSecureIpc:
+ test_state = KMemoryState::FlagCanUseNonSecureIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ case KMemoryState::NonDeviceIpc:
+ test_state = KMemoryState::FlagCanUseNonDeviceIpc;
+ test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
+ break;
+ default:
+ R_THROW(ResultInvalidCombination);
+ }
+
+ // Lock the table.
+ // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
+ // convention elsewhere in KPageTableBase.
+ KScopedLightLock lk(m_general_lock);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Ensure that on failure, we roll back appropriately.
+ size_t mapped_size = 0;
+ ON_RESULT_FAILURE {
+ if (mapped_size > 0) {
+ // Determine where the mapping ends.
+ const auto mapped_end = GetInteger(mapping_start) + mapped_size;
+ const auto mapped_last = mapped_end - 1;
+
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Get the current block info.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+
+ // Create tracking variables.
+ KProcessAddress cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first = cur_info.GetIpcDisableMergeCount() == 1 &&
+ False(cur_info.GetDisableMergeAttribute() &
+ KMemoryBlockDisableMergeAttribute::Locked);
+
+ while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a
+ // single call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_info.GetPermission(), false, false,
+ first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+ : DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+ cur_size / PageSize, 0, false, properties,
+ OperationType::ChangePermissions, true));
+ }
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
+ if ((first || cur_needs_set_perm) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_info.GetPermission(), false, false,
+ first ? DisableMergeAttribute::EnableAndMergeHeadBodyTail
+ : DisableMergeAttribute::None};
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+ false, properties, OperationType::ChangePermissions, true));
+ }
+ }
+ };
+
+ // Iterate, reprotecting as needed.
+ {
+ // Get current and next iterators.
+ KMemoryBlockManager::const_iterator start_it =
+ m_memory_block_manager.FindIterator(mapping_start);
+ KMemoryBlockManager::const_iterator next_it = start_it;
+ ++next_it;
+
+ // Validate the current block.
+ KMemoryInfo cur_info = start_it->GetMemoryInfo();
+ R_ASSERT(this->CheckMemoryState(
+ cur_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+ // Create tracking variables.
+ KProcessAddress cur_address = cur_info.GetAddress();
+ size_t cur_size = cur_info.GetSize();
+ bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
+ bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
+ bool first =
+ cur_info.GetIpcDisableMergeCount() == 1 &&
+ False(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked);
+
+ while ((cur_address + cur_size - 1) < mapping_last) {
+ // Check that we have a next block.
+ ASSERT(next_it != m_memory_block_manager.end());
+
+ // Get the next info.
+ const KMemoryInfo next_info = next_it->GetMemoryInfo();
+
+ // Validate the next block.
+ R_ASSERT(this->CheckMemoryState(
+ next_info, test_state, test_state, KMemoryPermission::None, KMemoryPermission::None,
+ test_attr_mask | KMemoryAttribute::IpcLocked, KMemoryAttribute::IpcLocked));
+
+ // Check if we can consolidate the next block's permission set with the current one.
+ const bool next_perm_eq =
+ next_info.GetPermission() == next_info.GetOriginalPermission();
+ const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
+ if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
+ cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
+ // We can consolidate the reprotection for the current and next block into a single
+ // call.
+ cur_size += next_info.GetSize();
+ } else {
+ // We have to operate on the current block.
+ if ((cur_needs_set_perm || first) && !cur_perm_eq) {
+ const KPageProperties properties = {
+ cur_needs_set_perm ? cur_info.GetOriginalPermission()
+ : cur_info.GetPermission(),
+ false, false,
+ first ? DisableMergeAttribute::EnableHeadAndBody
+ : DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0,
+ false, properties, OperationType::ChangePermissions,
+ false));
+ }
+
+ // Mark that we mapped the block.
+ mapped_size += cur_size;
+
+ // Advance.
+ cur_address = next_info.GetAddress();
+ cur_size = next_info.GetSize();
+ first = false;
+ }
+
+ // Advance.
+ cur_info = next_info;
+ cur_perm_eq = next_perm_eq;
+ cur_needs_set_perm = next_needs_set_perm;
+ ++next_it;
+ }
+
+ // Process the last block.
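+        // Determine the ipc lock count for the final block, taking into account the block that follows it (if any).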
+ const auto lock_count =
+ cur_info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
+ const DisableMergeAttribute head_body_attr =
+ first ? DisableMergeAttribute::EnableHeadAndBody : DisableMergeAttribute::None;
+ const DisableMergeAttribute tail_attr =
+ lock_count == 1 ? DisableMergeAttribute::EnableTail : DisableMergeAttribute::None;
+ const KPageProperties properties = {
+ cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(),
+ false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, 0, false,
+ properties, OperationType::ChangePermissions, false));
+ }
+ }
+
+ // Create an update allocator.
+ // NOTE: Guaranteed zero blocks needed here.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, 0);
+ R_TRY(allocator_result);
+
+ // Unlock the pages.
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
+ mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
+ KMemoryPermission::None);
+
+ R_SUCCEED();
+}
+
+void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list,
+ KProcessAddress address, size_t size,
+ KMemoryPermission prot_perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
+
+ // Get the mapped extents.
+ const KProcessAddress src_map_start = address;
+ const KProcessAddress src_map_end = address + size;
+ const KProcessAddress src_map_last = src_map_end - 1;
+
+ // This function is only invoked when there's something to do.
+ ASSERT(src_map_end > src_map_start);
+
+ // Iterate over blocks, fixing permissions.
+ KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
+ while (true) {
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const auto cur_start = info.GetAddress() >= GetInteger(src_map_start)
+ ? info.GetAddress()
+ : GetInteger(src_map_start);
+ const auto cur_end =
+ src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
+
+ // If we can, fix the protections on the block.
+ if ((info.GetIpcLockCount() == 0 &&
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
+ (info.GetIpcLockCount() != 0 &&
+ (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
+ // Check if we actually need to fix the protections on the block.
+ if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) ||
+ (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
+ const bool start_nc = (info.GetAddress() == GetInteger(src_map_start))
+ ? (False(info.GetDisableMergeAttribute() &
+ (KMemoryBlockDisableMergeAttribute::Locked |
+ KMemoryBlockDisableMergeAttribute::IpcLeft)))
+ : info.GetAddress() <= GetInteger(src_map_start);
+
+ const DisableMergeAttribute head_body_attr =
+ start_nc ? DisableMergeAttribute::EnableHeadAndBody
+ : DisableMergeAttribute::None;
+ DisableMergeAttribute tail_attr;
+ if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
+ auto next_it = it;
+ ++next_it;
+
+ const auto lock_count =
+ info.GetIpcLockCount() +
+ (next_it != m_memory_block_manager.end()
+ ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
+ : 0);
+ tail_attr = lock_count == 0 ? DisableMergeAttribute::EnableTail
+ : DisableMergeAttribute::None;
+ } else {
+ tail_attr = DisableMergeAttribute::None;
+ }
+
+ const KPageProperties properties = {
+ info.GetPermission(), false, false,
+ static_cast<DisableMergeAttribute>(head_body_attr | tail_attr)};
+ R_ASSERT(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, 0,
+ false, properties, OperationType::ChangePermissions, true));
+ }
+ }
+
+ // If we're past the end of the region, we're done.
+ if (src_map_last <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
+ ASSERT(it != m_memory_block_manager.end());
+ }
+}
+
+Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+ // Calculate the last address for convenience.
+ const KProcessAddress last_address = address + size - 1;
+
+ // Define iteration variables.
+ KProcessAddress cur_address;
+ size_t mapped_size;
+
+ // The entire mapping process can be retried.
+ while (true) {
+ // Check if the memory is already mapped.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size mapped is the size requested, we've nothing to do.
+ R_SUCCEED_IF(size == mapped_size);
+ }
+
+ // Allocate and map the memory.
+ {
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, size - mapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the new memory.
+ KPageGroup pg(m_kernel, m_block_info_manager);
+ R_TRY(m_kernel.MemoryManager().AllocateForProcess(
+ std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option,
+ GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
+
+            // If we fail in the next bit (or retry), we need to clean up the pages.
+ auto pg_guard = SCOPE_GUARD({
+ pg.OpenFirst();
+ pg.Close();
+ });
+
+ // Map the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ size_t num_allocator_blocks = 0;
+
+ // Verify that nobody has mapped memory since we first checked.
+ {
+ // Iterate over the memory.
+ size_t checked_mapped_size = 0;
+ cur_address = address;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ if (is_free) {
+ if (info.GetAddress() < GetInteger(address)) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (!is_free) {
+ checked_mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (!is_free) {
+ checked_mapped_size +=
+ KProcessAddress(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size now isn't what it was before, somebody mapped or unmapped
+ // concurrently. If this happened, retry.
+ if (mapped_size != checked_mapped_size) {
+ continue;
+ }
+ }
+
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager,
+ num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Prepare to iterate over the memory.
+ auto pg_it = pg.begin();
+ KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ // Reset the current tracking address, and make sure we clean up on failure.
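+                // The failure handler below takes over cleanup of the allocated pages, so cancel the guard.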
+ pg_guard.Cancel();
+ cur_address = address;
+ ON_RESULT_FAILURE {
+ if (cur_address > address) {
+ const KProcessAddress last_unmap_address = cur_address - 1;
+
+ // Iterate, unmapping the pages.
+ cur_address = address;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is free, we mapped it and need to unmap it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to unmap.
+ const KPageProperties unmap_properties = {
+ KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ const size_t cur_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_unmap_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
+ cur_pages, 0, false, unmap_properties,
+ OperationType::Unmap, true));
+ }
+
+ // Check if we're done.
+ if (last_unmap_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ }
+
+ // Release any remaining unmapped memory.
+ m_kernel.MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
+ m_kernel.MemoryManager().Close(pg_phys_addr, pg_pages);
+ for (++pg_it; pg_it != pg.end(); ++pg_it) {
+ m_kernel.MemoryManager().OpenFirst(pg_it->GetAddress(),
+ pg_it->GetNumPages());
+ m_kernel.MemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
+ }
+ };
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If it's unmapped, we need to map it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to map.
+ const KPageProperties map_properties = {
+ KMemoryPermission::UserReadWrite, false, false,
+ cur_address == this->GetAliasRegionStart()
+ ? DisableMergeAttribute::DisableHead
+ : DisableMergeAttribute::None};
+ size_t map_pages =
+ std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ {
+ // Create a page group for the current mapping range.
+ KPageGroup cur_pg(m_kernel, m_block_info_manager);
+ {
+ ON_RESULT_FAILURE_2 {
+ cur_pg.OpenFirst();
+ cur_pg.Close();
+ };
+
+ size_t remain_pages = map_pages;
+ while (remain_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Add whatever we can to the current block.
+ const size_t cur_pages = std::min(pg_pages, remain_pages);
+ R_TRY(cur_pg.AddBlock(pg_phys_addr +
+ ((pg_pages - cur_pages) * PageSize),
+ cur_pages));
+
+ // Advance.
+ remain_pages -= cur_pages;
+ pg_pages -= cur_pages;
+ }
+ }
+
+                    // Map the pages.
+ R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
+ cur_pg, map_properties,
+ OperationType::MapFirstGroup, false));
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Increase our tracked mapped size.
+ m_mapped_physical_memory_size += (size - mapped_size);
+
+ // Update the relevant memory blocks.
+ m_memory_block_manager.UpdateIfMatch(
+ std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+ address == this->GetAliasRegionStart()
+ ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::None);
+
+ R_SUCCEED();
+ }
+ }
+ }
+}
+
+Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock phys_lk(m_map_physical_memory_lock);
+
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
+ // Calculate the last address for convenience.
+ const KProcessAddress last_address = address + size - 1;
+
+ // Define iteration variables.
+ KProcessAddress map_start_address = 0;
+ KProcessAddress map_last_address = 0;
+
+ KProcessAddress cur_address;
+ size_t mapped_size;
+ size_t num_allocator_blocks = 0;
+
+ // Check if the memory is mapped.
+ {
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Verify the memory's state.
+ const bool is_normal = info.GetState() == KMemoryState::Normal &&
+ info.GetAttribute() == KMemoryAttribute::None;
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+ if (is_normal) {
+ R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+ if (map_start_address == 0) {
+ map_start_address = cur_address;
+ }
+ map_last_address =
+ (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
+
+ if (info.GetAddress() < GetInteger(address)) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+
+ mapped_size += (map_last_address + 1 - cur_address);
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If there's nothing mapped, we've nothing to do.
+ R_SUCCEED_IF(mapped_size == 0);
+ }
+
+ // Create an update allocator.
+ ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Separate the mapping.
+ const KPageProperties sep_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), map_start_address,
+ (map_last_address + 1 - map_start_address) / PageSize, 0, false,
+ sep_properties, OperationType::Separate, false));
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+
+ // Iterate over the memory, unmapping as we go.
+ auto it = m_memory_block_manager.FindIterator(cur_address);
+
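+    // If we're unmapping the mapping at the start of the alias region, we need to clear the
+    // disable-merge attribute that was set when the physical memory was first mapped.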
+ const auto clear_merge_attr =
+ (it->GetState() == KMemoryState::Normal &&
+ it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+ ? KMemoryBlockDisableMergeAttribute::Normal
+ : KMemoryBlockDisableMergeAttribute::None;
+
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != m_memory_block_manager.end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is normal, we need to unmap it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to unmap.
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
+ unmap_properties, OperationType::Unmap, false));
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // Release the memory resource.
+ m_mapped_physical_memory_size -= mapped_size;
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, mapped_size);
+
+ // Update memory blocks.
+ m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ clear_merge_attr);
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ UNIMPLEMENTED();
+ R_THROW(ResultNotImplemented);
+}
+
+Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+ KPageTableBase& src_page_table,
+ KProcessAddress src_address) {
+ // We need to lock both this table, and the current process's table, so set up an alias.
+ KPageTableBase& dst_page_table = *this;
+
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+ // Check that the memory is mapped in the destination process.
+ size_t num_allocator_blocks;
+ R_TRY(dst_page_table.CheckMemoryState(
+ std::addressof(num_allocator_blocks), dst_address, size, KMemoryState::All,
+ KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
+ KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Check that the memory is mapped in the source process.
+ R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Validate that the memory ranges are compatible.
+ {
+ // Define a helper type.
+ struct ContiguousRangeInfo {
+ public:
+ KPageTableBase& m_pt;
+ TraversalContext m_context;
+ TraversalEntry m_entry;
+ KPhysicalAddress m_phys_addr;
+ size_t m_cur_size;
+ size_t m_remaining_size;
+
+ public:
+ ContiguousRangeInfo(KPageTableBase& pt, KProcessAddress address, size_t size)
+ : m_pt(pt), m_remaining_size(size) {
+ // Begin a traversal.
+ ASSERT(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry),
+ std::addressof(m_context), address));
+
+                // Set up tracking fields.
+ m_phys_addr = m_entry.phys_addr;
+ m_cur_size = std::min<size_t>(
+ m_remaining_size,
+ m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
+
+ // Consume the whole contiguous block.
+ this->DetermineContiguousBlockExtents();
+ }
+
+ void ContinueTraversal() {
+ // Update our remaining size.
+ m_remaining_size = m_remaining_size - m_cur_size;
+
+ // Update our tracking fields.
+ if (m_remaining_size > 0) {
+ m_phys_addr = m_entry.phys_addr;
+ m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);
+
+ // Consume the whole contiguous block.
+ this->DetermineContiguousBlockExtents();
+ }
+ }
+
+ private:
+ void DetermineContiguousBlockExtents() {
+ // Continue traversing until we're not contiguous, or we have enough.
+ while (m_cur_size < m_remaining_size) {
+ ASSERT(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry),
+ std::addressof(m_context)));
+
+ // If we're not contiguous, we're done.
+ if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
+ break;
+ }
+
+ // Update our current size.
+ m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
+ }
+ }
+ };
+
+ // Create ranges for both tables.
+ ContiguousRangeInfo src_range(src_page_table, src_address, size);
+ ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
+
+ // Validate the ranges.
+ while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
+ R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, ResultInvalidMemoryRegion);
+ R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, ResultInvalidMemoryRegion);
+
+ src_range.ContinueTraversal();
+ dst_range.ContinueTraversal();
+ }
+ }
+
+ // We no longer need to hold our lock on the source page table.
+ lk.TryUnlockHalf(src_page_table.m_general_lock);
+
+ // Create an update allocator.
+ Result allocator_result;
+ KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
+ m_memory_block_slab_manager, num_allocator_blocks);
+ R_TRY(allocator_result);
+
+ // We're going to perform an update, so create a helper.
+ KScopedPageTableUpdater updater(this);
+
+ // Unmap the memory.
+ const size_t num_pages = size / PageSize;
+ const KPageProperties unmap_properties = {KMemoryPermission::None, false, false,
+ DisableMergeAttribute::None};
+ R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, 0, false, unmap_properties,
+ OperationType::Unmap, false));
+
+ // Apply the memory block update.
+ m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
+ KMemoryBlockDisableMergeAttribute::Normal);
+
+ R_SUCCEED();
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+ size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid,
+ const KPageProperties properties, OperationType operation,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(num_pages > 0);
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ ASSERT(this->ContainsPages(virt_addr, num_pages));
+
+ // As we don't allocate page entries in guest memory, we don't need to allocate them from
+ // or free them to the page list, and so it goes unused (along with page properties).
+
+ switch (operation) {
+ case OperationType::Unmap: {
+ // Ensure that any pages we track are closed on exit.
+ KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
+ SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+
+ // Make a page group representing the region to unmap.
+ this->MakePageGroup(pages_to_close, virt_addr, num_pages);
+
+ // Unmap.
+ m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+
+ R_SUCCEED();
+ }
+ case OperationType::Map: {
+ ASSERT(virt_addr != 0);
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
+ ConvertToMemoryPermission(properties.perm));
+
+ // Open references to pages, if we should.
+ if (this->IsHeapPhysicalAddress(phys_addr)) {
+ m_kernel.MemoryManager().Open(phys_addr, num_pages);
+ }
+
+ R_SUCCEED();
+ }
+ case OperationType::Separate: {
+ // TODO: Unimplemented.
+ R_SUCCEED();
+ }
+ case OperationType::ChangePermissions:
+ case OperationType::ChangePermissionsAndRefresh:
+ case OperationType::ChangePermissionsAndRefreshAndFlush: {
+ m_memory->ProtectRegion(*m_impl, virt_addr, num_pages * PageSize,
+ ConvertToMemoryPermission(properties.perm));
+ R_SUCCEED();
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_addr,
+ size_t num_pages, const KPageGroup& page_group,
+ const KPageProperties properties, OperationType operation,
+ bool reuse_ll) {
+ ASSERT(this->IsLockedByCurrentThread());
+ ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
+ ASSERT(num_pages > 0);
+ ASSERT(num_pages == page_group.GetNumPages());
+
+ // As we don't allocate page entries in guest memory, we don't need to allocate them from
+ // the page list, and so it goes unused (along with page properties).
+
+ switch (operation) {
+ case OperationType::MapGroup:
+ case OperationType::MapFirstGroup: {
+ // We want to maintain a new reference to every page in the group.
+ KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+
+ for (const auto& node : page_group) {
+ const size_t size{node.GetNumPages() * PageSize};
+
+ // Map the pages.
+ m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
+ ConvertToMemoryPermission(properties.perm));
+
+ virt_addr += size;
+ }
+
+ // We succeeded! We want to persist the reference to the pages.
+ spg.CancelClose();
+
+ R_SUCCEED();
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void KPageTableBase::FinalizeUpdate(PageLinkedList* page_list) {
+ while (page_list->Peek()) {
+ [[maybe_unused]] auto page = page_list->Pop();
+
+ // TODO: Free page entries once they are allocated in guest memory.
+ // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+ // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+ // this->GetPageTableManager().Free(page);
+ }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
new file mode 100644
index 000000000..556d230b3
--- /dev/null
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -0,0 +1,760 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_funcs.h"
+#include "common/page_table.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_dynamic_resource_manager.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_typed_address.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/result.h"
+#include "core/memory.h"
+
+namespace Kernel {
+
+enum class DisableMergeAttribute : u8 {
+ None = (0U << 0),
+
+ DisableHead = (1U << 0),
+ DisableHeadAndBody = (1U << 1),
+ EnableHeadAndBody = (1U << 2),
+ DisableTail = (1U << 3),
+ EnableTail = (1U << 4),
+ EnableAndMergeHeadBodyTail = (1U << 5),
+
+ EnableHeadBodyTail = EnableHeadAndBody | EnableTail,
+ DisableHeadBodyTail = DisableHeadAndBody | DisableTail,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DisableMergeAttribute);
+
+struct KPageProperties {
+ KMemoryPermission perm;
+ bool io;
+ bool uncached;
+ DisableMergeAttribute disable_merge_attributes;
+};
+static_assert(std::is_trivial_v<KPageProperties>);
+static_assert(sizeof(KPageProperties) == sizeof(u32));
+
+class KResourceLimit;
+class KSystemResource;
+
+class KPageTableBase {
+ YUZU_NON_COPYABLE(KPageTableBase);
+ YUZU_NON_MOVEABLE(KPageTableBase);
+
+public:
+ using TraversalEntry = Common::PageTable::TraversalEntry;
+ using TraversalContext = Common::PageTable::TraversalContext;
+
+ class MemoryRange {
+ private:
+ KernelCore& m_kernel;
+ KPhysicalAddress m_address;
+ size_t m_size;
+ bool m_heap;
+
+ public:
+ explicit MemoryRange(KernelCore& kernel)
+ : m_kernel(kernel), m_address(0), m_size(0), m_heap(false) {}
+
+ void Set(KPhysicalAddress address, size_t size, bool heap) {
+ m_address = address;
+ m_size = size;
+ m_heap = heap;
+ }
+
+ KPhysicalAddress GetAddress() const {
+ return m_address;
+ }
+ size_t GetSize() const {
+ return m_size;
+ }
+ bool IsHeap() const {
+ return m_heap;
+ }
+
+ void Open();
+ void Close();
+ };
+
+protected:
+ enum MemoryFillValue : u8 {
+ MemoryFillValue_Zero = 0,
+ MemoryFillValue_Stack = 'X',
+ MemoryFillValue_Ipc = 'Y',
+ MemoryFillValue_Heap = 'Z',
+ };
+
+ enum class OperationType {
+ Map = 0,
+ MapGroup = 1,
+ MapFirstGroup = 2,
+ Unmap = 3,
+ ChangePermissions = 4,
+ ChangePermissionsAndRefresh = 5,
+ ChangePermissionsAndRefreshAndFlush = 6,
+ Separate = 7,
+ };
+
+ static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
+ static constexpr size_t RegionAlignment = 2_MiB;
+ static_assert(RegionAlignment == KernelAslrAlignment);
+
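+    // Intrusive singly-linked list of page-sized nodes, used to track page table pages that
+    // should be freed when an update is finalized.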
+ struct PageLinkedList {
+ private:
+ struct Node {
+ Node* m_next;
+ std::array<u8, PageSize - sizeof(Node*)> m_buffer;
+ };
+ static_assert(std::is_trivial_v<Node>);
+
+ private:
+ Node* m_root{};
+
+ public:
+ constexpr PageLinkedList() : m_root(nullptr) {}
+
+ void Push(Node* n) {
+ ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+ n->m_next = m_root;
+ m_root = n;
+ }
+
+ Node* Peek() const {
+ return m_root;
+ }
+
+ Node* Pop() {
+ Node* const r = m_root;
+
+ m_root = r->m_next;
+ r->m_next = nullptr;
+
+ return r;
+ }
+ };
+ static_assert(std::is_trivially_destructible_v<PageLinkedList>);
+
+ static constexpr auto DefaultMemoryIgnoreAttr =
+ KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+
+ static constexpr size_t GetAddressSpaceWidth(Svc::CreateProcessFlag as_type) {
+ switch (static_cast<Svc::CreateProcessFlag>(as_type &
+ Svc::CreateProcessFlag::AddressSpaceMask)) {
+ case Svc::CreateProcessFlag::AddressSpace64Bit:
+ return 39;
+ case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+ return 36;
+ case Svc::CreateProcessFlag::AddressSpace32Bit:
+ case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+ return 32;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+private:
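+    // RAII helper which finalizes any pages accumulated in its page list when it goes out of scope.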
+ class KScopedPageTableUpdater {
+ private:
+ KPageTableBase* m_pt;
+ PageLinkedList m_ll;
+
+ public:
+ explicit KScopedPageTableUpdater(KPageTableBase* pt) : m_pt(pt), m_ll() {}
+ explicit KScopedPageTableUpdater(KPageTableBase& pt)
+ : KScopedPageTableUpdater(std::addressof(pt)) {}
+ ~KScopedPageTableUpdater() {
+ m_pt->FinalizeUpdate(this->GetPageList());
+ }
+
+ PageLinkedList* GetPageList() {
+ return std::addressof(m_ll);
+ }
+ };
+
+private:
+ KernelCore& m_kernel;
+ Core::System& m_system;
+ KProcessAddress m_address_space_start{};
+ KProcessAddress m_address_space_end{};
+ KProcessAddress m_heap_region_start{};
+ KProcessAddress m_heap_region_end{};
+ KProcessAddress m_current_heap_end{};
+ KProcessAddress m_alias_region_start{};
+ KProcessAddress m_alias_region_end{};
+ KProcessAddress m_stack_region_start{};
+ KProcessAddress m_stack_region_end{};
+ KProcessAddress m_kernel_map_region_start{};
+ KProcessAddress m_kernel_map_region_end{};
+ KProcessAddress m_alias_code_region_start{};
+ KProcessAddress m_alias_code_region_end{};
+ KProcessAddress m_code_region_start{};
+ KProcessAddress m_code_region_end{};
+ size_t m_max_heap_size{};
+ size_t m_mapped_physical_memory_size{};
+ size_t m_mapped_unsafe_physical_memory{};
+ size_t m_mapped_insecure_memory{};
+ size_t m_mapped_ipc_server_memory{};
+ mutable KLightLock m_general_lock;
+ mutable KLightLock m_map_physical_memory_lock;
+ KLightLock m_device_map_lock;
+ std::unique_ptr<Common::PageTable> m_impl{};
+ Core::Memory::Memory* m_memory{};
+ KMemoryBlockManager m_memory_block_manager{};
+ u32 m_allocate_option{};
+ u32 m_address_space_width{};
+ bool m_is_kernel{};
+ bool m_enable_aslr{};
+ bool m_enable_device_address_space_merge{};
+ KMemoryBlockSlabManager* m_memory_block_slab_manager{};
+ KBlockInfoManager* m_block_info_manager{};
+ KResourceLimit* m_resource_limit{};
+ const KMemoryRegion* m_cached_physical_linear_region{};
+ const KMemoryRegion* m_cached_physical_heap_region{};
+ MemoryFillValue m_heap_fill_value{};
+ MemoryFillValue m_ipc_fill_value{};
+ MemoryFillValue m_stack_fill_value{};
+
+public:
+ explicit KPageTableBase(KernelCore& kernel);
+ ~KPageTableBase();
+
+ Result InitializeForKernel(bool is_64_bit, KVirtualAddress start, KVirtualAddress end,
+ Core::Memory::Memory& memory);
+ Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
+ bool enable_device_address_space_merge, bool from_back,
+ KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit, Core::Memory::Memory& memory,
+ KProcessAddress aslr_space_start);
+
+ void Finalize();
+
+ bool IsKernel() const {
+ return m_is_kernel;
+ }
+ bool IsAslrEnabled() const {
+ return m_enable_aslr;
+ }
+
+ bool Contains(KProcessAddress addr) const {
+ return m_address_space_start <= addr && addr <= m_address_space_end - 1;
+ }
+
+ bool Contains(KProcessAddress addr, size_t size) const {
+ return m_address_space_start <= addr && addr < addr + size &&
+ addr + size - 1 <= m_address_space_end - 1;
+ }
+
+ bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+ return this->Contains(addr, size) && m_alias_region_start <= addr &&
+ addr + size - 1 <= m_alias_region_end - 1;
+ }
+
+ bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+ return this->Contains(addr, size) && m_heap_region_start <= addr &&
+ addr + size - 1 <= m_heap_region_end - 1;
+ }
+
+ bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+ // Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the
+ // alias code region.
+ return this->CanContain(addr, size, Svc::MemoryState::AliasCode);
+ }
+
+ KScopedLightLock AcquireDeviceMapLock() {
+ return KScopedLightLock(m_device_map_lock);
+ }
+
+ KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+ size_t GetRegionSize(Svc::MemoryState state) const;
+ bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+ KProcessAddress GetRegionAddress(KMemoryState state) const {
+ return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+ size_t GetRegionSize(KMemoryState state) const {
+ return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ return this->CanContain(addr, size,
+ static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+ }
+
+public:
+ Core::Memory::Memory& GetMemory() {
+ return *m_memory;
+ }
+
+ Core::Memory::Memory& GetMemory() const {
+ return *m_memory;
+ }
+
+ Common::PageTable& GetImpl() {
+ return *m_impl;
+ }
+
+ Common::PageTable& GetImpl() const {
+ return *m_impl;
+ }
+
+ size_t GetNumGuardPages() const {
+ return this->IsKernel() ? 1 : 4;
+ }
+
+protected:
+ // NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions
+ // in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived
+ // class, and this avoids unnecessary virtual function calls.
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+ KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties,
+ OperationType operation, bool reuse_ll);
+ Result Operate(PageLinkedList* page_list, KProcessAddress virt_addr, size_t num_pages,
+ const KPageGroup& page_group, const KPageProperties properties,
+ OperationType operation, bool reuse_ll);
+ void FinalizeUpdate(PageLinkedList* page_list);
+
+ bool IsLockedByCurrentThread() const {
+ return m_general_lock.IsLockedByCurrentThread();
+ }
+
+ bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+ m_cached_physical_linear_region, phys_addr);
+ }
+
+ bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsLinearMappedPhysicalAddress(
+ m_cached_physical_linear_region, phys_addr, size);
+ }
+
+ bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr);
+ }
+
+ bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr, size);
+ }
+
+ bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ return m_kernel.MemoryLayout().IsHeapPhysicalAddress(m_cached_physical_heap_region,
+ phys_addr);
+ }
+
+ bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+ return (m_address_space_start <= addr) &&
+ (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
+ (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
+ }
+
+private:
+ KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+ size_t num_pages, size_t alignment, size_t offset,
+ size_t guard_pages) const;
+
+ Result CheckMemoryStateContiguous(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
+ R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+ perm, attr_mask, attr));
+ }
+
+ Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+ KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(size_t* out_blocks_needed, KProcessAddress addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+ state_mask, state, perm_mask, perm, attr_mask, attr,
+ ignore_attr));
+ }
+ Result CheckMemoryState(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+ attr_mask, attr, ignore_attr));
+ }
+
+ Result LockMemoryAndOpen(KPageGroup* out_pg, KPhysicalAddress* out_paddr, KProcessAddress addr,
+ size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ Result UnlockMemory(KProcessAddress addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageGroup* pg);
+
+ Result QueryInfoImpl(KMemoryInfo* out_info, Svc::PageInfo* out_page,
+ KProcessAddress address) const;
+
+ Result QueryMappingImpl(KProcessAddress* out, KPhysicalAddress address, size_t size,
+ Svc::MemoryState state) const;
+
+ Result AllocateAndMapPagesImpl(PageLinkedList* page_list, KProcessAddress address,
+ size_t num_pages, KMemoryPermission perm);
+ Result MapPageGroupImpl(PageLinkedList* page_list, KProcessAddress address,
+ const KPageGroup& pg, const KPageProperties properties, bool reuse_ll);
+
+ void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
+ const KPageGroup& pg);
+
+ Result MakePageGroup(KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, KProcessAddress addr, size_t num_pages);
+
+ Result GetContiguousMemoryRangeWithState(MemoryRange* out, KProcessAddress address, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+
+ Result MapIoImpl(KProcessAddress* out, PageLinkedList* page_list, KPhysicalAddress phys_addr,
+ size_t size, KMemoryState state, KMemoryPermission perm);
+ Result ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddress phys_addr, size_t size,
+ KMemoryState state);
+ Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAddress src_addr, size_t size,
+ KMemoryState state);
+
+ Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
+ KProcessAddress address, size_t size, KMemoryPermission test_perm,
+ KMemoryState dst_state);
+ Result SetupForIpcServer(KProcessAddress* out_addr, size_t size, KProcessAddress src_addr,
+ KMemoryPermission test_perm, KMemoryState dst_state,
+ KPageTableBase& src_page_table, bool send);
+ void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, KProcessAddress address,
+ size_t size, KMemoryPermission prot_perm);
+
+ size_t GetSize(KMemoryState state) const;
+
+ bool GetPhysicalAddressLocked(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+ // Validate pre-conditions.
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return this->GetImpl().GetPhysicalAddress(out, virt_addr);
+ }
+
+public:
+ bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress virt_addr) const {
+ // Validate pre-conditions.
+ ASSERT(!this->IsLockedByCurrentThread());
+
+ // Acquire exclusive access to the table while doing address translation.
+ KScopedLightLock lk(m_general_lock);
+
+ return this->GetPhysicalAddressLocked(out, virt_addr);
+ }
+
+ KBlockInfoManager* GetBlockInfoManager() const {
+ return m_block_info_manager;
+ }
+
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm);
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr);
+ Result SetHeapSize(KProcessAddress* out, size_t size);
+ Result SetMaxHeapSize(size_t size);
+ Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const;
+ Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) const;
+ Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+ R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Static));
+ }
+ Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) const {
+ R_RETURN(this->QueryMappingImpl(out, address, size, Svc::MemoryState::Io));
+ }
+ Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+ Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping, Svc::MemoryPermission perm);
+ Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping);
+ Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
+ Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
+ Result MapInsecureMemory(KProcessAddress address, size_t size);
+ Result UnmapInsecureMemory(KProcessAddress address, size_t size);
+
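+    // Convenience wrappers over the private MapPages overload above: the first two map a known
+    // physical address range (is_pa_valid = true), while the third passes no physical address
+    // (is_pa_valid = false) and maps within the default region for the given memory state.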
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KProcessAddress region_start,
+ size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start,
+ region_num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(this->MapPages(out_addr, num_pages, PageSize, 0, false,
+ this->GetRegionAddress(state),
+ this->GetRegionSize(state) / PageSize, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+
+ Result MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
+ KProcessAddress region_start, size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm);
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state);
+
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
+
+ Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
+ Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
+
+ Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state);
+
+ Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
+ Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state);
+
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
+
+ Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
+ Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
+
+ Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned);
+ Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange* out, KProcessAddress address,
+ size_t size);
+
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size);
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
+
+ Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm);
+ Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size);
+ Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg);
+
+ Result OpenMemoryRangeForProcessCacheOperation(MemoryRange* out, KProcessAddress address,
+ size_t size);
+
+ Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromLinearToKernel(void* buffer, size_t size, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state,
+ KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr);
+ Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ void* buffer);
+ Result CopyMemoryFromHeapToHeap(KPageTableBase& dst_page_table, KProcessAddress dst_addr,
+ size_t size, KMemoryState dst_state_mask,
+ KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+ Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KPageTableBase& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr);
+
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KPageTableBase& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send);
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
+
+ Result MapPhysicalMemory(KProcessAddress address, size_t size);
+ Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
+
+ Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+ Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
+
+ Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase& src_pt,
+ KProcessAddress src_address);
+
+public:
+ KProcessAddress GetAddressSpaceStart() const {
+ return m_address_space_start;
+ }
+ KProcessAddress GetHeapRegionStart() const {
+ return m_heap_region_start;
+ }
+ KProcessAddress GetAliasRegionStart() const {
+ return m_alias_region_start;
+ }
+ KProcessAddress GetStackRegionStart() const {
+ return m_stack_region_start;
+ }
+ KProcessAddress GetKernelMapRegionStart() const {
+ return m_kernel_map_region_start;
+ }
+ KProcessAddress GetCodeRegionStart() const {
+ return m_code_region_start;
+ }
+ KProcessAddress GetAliasCodeRegionStart() const {
+ return m_alias_code_region_start;
+ }
+
+ size_t GetAddressSpaceSize() const {
+ return m_address_space_end - m_address_space_start;
+ }
+ size_t GetHeapRegionSize() const {
+ return m_heap_region_end - m_heap_region_start;
+ }
+ size_t GetAliasRegionSize() const {
+ return m_alias_region_end - m_alias_region_start;
+ }
+ size_t GetStackRegionSize() const {
+ return m_stack_region_end - m_stack_region_start;
+ }
+ size_t GetKernelMapRegionSize() const {
+ return m_kernel_map_region_end - m_kernel_map_region_start;
+ }
+ size_t GetCodeRegionSize() const {
+ return m_code_region_end - m_code_region_start;
+ }
+ size_t GetAliasCodeRegionSize() const {
+ return m_alias_code_region_end - m_alias_code_region_start;
+ }
+
+ size_t GetNormalMemorySize() const {
+ // Lock the table.
+ KScopedLightLock lk(m_general_lock);
+
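+        // Normal memory is the currently committed heap plus any mapped physical memory.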
+ return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
+ }
+
+ size_t GetCodeSize() const;
+ size_t GetCodeDataSize() const;
+ size_t GetAliasCodeSize() const;
+ size_t GetAliasCodeDataSize() const;
+
+ u32 GetAllocateOption() const {
+ return m_allocate_option;
+ }
+
+ u32 GetAddressSpaceWidth() const {
+ return m_address_space_width;
+ }
+
+public:
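+    // Static translation helpers: both the linear-mapped and heap variants resolve through the
+    // emulated device memory and the kernel's memory layout.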
+ // Linear mapped
+ static u8* GetLinearMappedVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+ return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+ }
+
+ static KPhysicalAddress GetLinearMappedPhysicalAddress(KernelCore& kernel,
+ KVirtualAddress addr) {
+ return kernel.MemoryLayout().GetLinearPhysicalAddress(addr);
+ }
+
+ static KVirtualAddress GetLinearMappedVirtualAddress(KernelCore& kernel,
+ KPhysicalAddress addr) {
+ return kernel.MemoryLayout().GetLinearVirtualAddress(addr);
+ }
+
+ // Heap
+ static u8* GetHeapVirtualPointer(KernelCore& kernel, KPhysicalAddress addr) {
+ return kernel.System().DeviceMemory().GetPointer<u8>(addr);
+ }
+
+ static KPhysicalAddress GetHeapPhysicalAddress(KernelCore& kernel, KVirtualAddress addr) {
+ return GetLinearMappedPhysicalAddress(kernel, addr);
+ }
+
+ static KVirtualAddress GetHeapVirtualAddress(KernelCore& kernel, KPhysicalAddress addr) {
+ return GetLinearMappedVirtualAddress(kernel, addr);
+ }
+
+ // Member heap
+ u8* GetHeapVirtualPointer(KPhysicalAddress addr) {
+ return GetHeapVirtualPointer(m_kernel, addr);
+ }
+
+ KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
+ return GetHeapPhysicalAddress(m_kernel, addr);
+ }
+
+ KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
+ return GetHeapVirtualAddress(m_kernel, addr);
+ }
+
+ // TODO: GetPageTableVirtualAddress
+ // TODO: GetPageTablePhysicalAddress
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 7fa34d693..6c29eb72c 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1,515 +1,598 @@
-// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <algorithm>
-#include <bitset>
-#include <ctime>
-#include <memory>
#include <random>
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
-#include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/code_set.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_resource_limit.h"
-#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/svc_results.h"
-#include "core/memory.h"
+#include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/k_worker_task_manager.h"
namespace Kernel {
-namespace {
-/**
- * Sets up the primary application thread
- *
- * @param system The system instance to create the main thread under.
- * @param owner_process The parent process for the main thread
- * @param priority The priority to give the main thread
- */
-void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
- KProcessAddress stack_top) {
- const KProcessAddress entry_point = owner_process.GetEntryPoint();
- ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
-
- KThread* thread = KThread::Create(system.Kernel());
- SCOPE_EXIT({ thread->Close(); });
-
- ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
- owner_process.GetIdealCoreId(),
- std::addressof(owner_process))
- .IsSuccess());
-
- // Register 1 must be a handle to the main thread
- Handle thread_handle{};
- owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread);
-
- thread->GetContext32().cpu_registers[0] = 0;
- thread->GetContext64().cpu_registers[0] = 0;
- thread->GetContext32().cpu_registers[1] = thread_handle;
- thread->GetContext64().cpu_registers[1] = thread_handle;
-
- if (system.DebuggerEnabled()) {
- thread->RequestSuspend(SuspendType::Debug);
- }
- // Run our thread.
- void(thread->Run());
-}
-} // Anonymous namespace
+namespace {
-Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit) {
- auto& kernel = system.Kernel();
+Result TerminateChildren(KernelCore& kernel, KProcess* process,
+ const KThread* thread_to_not_terminate) {
+ // Request that all children threads terminate.
+ {
+ KScopedLightLock proc_lk(process->GetListLock());
+ KScopedSchedulerLock sl(kernel);
+
+ if (thread_to_not_terminate != nullptr &&
+ process->GetPinnedThread(GetCurrentCoreId(kernel)) == thread_to_not_terminate) {
+            // NOTE: Here Nintendo unpins the current thread instead of the thread_to_not_terminate.
+            // This is valid because the only caller that passes a non-null thread passes
+            // GetCurrentThreadPointer(), but it's still notable because it seems incorrect at
+            // first glance.
+ process->UnpinCurrentThread();
+ }
- process->name = std::move(process_name);
- process->m_resource_limit = res_limit;
- process->m_system_resource_address = 0;
- process->m_state = State::Created;
- process->m_program_id = 0;
- process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
- : kernel.CreateNewUserProcessID();
- process->m_capabilities.InitializeForMetadatalessProcess();
- process->m_is_initialized = true;
+ auto& thread_list = process->GetThreadList();
+ for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
+ if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
+ if (thread->GetState() != ThreadState::Terminated) {
+ thread->RequestTerminate();
+ }
+ }
+ }
+ }
- std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue()
- : static_cast<u32>(std::time(nullptr)));
- std::uniform_int_distribution<u64> distribution;
- std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(),
- [&] { return distribution(rng); });
+ // Wait for all children threads to terminate.
+ while (true) {
+ // Get the next child.
+ KThread* cur_child = nullptr;
+ {
+ KScopedLightLock proc_lk(process->GetListLock());
+
+ auto& thread_list = process->GetThreadList();
+ for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
+ if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
+ if (thread->GetState() != ThreadState::Terminated) {
+ if (thread->Open()) {
+ cur_child = thread;
+ break;
+ }
+ }
+ }
+ }
+ }
- kernel.AppendNewProcess(process);
+ // If we didn't find any non-terminated children, we're done.
+ if (cur_child == nullptr) {
+ break;
+ }
- // Clear remaining fields.
- process->m_num_running_threads = 0;
- process->m_is_signaled = false;
- process->m_exception_thread = nullptr;
- process->m_is_suspended = false;
- process->m_schedule_count = 0;
- process->m_is_handle_table_initialized = false;
- process->m_is_hbl = false;
+ // Terminate and close the thread.
+ SCOPE_EXIT({ cur_child->Close(); });
- // Open a reference to the resource limit.
- process->m_resource_limit->Open();
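+        // If our own termination is requested while terminating this child, propagate that
+        // result; any other outcome just continues with the next child.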
+ if (const Result terminate_result = cur_child->Terminate();
+ ResultTerminationRequested == terminate_result) {
+ R_THROW(terminate_result);
+ }
+ }
R_SUCCEED();
}
-void KProcess::DoWorkerTaskImpl() {
- UNIMPLEMENTED();
-}
-
-KResourceLimit* KProcess::GetResourceLimit() const {
- return m_resource_limit;
-}
+class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
+private:
+ KThread** m_exception_thread;
-void KProcess::IncrementRunningThreadCount() {
- ASSERT(m_num_running_threads.load() >= 0);
- ++m_num_running_threads;
-}
+public:
+ explicit ThreadQueueImplForKProcessEnterUserException(KernelCore& kernel, KThread** t)
+ : KThreadQueue(kernel), m_exception_thread(t) {}
-void KProcess::DecrementRunningThreadCount() {
- ASSERT(m_num_running_threads.load() > 0);
+ virtual void EndWait(KThread* waiting_thread, Result wait_result) override {
+ // Set the exception thread.
+ *m_exception_thread = waiting_thread;
- if (const auto prev = m_num_running_threads--; prev == 1) {
- // TODO(bunnei): Process termination to be implemented when multiprocess is supported.
+ // Invoke the base end wait handler.
+ KThreadQueue::EndWait(waiting_thread, wait_result);
}
-}
-u64 KProcess::GetTotalPhysicalMemoryAvailable() {
- const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
- m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size +
- m_main_thread_stack_size};
- if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
- capacity != pool_size) {
- LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
- }
- if (capacity < m_memory_usage_capacity) {
- return capacity;
+ virtual void CancelWait(KThread* waiting_thread, Result wait_result,
+ bool cancel_timer_task) override {
+ // Remove the thread as a waiter on its mutex owner.
+ waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
+
+ // Invoke the base cancel wait handler.
+ KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
- return m_memory_usage_capacity;
-}
+};
-u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
- return this->GetTotalPhysicalMemoryAvailable() - this->GetSystemResourceSize();
+void GenerateRandom(std::span<u64> out_random) {
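+    // Use the configured RNG seed when seeding is enabled in settings; otherwise seed from time.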
+ std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue()
+ : static_cast<u32>(std::time(nullptr)));
+ std::uniform_int_distribution<u64> distribution;
+ std::generate(out_random.begin(), out_random.end(), [&] { return distribution(rng); });
}
-u64 KProcess::GetTotalPhysicalMemoryUsed() {
- return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() +
- this->GetSystemResourceSize();
-}
+} // namespace
-u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
- return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize();
-}
+void KProcess::Finalize() {
+ // Delete the process local region.
+ this->DeleteThreadLocalRegion(m_plr_address);
-bool KProcess::ReleaseUserException(KThread* thread) {
- KScopedSchedulerLock sl{m_kernel};
+ // Get the used memory size.
+ const size_t used_memory_size = this->GetUsedNonSystemUserPhysicalMemorySize();
- if (m_exception_thread == thread) {
- m_exception_thread = nullptr;
+ // Finalize the page table.
+ m_page_table.Finalize();
- // Remove waiter thread.
- bool has_waiters{};
- if (KThread* next = thread->RemoveKernelWaiterByKey(
- std::addressof(has_waiters),
- reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)));
- next != nullptr) {
- next->EndWait(ResultSuccess);
+ // Finish using our system resource.
+ if (m_system_resource) {
+ if (m_system_resource->IsSecureResource()) {
+ // Finalize optimized memory. If memory wasn't optimized, this is a no-op.
+ m_kernel.MemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
}
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
-
- return true;
- } else {
- return false;
+ m_system_resource->Close();
+ m_system_resource = nullptr;
}
-}
-
-void KProcess::PinCurrentThread(s32 core_id) {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
- // Get the current thread.
- KThread* cur_thread =
- m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+ // Free all shared memory infos.
+ {
+ auto it = m_shared_memory_list.begin();
+ while (it != m_shared_memory_list.end()) {
+ KSharedMemoryInfo* info = std::addressof(*it);
+ KSharedMemory* shmem = info->GetSharedMemory();
- // If the thread isn't terminated, pin it.
- if (!cur_thread->IsTerminationRequested()) {
- // Pin it.
- this->PinThread(core_id, cur_thread);
- cur_thread->Pin(core_id);
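+            // Close the shared memory once for each reference still held through this info.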
+ while (!info->Close()) {
+ shmem->Close();
+ }
+ shmem->Close();
- // An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ it = m_shared_memory_list.erase(it);
+ KSharedMemoryInfo::Free(m_kernel, info);
+ }
}
-}
-void KProcess::UnpinCurrentThread(s32 core_id) {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
-
- // Get the current thread.
- KThread* cur_thread =
- m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+ // Our thread local page list must be empty at this point.
+ ASSERT(m_partially_used_tlp_tree.empty());
+ ASSERT(m_fully_used_tlp_tree.empty());
- // Unpin it.
- cur_thread->Unpin();
- this->UnpinThread(core_id, cur_thread);
+ // Release memory to the resource limit.
+ if (m_resource_limit != nullptr) {
+ ASSERT(used_memory_size >= m_memory_release_hint);
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, used_memory_size,
+ used_memory_size - m_memory_release_hint);
+ m_resource_limit->Close();
+ }
- // An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ // Perform inherited finalization.
+ KSynchronizationObject::Finalize();
}
-void KProcess::UnpinThread(KThread* thread) {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
-
- // Get the thread's core id.
- const auto core_id = thread->GetActiveCore();
+Result KProcess::Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
+ bool is_real) {
+ // TODO: remove this special case
+ if (is_real) {
+ // Create and clear the process local region.
+ R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
+ this->GetMemory().ZeroBlock(m_plr_address, Svc::ThreadLocalRegionSize);
+ }
- // Unpin it.
- this->UnpinThread(core_id, thread);
- thread->Unpin();
+ // Copy in the name from parameters.
+ static_assert(sizeof(params.name) < sizeof(m_name));
+ std::memcpy(m_name.data(), params.name.data(), sizeof(params.name));
+ m_name[sizeof(params.name)] = 0;
+
+ // Set misc fields.
+ m_state = State::Created;
+ m_main_thread_stack_size = 0;
+ m_used_kernel_memory_size = 0;
+ m_ideal_core_id = 0;
+ m_flags = params.flags;
+ m_version = params.version;
+ m_program_id = params.program_id;
+ m_code_address = params.code_address;
+ m_code_size = params.code_num_pages * PageSize;
+ m_is_application = True(params.flags & Svc::CreateProcessFlag::IsApplication);
+
+ // Set thread fields.
+ for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ m_running_threads[i] = nullptr;
+ m_pinned_threads[i] = nullptr;
+ m_running_thread_idle_counts[i] = 0;
+ m_running_thread_switch_counts[i] = 0;
+ }
- // An update is needed.
- KScheduler::SetSchedulerUpdateNeeded(m_kernel);
-}
+ // Set max memory based on address space type.
+ switch ((params.flags & Svc::CreateProcessFlag::AddressSpaceMask)) {
+ case Svc::CreateProcessFlag::AddressSpace32Bit:
+ case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
+ case Svc::CreateProcessFlag::AddressSpace64Bit:
+ m_max_process_memory = m_page_table.GetHeapRegionSize();
+ break;
+ case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
+ m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
+ break;
+ default:
+ UNREACHABLE();
+ }
-Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
- [[maybe_unused]] size_t size) {
- // Lock ourselves, to prevent concurrent access.
- KScopedLightLock lk(m_state_lock);
+ // Generate random entropy.
+ GenerateRandom(m_entropy);
- // Try to find an existing info for the memory.
- KSharedMemoryInfo* shemen_info = nullptr;
- const auto iter = std::find_if(
- m_shared_memory_list.begin(), m_shared_memory_list.end(),
- [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
- if (iter != m_shared_memory_list.end()) {
- shemen_info = *iter;
- }
+ // Clear remaining fields.
+ m_num_running_threads = 0;
+ m_num_process_switches = 0;
+ m_num_thread_switches = 0;
+ m_num_fpu_switches = 0;
+ m_num_supervisor_calls = 0;
+ m_num_ipc_messages = 0;
- if (shemen_info == nullptr) {
- shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
- R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);
+ m_is_signaled = false;
+ m_exception_thread = nullptr;
+ m_is_suspended = false;
+ m_memory_release_hint = 0;
+ m_schedule_count = 0;
+ m_is_handle_table_initialized = false;
- shemen_info->Initialize(shmem);
- m_shared_memory_list.push_back(shemen_info);
- }
+ // Open a reference to our resource limit.
+ m_resource_limit = res_limit;
+ m_resource_limit->Open();
- // Open a reference to the shared memory and its info.
- shmem->Open();
- shemen_info->Open();
+ // We're initialized!
+ m_is_initialized = true;
R_SUCCEED();
}
-void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address,
- [[maybe_unused]] size_t size) {
- // Lock ourselves, to prevent concurrent access.
- KScopedLightLock lk(m_state_lock);
+Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
+ std::span<const u32> caps, KResourceLimit* res_limit,
+ KMemoryManager::Pool pool, bool immortal) {
+ ASSERT(res_limit != nullptr);
+ ASSERT((params.code_num_pages * PageSize) / PageSize ==
+ static_cast<size_t>(params.code_num_pages));
+
+ // Set members.
+ m_memory_pool = pool;
+ m_is_default_application_system_resource = false;
+ m_is_immortal = immortal;
+
+ // Setup our system resource.
+ if (const size_t system_resource_num_pages = params.system_resource_num_pages;
+ system_resource_num_pages != 0) {
+ // Create a secure system resource.
+ KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
+ R_UNLESS(secure_resource != nullptr, ResultOutOfResource);
+
+ ON_RESULT_FAILURE {
+ secure_resource->Close();
+ };
+
+ // Initialize the secure resource.
+ R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, res_limit,
+ m_memory_pool));
+
+ // Set our system resource.
+ m_system_resource = secure_resource;
+ } else {
+ // Use the system-wide system resource.
+ const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
+ m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
+ : m_kernel.GetSystemSystemResource());
+
+ m_is_default_application_system_resource = is_app;
- KSharedMemoryInfo* shemen_info = nullptr;
- const auto iter = std::find_if(
- m_shared_memory_list.begin(), m_shared_memory_list.end(),
- [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; });
- if (iter != m_shared_memory_list.end()) {
- shemen_info = *iter;
+ // Open reference to the system resource.
+ m_system_resource->Open();
}
- ASSERT(shemen_info != nullptr);
+ // Ensure we clean up our secure resource, if we fail.
+ ON_RESULT_FAILURE {
+ m_system_resource->Close();
+ m_system_resource = nullptr;
+ };
- if (shemen_info->Close()) {
- m_shared_memory_list.erase(iter);
- KSharedMemoryInfo::Free(m_kernel, shemen_info);
+ // Setup page table.
+ {
+ const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
+ const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
+ const bool enable_das_merge =
+ False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
+ R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+ params.code_address, params.code_num_pages * PageSize,
+ m_system_resource, res_limit, this->GetMemory(), 0));
}
+ ON_RESULT_FAILURE_2 {
+ m_page_table.Finalize();
+ };
- // Close a reference to the shared memory.
- shmem->Close();
-}
+ // Ensure we can insert the code region.
+ R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
+ KMemoryState::Code),
+ ResultInvalidMemoryRegion);
-void KProcess::RegisterThread(KThread* thread) {
- KScopedLightLock lk{m_list_lock};
+ // Map the code region.
+ R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState::Code,
+ KMemoryPermission::KernelRead));
- m_thread_list.push_back(thread);
-}
+ // Initialize capabilities.
+ R_TRY(m_capabilities.InitializeForKip(caps, std::addressof(m_page_table)));
-void KProcess::UnregisterThread(KThread* thread) {
- KScopedLightLock lk{m_list_lock};
+ // Initialize the process id.
+ m_process_id = m_kernel.CreateNewUserProcessID();
+ ASSERT(InitialProcessIdMin <= m_process_id);
+ ASSERT(m_process_id <= InitialProcessIdMax);
- m_thread_list.remove(thread);
-}
+ // Initialize the rest of the process.
+ R_TRY(this->Initialize(params, res_limit, true));
-u64 KProcess::GetFreeThreadCount() const {
- if (m_resource_limit != nullptr) {
- const auto current_value =
- m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
- const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
- return limit_value - current_value;
- } else {
- return 0;
- }
+ // We succeeded!
+ R_SUCCEED();
}
-Result KProcess::Reset() {
- // Lock the process and the scheduler.
- KScopedLightLock lk(m_state_lock);
- KScopedSchedulerLock sl{m_kernel};
+Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
+ std::span<const u32> user_caps, KResourceLimit* res_limit,
+ KMemoryManager::Pool pool, KProcessAddress aslr_space_start) {
+ ASSERT(res_limit != nullptr);
- // Validate that we're in a state that we can reset.
- R_UNLESS(m_state != State::Terminated, ResultInvalidState);
- R_UNLESS(m_is_signaled, ResultInvalidState);
+ // Set members.
+ m_memory_pool = pool;
+ m_is_default_application_system_resource = false;
+ m_is_immortal = false;
- // Clear signaled.
- m_is_signaled = false;
- R_SUCCEED();
-}
+ // Get the memory sizes.
+ const size_t code_num_pages = params.code_num_pages;
+ const size_t system_resource_num_pages = params.system_resource_num_pages;
+ const size_t code_size = code_num_pages * PageSize;
+ const size_t system_resource_size = system_resource_num_pages * PageSize;
-Result KProcess::SetActivity(ProcessActivity activity) {
- // Lock ourselves and the scheduler.
- KScopedLightLock lk{m_state_lock};
- KScopedLightLock list_lk{m_list_lock};
- KScopedSchedulerLock sl{m_kernel};
+ // Reserve memory for our code resource.
+ KScopedResourceReservation memory_reservation(
+ res_limit, Svc::LimitableResource::PhysicalMemoryMax, code_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
- // Validate our state.
- R_UNLESS(m_state != State::Terminating, ResultInvalidState);
- R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+ // Setup our system resource.
+ if (system_resource_num_pages != 0) {
+ // Create a secure system resource.
+ KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
+ R_UNLESS(secure_resource != nullptr, ResultOutOfResource);
- // Either pause or resume.
- if (activity == ProcessActivity::Paused) {
- // Verify that we're not suspended.
- R_UNLESS(!m_is_suspended, ResultInvalidState);
+ ON_RESULT_FAILURE {
+ secure_resource->Close();
+ };
- // Suspend all threads.
- for (auto* thread : this->GetThreadList()) {
- thread->RequestSuspend(SuspendType::Process);
- }
+ // Initialize the secure resource.
+ R_TRY(secure_resource->Initialize(system_resource_size, res_limit, m_memory_pool));
+
+ // Set our system resource.
+ m_system_resource = secure_resource;
- // Set ourselves as suspended.
- this->SetSuspended(true);
} else {
- ASSERT(activity == ProcessActivity::Runnable);
+ // Use the system-wide system resource.
+ const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
+ m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
+ : m_kernel.GetSystemSystemResource());
- // Verify that we're suspended.
- R_UNLESS(m_is_suspended, ResultInvalidState);
+ m_is_default_application_system_resource = is_app;
- // Resume all threads.
- for (auto* thread : this->GetThreadList()) {
- thread->Resume(SuspendType::Process);
- }
+ // Open reference to the system resource.
+ m_system_resource->Open();
+ }
- // Set ourselves as resumed.
- this->SetSuspended(false);
+ // Ensure we clean up our secure resource, if we fail.
+ ON_RESULT_FAILURE {
+ m_system_resource->Close();
+ m_system_resource = nullptr;
+ };
+
+ // Setup page table.
+ {
+ const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
+ const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
+ const bool enable_das_merge =
+ False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
+ R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
+ params.code_address, code_size, m_system_resource, res_limit,
+ this->GetMemory(), aslr_space_start));
+ }
+ ON_RESULT_FAILURE_2 {
+ m_page_table.Finalize();
+ };
+
+ // Ensure we can insert the code region.
+ R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
+ ResultInvalidMemoryRegion);
+
+ // Map the code region.
+ R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState::Code,
+ KMemoryPermission::KernelRead | KMemoryPermission::NotMapped));
+
+ // Initialize capabilities.
+ R_TRY(m_capabilities.InitializeForUser(user_caps, std::addressof(m_page_table)));
+
+ // Initialize the process id.
+ m_process_id = m_kernel.CreateNewUserProcessID();
+ ASSERT(ProcessIdMin <= m_process_id);
+ ASSERT(m_process_id <= ProcessIdMax);
+
+ // If we should optimize memory allocations, do so.
+ if (m_system_resource->IsSecureResource() &&
+ True(params.flags & Svc::CreateProcessFlag::OptimizeMemoryAllocation)) {
+ R_TRY(m_kernel.MemoryManager().InitializeOptimizedMemory(m_process_id, pool));
}
+ // Initialize the rest of the process.
+ R_TRY(this->Initialize(params, res_limit, true));
+
+ // We succeeded, so commit our memory reservation.
+ memory_reservation.Commit();
R_SUCCEED();
}
-Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
- bool is_hbl) {
- m_program_id = metadata.GetTitleID();
- m_ideal_core = metadata.GetMainThreadCore();
- m_is_64bit_process = metadata.Is64BitProgram();
- m_system_resource_size = metadata.GetSystemResourceSize();
- m_image_size = code_size;
- m_is_hbl = is_hbl;
+void KProcess::DoWorkerTaskImpl() {
+ // Terminate child threads.
+ TerminateChildren(m_kernel, this, nullptr);
- if (metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is39Bit) {
- // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large.
- // However, some (buggy) programs/libraries like skyline incorrectly depend on the
- // existence of ASLR pages before the entry point, so we will adjust the load address
- // to point to about 2GiB into the ASLR region.
- m_code_address = 0x8000'0000;
- } else {
- // All other processes can be mapped at the beginning of the code region.
- if (metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is36Bit) {
- m_code_address = 0x800'0000;
- } else {
- m_code_address = 0x20'0000;
- }
+ // Finalize the handle table, if we're not immortal.
+ if (!m_is_immortal && m_is_handle_table_initialized) {
+ this->FinalizeHandleTable();
}
- KScopedResourceReservation memory_reservation(
- m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size);
- if (!memory_reservation.Succeeded()) {
- LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
- code_size + m_system_resource_size);
- R_RETURN(ResultLimitReached);
- }
- // Initialize process address space
- if (const Result result{m_page_table.InitializeForProcess(
- metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
- this->GetEntryPoint(), code_size, std::addressof(m_kernel.GetAppSystemResource()),
- m_resource_limit, m_kernel.System().ApplicationMemory())};
- result.IsError()) {
- R_RETURN(result);
- }
-
- // Map process code region
- if (const Result result{m_page_table.MapProcessCode(this->GetEntryPoint(), code_size / PageSize,
- KMemoryState::Code,
- KMemoryPermission::None)};
- result.IsError()) {
- R_RETURN(result);
- }
-
- // Initialize process capabilities
- const auto& caps{metadata.GetKernelCapabilities()};
- if (const Result result{
- m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)};
- result.IsError()) {
- R_RETURN(result);
- }
-
- // Set memory usage capacity
- switch (metadata.GetAddressSpaceType()) {
- case FileSys::ProgramAddressSpaceType::Is32Bit:
- case FileSys::ProgramAddressSpaceType::Is36Bit:
- case FileSys::ProgramAddressSpaceType::Is39Bit:
- m_memory_usage_capacity =
- m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart();
- break;
+ // Finish termination.
+ this->FinishTermination();
+}
- case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
- m_memory_usage_capacity =
- (m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) +
- (m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart());
- break;
+Result KProcess::StartTermination() {
+ // Finalize the handle table when we're done, if the process isn't immortal.
+ SCOPE_EXIT({
+ if (!m_is_immortal) {
+ this->FinalizeHandleTable();
+ }
+ });
- default:
- ASSERT(false);
- break;
- }
+ // Terminate child threads other than the current one.
+ R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
+}
- // Create TLS region
- R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
- memory_reservation.Commit();
+void KProcess::FinishTermination() {
+ // Only allow termination to occur if the process isn't immortal.
+ if (!m_is_immortal) {
+ // Release resource limit hint.
+ if (m_resource_limit != nullptr) {
+ m_memory_release_hint = this->GetUsedNonSystemUserPhysicalMemorySize();
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, 0,
+ m_memory_release_hint);
+ }
+
+ // Change state.
+ {
+ KScopedSchedulerLock sl(m_kernel);
+ this->ChangeState(State::Terminated);
+ }
- R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize()));
+ // Close.
+ this->Close();
+ }
}
-void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
- ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess);
- m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
+void KProcess::Exit() {
+    // Determine whether we need to start terminating.
+ bool needs_terminate = false;
+ {
+ KScopedLightLock lk(m_state_lock);
+ KScopedSchedulerLock sl(m_kernel);
+
+ ASSERT(m_state != State::Created);
+ ASSERT(m_state != State::CreatedAttached);
+ ASSERT(m_state != State::Crashed);
+ ASSERT(m_state != State::Terminated);
+ if (m_state == State::Running || m_state == State::RunningAttached ||
+ m_state == State::DebugBreak) {
+ this->ChangeState(State::Terminating);
+ needs_terminate = true;
+ }
+ }
- const std::size_t heap_capacity{m_memory_usage_capacity -
- (m_main_thread_stack_size + m_image_size)};
- ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError());
+ // If we need to start termination, do so.
+ if (needs_terminate) {
+ this->StartTermination();
- this->ChangeState(State::Running);
+ // Register the process as a work task.
+ m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
+ }
- SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top);
+ // Exit the current thread.
+ GetCurrentThread(m_kernel).Exit();
}
-void KProcess::PrepareForTermination() {
- this->ChangeState(State::Terminating);
+Result KProcess::Terminate() {
+ // Determine whether we need to start terminating.
+ bool needs_terminate = false;
+ {
+ KScopedLightLock lk(m_state_lock);
- const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto* thread : in_thread_list) {
- if (thread->GetOwnerProcess() != this)
- continue;
+ // Check whether we're allowed to terminate.
+ R_UNLESS(m_state != State::Created, ResultInvalidState);
+ R_UNLESS(m_state != State::CreatedAttached, ResultInvalidState);
- if (thread == GetCurrentThreadPointer(m_kernel))
- continue;
+ KScopedSchedulerLock sl(m_kernel);
- // TODO(Subv): When are the other running/ready threads terminated?
- ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
- "Exiting processes with non-waiting threads is currently unimplemented");
+ if (m_state == State::Running || m_state == State::RunningAttached ||
+ m_state == State::Crashed || m_state == State::DebugBreak) {
+ this->ChangeState(State::Terminating);
+ needs_terminate = true;
+ }
+ }
- thread->Exit();
+ // If we need to terminate, do so.
+ if (needs_terminate) {
+ // Start termination.
+ if (R_SUCCEEDED(this->StartTermination())) {
+ // Finish termination.
+ this->FinishTermination();
+ } else {
+ // Register the process as a work task.
+ m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit,
+ this);
}
- };
+ }
- stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());
+ R_SUCCEED();
+}
- this->DeleteThreadLocalRegion(m_plr_address);
- m_plr_address = 0;
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
+ // Lock ourselves, to prevent concurrent access.
+ KScopedLightLock lk(m_state_lock);
- if (m_resource_limit) {
- m_resource_limit->Release(LimitableResource::PhysicalMemoryMax,
- m_main_thread_stack_size + m_image_size);
+ // Try to find an existing info for the memory.
+ KSharedMemoryInfo* info = nullptr;
+ for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
+ if (it->GetSharedMemory() == shmem) {
+ info = std::addressof(*it);
+ break;
+ }
}
- this->ChangeState(State::Terminated);
-}
+ // If we didn't find an info, create one.
+ if (info == nullptr) {
+ // Allocate a new info.
+ info = KSharedMemoryInfo::Allocate(m_kernel);
+ R_UNLESS(info != nullptr, ResultOutOfResource);
-void KProcess::Finalize() {
- // Free all shared memory infos.
- {
- auto it = m_shared_memory_list.begin();
- while (it != m_shared_memory_list.end()) {
- KSharedMemoryInfo* info = *it;
- KSharedMemory* shmem = info->GetSharedMemory();
+ // Initialize the info and add it to our list.
+ info->Initialize(shmem);
+ m_shared_memory_list.push_back(*info);
+ }
- while (!info->Close()) {
- shmem->Close();
- }
+ // Open a reference to the shared memory and its info.
+ shmem->Open();
+ info->Open();
- shmem->Close();
+ R_SUCCEED();
+}
- it = m_shared_memory_list.erase(it);
- KSharedMemoryInfo::Free(m_kernel, info);
+void KProcess::RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
+ // Lock ourselves, to prevent concurrent access.
+ KScopedLightLock lk(m_state_lock);
+
+ // Find an existing info for the memory.
+ KSharedMemoryInfo* info = nullptr;
+ auto it = m_shared_memory_list.begin();
+ for (; it != m_shared_memory_list.end(); ++it) {
+ if (it->GetSharedMemory() == shmem) {
+ info = std::addressof(*it);
+ break;
}
}
+ ASSERT(info != nullptr);
- // Release memory to the resource limit.
- if (m_resource_limit != nullptr) {
- m_resource_limit->Close();
- m_resource_limit = nullptr;
+ // Close a reference to the info and its memory.
+ if (info->Close()) {
+ m_shared_memory_list.erase(it);
+ KSharedMemoryInfo::Free(m_kernel, info);
}
- // Finalize the page table.
- m_page_table.Finalize();
-
- // Perform inherited finalization.
- KSynchronizationObject::Finalize();
+ shmem->Close();
}
Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
@@ -518,7 +601,7 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
// See if we can get a region from a partially used TLP.
{
- KScopedSchedulerLock sl{m_kernel};
+ KScopedSchedulerLock sl(m_kernel);
if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
tlr = it->Reserve();
@@ -538,7 +621,9 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
// Allocate a new page.
tlp = KThreadLocalPage::Allocate(m_kernel);
R_UNLESS(tlp != nullptr, ResultOutOfMemory);
- auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });
+ ON_RESULT_FAILURE {
+ KThreadLocalPage::Free(m_kernel, tlp);
+ };
// Initialize the new page.
R_TRY(tlp->Initialize(m_kernel, this));
@@ -549,7 +634,7 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
// Insert into our tree.
{
- KScopedSchedulerLock sl{m_kernel};
+ KScopedSchedulerLock sl(m_kernel);
if (tlp->IsAllUsed()) {
m_fully_used_tlp_tree.insert(*tlp);
} else {
@@ -558,7 +643,6 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
}
// We succeeded!
- tlp_guard.Cancel();
*out = tlr;
R_SUCCEED();
}
@@ -568,7 +652,7 @@ Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
// Release the region.
{
- KScopedSchedulerLock sl{m_kernel};
+ KScopedSchedulerLock sl(m_kernel);
// Try to find the page in the partially used list.
auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
@@ -611,95 +695,213 @@ Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
R_SUCCEED();
}
-bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
- const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
- return wp.type == DebugWatchpointType::None;
- })};
+bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value) {
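+    // A process without a resource limit has nothing to reserve against, so trivially succeed.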
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ return rl->Reserve(which, value);
+ } else {
+ return true;
+ }
+}
- if (watch == m_watchpoints.end()) {
- return false;
+bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ return rl->Reserve(which, value, timeout);
+ } else {
+ return true;
}
+}
- watch->start_address = addr;
- watch->end_address = addr + size;
- watch->type = type;
+void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ rl->Release(which, value);
+ }
+}
- for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
- page += PageSize) {
- m_debug_page_refcounts[page]++;
- this->GetMemory().MarkRegionDebug(page, PageSize, true);
+void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint) {
+ if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
+ rl->Release(which, value, hint);
}
+}
- return true;
+void KProcess::IncrementRunningThreadCount() {
+ ASSERT(m_num_running_threads.load() >= 0);
+
+ ++m_num_running_threads;
}
-bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
- const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
- return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
- })};
+void KProcess::DecrementRunningThreadCount() {
+ ASSERT(m_num_running_threads.load() > 0);
- if (watch == m_watchpoints.end()) {
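+    // If that was the last running thread, terminate the process.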
+ if (const auto prev = m_num_running_threads--; prev == 1) {
+ this->Terminate();
+ }
+}
+
+bool KProcess::EnterUserException() {
+ // Get the current thread.
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+ ASSERT(this == cur_thread->GetOwnerProcess());
+
+ // Check that we haven't already claimed the exception thread.
+ if (m_exception_thread == cur_thread) {
return false;
}
- watch->start_address = 0;
- watch->end_address = 0;
- watch->type = DebugWatchpointType::None;
+ // Create the wait queue we'll be using.
+ ThreadQueueImplForKProcessEnterUserException wait_queue(m_kernel,
+ std::addressof(m_exception_thread));
- for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
- page += PageSize) {
- m_debug_page_refcounts[page]--;
- if (!m_debug_page_refcounts[page]) {
- this->GetMemory().MarkRegionDebug(page, PageSize, false);
+ // Claim the exception thread.
+ {
+ // Lock the scheduler.
+ KScopedSchedulerLock sl(m_kernel);
+
+ // Check that we're not terminating.
+ if (cur_thread->IsTerminationRequested()) {
+ return false;
}
+
+ // If we don't have an exception thread, we can just claim it directly.
+ if (m_exception_thread == nullptr) {
+ m_exception_thread = cur_thread;
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ return true;
+ }
+
+ // Otherwise, we need to wait until we don't have an exception thread.
+
+ // Add the current thread as a waiter on the current exception thread.
+ cur_thread->SetKernelAddressKey(
+ reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
+ m_exception_thread->AddWaiter(cur_thread);
+
+ // Wait to claim the exception thread.
+ cur_thread->BeginWait(std::addressof(wait_queue));
}
- return true;
+ // If our wait didn't end due to thread termination, we succeeded.
+ return ResultTerminationRequested != cur_thread->GetWaitResult();
}
-void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
- const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
- Svc::MemoryPermission permission) {
- m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
- };
+bool KProcess::LeaveUserException() {
+ return this->ReleaseUserException(GetCurrentThreadPointer(m_kernel));
+}
- this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
+bool KProcess::ReleaseUserException(KThread* thread) {
+ KScopedSchedulerLock sl(m_kernel);
- ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
- ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
- ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
+ if (m_exception_thread == thread) {
+ m_exception_thread = nullptr;
+
+ // Remove waiter thread.
+ bool has_waiters;
+ if (KThread* next = thread->RemoveKernelWaiterByKey(
+ std::addressof(has_waiters),
+ reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
+ next != nullptr) {
+ next->EndWait(ResultSuccess);
+ }
+
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+
+ return true;
+ } else {
+ return false;
+ }
}
-bool KProcess::IsSignaled() const {
- ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
- return m_is_signaled;
+void KProcess::RegisterThread(KThread* thread) {
+ KScopedLightLock lk(m_list_lock);
+
+ m_thread_list.push_back(*thread);
}
-KProcess::KProcess(KernelCore& kernel)
- : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()},
- m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()},
- m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {}
+void KProcess::UnregisterThread(KThread* thread) {
+ KScopedLightLock lk(m_list_lock);
-KProcess::~KProcess() = default;
+ m_thread_list.erase(m_thread_list.iterator_to(*thread));
+}
+
+size_t KProcess::GetUsedUserPhysicalMemorySize() const {
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
+ const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
+
+ return norm_size + other_size + sec_size;
+}
-void KProcess::ChangeState(State new_state) {
- if (m_state == new_state) {
- return;
+size_t KProcess::GetTotalUserPhysicalMemorySize() const {
+ // Get the amount of free and used size.
+ const size_t free_size =
+ m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
+ const size_t max_size = m_max_process_memory;
+
+ // Determine used size.
+ // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
+ // GetUsedUserPhysicalMemorySize().
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
+ const size_t sec_size = this->GetRequiredSecureMemorySize();
+ const size_t used_size = norm_size + other_size + sec_size;
+
+    // NOTE: These function calls will recalculate, introducing a race... it is unclear why Nintendo
+ // does it this way.
+ if (used_size + free_size > max_size) {
+ return max_size;
+ } else {
+ return free_size + this->GetUsedUserPhysicalMemorySize();
}
+}
+
+size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
- m_state = new_state;
- m_is_signaled = true;
- this->NotifyAvailable();
+ return norm_size + other_size;
}
-Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
+ // Get the amount of free and used size.
+ const size_t free_size =
+ m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
+ const size_t max_size = m_max_process_memory;
+
+ // Determine used size.
+ // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
+ // GetUsedUserPhysicalMemorySize().
+ const size_t norm_size = m_page_table.GetNormalMemorySize();
+ const size_t other_size = m_code_size + m_main_thread_stack_size;
+ const size_t sec_size = this->GetRequiredSecureMemorySize();
+ const size_t used_size = norm_size + other_size + sec_size;
+
+    // NOTE: These function calls will recalculate, introducing a race... it is unclear why Nintendo
+ // does it this way.
+ if (used_size + free_size > max_size) {
+ return max_size - this->GetRequiredSecureMemorySizeNonDefault();
+ } else {
+ return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
+ }
+}
+
+Result KProcess::Run(s32 priority, size_t stack_size) {
+ // Lock ourselves, to prevent concurrent access.
+ KScopedLightLock lk(m_state_lock);
+
+ // Validate that we're in a state where we can initialize.
+ const auto state = m_state;
+ R_UNLESS(state == State::Created || state == State::CreatedAttached, ResultInvalidState);
+
+ // Place a tentative reservation of a thread for this process.
+ KScopedResourceReservation thread_reservation(this, Svc::LimitableResource::ThreadCountMax);
+ R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);
+
// Ensure that we haven't already allocated stack.
ASSERT(m_main_thread_stack_size == 0);
// Ensure that we're allocating a valid stack.
stack_size = Common::AlignUp(stack_size, PageSize);
- // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory);
- R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory);
+ R_UNLESS(stack_size + m_code_size <= m_max_process_memory, ResultOutOfMemory);
+ R_UNLESS(stack_size + m_code_size >= m_code_size, ResultOutOfMemory);
// Place a tentative reservation of memory for our new stack.
KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
@@ -707,21 +909,370 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
// Allocate and map our stack.
+ KProcessAddress stack_top = 0;
if (stack_size) {
KProcessAddress stack_bottom;
R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
- m_main_thread_stack_top = stack_bottom + stack_size;
+ stack_top = stack_bottom + stack_size;
m_main_thread_stack_size = stack_size;
}
+ // Ensure our stack is safe to clean up on exit.
+ ON_RESULT_FAILURE {
+ if (m_main_thread_stack_size) {
+ ASSERT(R_SUCCEEDED(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size,
+ m_main_thread_stack_size / PageSize,
+ KMemoryState::Stack)));
+ m_main_thread_stack_size = 0;
+ }
+ };
+
+ // Set our maximum heap size.
+ R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory -
+ (m_main_thread_stack_size + m_code_size)));
+
+ // Initialize our handle table.
+ R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
+ ON_RESULT_FAILURE_2 {
+ this->FinalizeHandleTable();
+ };
+
+ // Create a new thread for the process.
+ KThread* main_thread = KThread::Create(m_kernel);
+ R_UNLESS(main_thread != nullptr, ResultOutOfResource);
+ SCOPE_EXIT({ main_thread->Close(); });
+
+ // Initialize the thread.
+ R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
+ stack_top, priority, m_ideal_core_id, this));
+
+ // Register the thread, and commit our reservation.
+ KThread::Register(m_kernel, main_thread);
+ thread_reservation.Commit();
+
+ // Add the thread to our handle table.
+ Handle thread_handle;
+ R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));
+
+ // Set the thread arguments.
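+ // The first argument register is cleared and the second receives the main thread's handle, as the guest entry point expects.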
+ main_thread->GetContext32().cpu_registers[0] = 0;
+ main_thread->GetContext64().cpu_registers[0] = 0;
+ main_thread->GetContext32().cpu_registers[1] = thread_handle;
+ main_thread->GetContext64().cpu_registers[1] = thread_handle;
+
+ // Update our state.
+ this->ChangeState((state == State::Created) ? State::Running : State::RunningAttached);
+ ON_RESULT_FAILURE_2 {
+ this->ChangeState(state);
+ };
+
+ // Suspend for debug, if we should.
+ if (m_kernel.System().DebuggerEnabled()) {
+ main_thread->RequestSuspend(SuspendType::Debug);
+ }
+
+ // Run our thread.
+ R_TRY(main_thread->Run());
+
+ // Open a reference to represent that we're running.
+ this->Open();
+
// We succeeded! Commit our memory reservation.
mem_reservation.Commit();
R_SUCCEED();
}
+Result KProcess::Reset() {
+ // Lock the process and the scheduler.
+ KScopedLightLock lk(m_state_lock);
+ KScopedSchedulerLock sl(m_kernel);
+
+ // Validate that we're in a state that we can reset.
+ R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+ R_UNLESS(m_is_signaled, ResultInvalidState);
+
+ // Clear signaled.
+ m_is_signaled = false;
+ R_SUCCEED();
+}
+
+Result KProcess::SetActivity(Svc::ProcessActivity activity) {
+ // Lock ourselves and the scheduler.
+ KScopedLightLock lk(m_state_lock);
+ KScopedLightLock list_lk(m_list_lock);
+ KScopedSchedulerLock sl(m_kernel);
+
+ // Validate our state.
+ R_UNLESS(m_state != State::Terminating, ResultInvalidState);
+ R_UNLESS(m_state != State::Terminated, ResultInvalidState);
+
+ // Either pause or resume.
+ if (activity == Svc::ProcessActivity::Paused) {
+ // Verify that we're not suspended.
+ R_UNLESS(!m_is_suspended, ResultInvalidState);
+
+ // Suspend all threads.
+ auto end = this->GetThreadList().end();
+ for (auto it = this->GetThreadList().begin(); it != end; ++it) {
+ it->RequestSuspend(SuspendType::Process);
+ }
+
+ // Set ourselves as suspended.
+ this->SetSuspended(true);
+ } else {
+ ASSERT(activity == Svc::ProcessActivity::Runnable);
+
+ // Verify that we're suspended.
+ R_UNLESS(m_is_suspended, ResultInvalidState);
+
+ // Resume all threads.
+ auto end = this->GetThreadList().end();
+ for (auto it = this->GetThreadList().begin(); it != end; ++it) {
+ it->Resume(SuspendType::Process);
+ }
+
+ // Set ourselves as resumed.
+ this->SetSuspended(false);
+ }
+
+ R_SUCCEED();
+}
+
+void KProcess::PinCurrentThread() {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(m_kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+
+ // If the thread isn't terminated, pin it.
+ if (!cur_thread->IsTerminationRequested()) {
+ // Pin it.
+ this->PinThread(core_id, cur_thread);
+ cur_thread->Pin(core_id);
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+ }
+}
+
+void KProcess::UnpinCurrentThread() {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Get the current thread.
+ const s32 core_id = GetCurrentCoreId(m_kernel);
+ KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+
+ // Unpin it.
+ cur_thread->Unpin();
+ this->UnpinThread(core_id, cur_thread);
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+}
+
+void KProcess::UnpinThread(KThread* thread) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+
+ // Get the thread's core id.
+ const auto core_id = thread->GetActiveCore();
+
+ // Unpin it.
+ this->UnpinThread(core_id, thread);
+ thread->Unpin();
+
+ // An update is needed.
+ KScheduler::SetSchedulerUpdateNeeded(m_kernel);
+}
+
+Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids,
+ s32 max_out_count) {
+ // TODO: use current memory reference
+ auto& memory = m_kernel.System().ApplicationMemory();
+
+ // Lock the list.
+ KScopedLightLock lk(m_list_lock);
+
+ // Iterate over the list.
+ s32 count = 0;
+ auto end = this->GetThreadList().end();
+ for (auto it = this->GetThreadList().begin(); it != end; ++it) {
+ // If we're within array bounds, write the id.
+ if (count < max_out_count) {
+ // Get the thread id.
+ KThread* thread = std::addressof(*it);
+ const u64 id = thread->GetId();
+
+ // Copy the id to userland.
+ memory.Write64(out_thread_ids + count * sizeof(u64), id);
+ }
+
+ // Increment the count.
+ ++count;
+ }
+
+ // We successfully iterated the list.
+ *out_num_threads = count;
+ R_SUCCEED();
+}
+
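+// Process switching currently requires no work in this implementation, so this is intentionally a no-op.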
+void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
+
+KProcess::KProcess(KernelCore& kernel)
+ : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
+ m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
+ m_handle_table{kernel} {}
+KProcess::~KProcess() = default;
+
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
+ KProcessAddress aslr_space_start, bool is_hbl) {
+ // Create a resource limit for the process.
+ const auto physical_memory_size =
+ m_kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
+ auto* res_limit =
+ Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
+
+ // Ensure we maintain a clean state on exit.
+ SCOPE_EXIT({ res_limit->Close(); });
+
+ // Declare flags and code address.
+ Svc::CreateProcessFlag flag{};
+ u64 code_address{};
+
+ // We are an application.
+ flag |= Svc::CreateProcessFlag::IsApplication;
+
+ // If we are 64-bit, create as such.
+ if (metadata.Is64BitProgram()) {
+ flag |= Svc::CreateProcessFlag::Is64Bit;
+ }
+
+ // Set the address space type and code address.
+ switch (metadata.GetAddressSpaceType()) {
+ case FileSys::ProgramAddressSpaceType::Is39Bit:
+ flag |= Svc::CreateProcessFlag::AddressSpace64Bit;
+
+ // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large.
+ // However, some (buggy) programs/libraries like skyline incorrectly depend on the
+ // existence of ASLR pages before the entry point, so we will adjust the load address
+ // to point to about 2GiB into the ASLR region.
+ code_address = 0x8000'0000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is36Bit:
+ flag |= Svc::CreateProcessFlag::AddressSpace64BitDeprecated;
+ code_address = 0x800'0000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is32Bit:
+ flag |= Svc::CreateProcessFlag::AddressSpace32Bit;
+ code_address = 0x20'0000;
+ break;
+ case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
+ flag |= Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias;
+ code_address = 0x20'0000;
+ break;
+ }
+
+ Svc::CreateProcessParameter params{
+ .name = {},
+ .version = {},
+ .program_id = metadata.GetTitleID(),
+ .code_address = code_address + GetInteger(aslr_space_start),
+ .code_num_pages = static_cast<s32>(code_size / PageSize),
+ .flags = flag,
+ .reslimit = Svc::InvalidHandle,
+ .system_resource_num_pages = static_cast<s32>(metadata.GetSystemResourceSize() / PageSize),
+ };
+
+ // Set the process name.
+ const auto& name = metadata.GetName();
+ static_assert(sizeof(params.name) <= sizeof(name));
+ std::memcpy(params.name.data(), name.data(), sizeof(params.name));
+
+ // Initialize for application process.
+ R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit,
+ KMemoryManager::Pool::Application, aslr_space_start));
+
+ // Assign remaining properties.
+ m_is_hbl = is_hbl;
+ m_ideal_core_id = metadata.GetMainThreadCore();
+
+ // We succeeded.
+ R_SUCCEED();
+}
+
+void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
+ const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
+ Svc::MemoryPermission permission) {
+ m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+ };
+
+ this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
+
+ ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
+ ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
+ ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
+
+#ifdef HAS_NCE
+ if (Settings::IsNceEnabled()) {
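+ // Make the host buffer backing the code and patch segments executable, and remove guest access to the patch segment.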
+ auto& buffer = m_kernel.System().DeviceMemory().buffer;
+ const auto& code = code_set.CodeSegment();
+ const auto& patch = code_set.PatchSegment();
+ buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
+ buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+ ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
+ }
+#endif
+}
+
+bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
+ const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
+ return wp.type == DebugWatchpointType::None;
+ })};
+
+ if (watch == m_watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = addr;
+ watch->end_address = addr + size;
+ watch->type = type;
+
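+ // Mark every page covered by the watchpoint as debug memory, using a per-page refcount to handle overlapping watchpoints.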
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
+ m_debug_page_refcounts[page]++;
+ this->GetMemory().MarkRegionDebug(page, PageSize, true);
+ }
+
+ return true;
+}
+
+bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
+ const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
+ return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
+ })};
+
+ if (watch == m_watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = 0;
+ watch->end_address = 0;
+ watch->type = DebugWatchpointType::None;
+
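+ // Decrement the per-page refcounts, clearing the debug marking once no remaining watchpoint covers a page.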
+ for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
+ page += PageSize) {
+ m_debug_page_refcounts[page]--;
+ if (!m_debug_page_refcounts[page]) {
+ this->GetMemory().MarkRegionDebug(page, PageSize, false);
+ }
+ }
+
+ return true;
+}
+
Core::Memory::Memory& KProcess::GetMemory() const {
// TODO: per-process memory
return m_kernel.System().ApplicationMemory();
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 146e07a57..d8cd0fdde 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -1,59 +1,24 @@
-// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <array>
-#include <cstddef>
-#include <list>
#include <map>
-#include <string>
+
+#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
-#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_page_table_manager.h"
+#include "core/hle/kernel/k_process_page_table.h"
+#include "core/hle/kernel/k_system_resource.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
-#include "core/hle/kernel/k_typed_address.h"
-#include "core/hle/kernel/k_worker_task.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/slab_helpers.h"
-#include "core/hle/result.h"
-
-namespace Core {
-namespace Memory {
-class Memory;
-};
-
-class System;
-} // namespace Core
-
-namespace FileSys {
-class ProgramMetadata;
-}
namespace Kernel {
-class KernelCore;
-class KResourceLimit;
-class KThread;
-class KSharedMemoryInfo;
-class TLSPage;
-
-struct CodeSet;
-
-enum class MemoryRegion : u16 {
- APPLICATION = 1,
- SYSTEM = 2,
- BASE = 3,
-};
-
-enum class ProcessActivity : u32 {
- Runnable,
- Paused,
-};
-
enum class DebugWatchpointType : u8 {
None = 0,
Read = 1 << 0,
@@ -72,9 +37,6 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public:
- explicit KProcess(KernelCore& kernel);
- ~KProcess() override;
-
enum class State {
Created = static_cast<u32>(Svc::ProcessState::Created),
CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
@@ -86,470 +48,502 @@ public:
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
};
- enum : u64 {
- /// Lowest allowed process ID for a kernel initial process.
- InitialKIPIDMin = 1,
- /// Highest allowed process ID for a kernel initial process.
- InitialKIPIDMax = 80,
+ using ThreadList = Common::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
- /// Lowest allowed process ID for a userland process.
- ProcessIDMin = 81,
- /// Highest allowed process ID for a userland process.
- ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
- };
+ static constexpr size_t AslrAlignment = 2_MiB;
- // Used to determine how process IDs are assigned.
- enum class ProcessType {
- KernelInternal,
- Userland,
- };
-
- static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
+public:
+ static constexpr u64 InitialProcessIdMin = 1;
+ static constexpr u64 InitialProcessIdMax = 0x50;
- static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type, KResourceLimit* res_limit);
+ static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
+ static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
- /// Gets a reference to the process' page table.
- KPageTable& GetPageTable() {
- return m_page_table;
- }
+private:
+ using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
+ using TLPTree =
+ Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
+ using TLPIterator = TLPTree::iterator;
- /// Gets const a reference to the process' page table.
- const KPageTable& GetPageTable() const {
- return m_page_table;
- }
+private:
+ KProcessPageTable m_page_table;
+ std::atomic<size_t> m_used_kernel_memory_size{};
+ TLPTree m_fully_used_tlp_tree{};
+ TLPTree m_partially_used_tlp_tree{};
+ s32 m_ideal_core_id{};
+ KResourceLimit* m_resource_limit{};
+ KSystemResource* m_system_resource{};
+ size_t m_memory_release_hint{};
+ State m_state{};
+ KLightLock m_state_lock;
+ KLightLock m_list_lock;
+ KConditionVariable m_cond_var;
+ KAddressArbiter m_address_arbiter;
+ std::array<u64, 4> m_entropy{};
+ bool m_is_signaled{};
+ bool m_is_initialized{};
+ bool m_is_application{};
+ bool m_is_default_application_system_resource{};
+ bool m_is_hbl{};
+ std::array<char, 13> m_name{};
+ std::atomic<u16> m_num_running_threads{};
+ Svc::CreateProcessFlag m_flags{};
+ KMemoryManager::Pool m_memory_pool{};
+ s64 m_schedule_count{};
+ KCapabilities m_capabilities{};
+ u64 m_program_id{};
+ u64 m_process_id{};
+ KProcessAddress m_code_address{};
+ size_t m_code_size{};
+ size_t m_main_thread_stack_size{};
+ size_t m_max_process_memory{};
+ u32 m_version{};
+ KHandleTable m_handle_table;
+ KProcessAddress m_plr_address{};
+ KThread* m_exception_thread{};
+ ThreadList m_thread_list{};
+ SharedMemoryInfoList m_shared_memory_list{};
+ bool m_is_suspended{};
+ bool m_is_immortal{};
+ bool m_is_handle_table_initialized{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
+ std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_switch_counts{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
+ std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
+ std::map<KProcessAddress, u64> m_debug_page_refcounts{};
+ std::atomic<s64> m_cpu_time{};
+ std::atomic<s64> m_num_process_switches{};
+ std::atomic<s64> m_num_thread_switches{};
+ std::atomic<s64> m_num_fpu_switches{};
+ std::atomic<s64> m_num_supervisor_calls{};
+ std::atomic<s64> m_num_ipc_messages{};
+ std::atomic<s64> m_num_ipc_replies{};
+ std::atomic<s64> m_num_ipc_receives{};
+#ifdef HAS_NCE
+ std::unordered_map<u64, u64> m_post_handlers{};
+#endif
- /// Gets a reference to the process' handle table.
- KHandleTable& GetHandleTable() {
- return m_handle_table;
- }
+private:
+ Result StartTermination();
+ void FinishTermination();
- /// Gets a const reference to the process' handle table.
- const KHandleTable& GetHandleTable() const {
- return m_handle_table;
+ void PinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(m_pinned_threads[core_id] == nullptr);
+ m_pinned_threads[core_id] = thread;
}
- /// Gets a reference to process's memory.
- Core::Memory::Memory& GetMemory() const;
-
- Result SignalToAddress(KProcessAddress address) {
- return m_condition_var.SignalToAddress(address);
+ void UnpinThread(s32 core_id, KThread* thread) {
+ ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+ ASSERT(thread != nullptr);
+ ASSERT(m_pinned_threads[core_id] == thread);
+ m_pinned_threads[core_id] = nullptr;
}
- Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
- return m_condition_var.WaitForAddress(handle, address, tag);
- }
+public:
+ explicit KProcess(KernelCore& kernel);
+ ~KProcess() override;
- void SignalConditionVariable(u64 cv_key, int32_t count) {
- return m_condition_var.Signal(cv_key, count);
- }
+ Result Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
+ bool is_real);
- Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
- R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
- }
+ Result Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
+ std::span<const u32> caps, KResourceLimit* res_limit,
+ KMemoryManager::Pool pool, bool immortal);
+ Result Initialize(const Svc::CreateProcessParameter& params, std::span<const u32> user_caps,
+ KResourceLimit* res_limit, KMemoryManager::Pool pool,
+ KProcessAddress aslr_space_start);
+ void Exit();
- Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
- s32 count) {
- R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
+ const char* GetName() const {
+ return m_name.data();
}
- Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
- s64 timeout) {
- R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+ u64 GetProgramId() const {
+ return m_program_id;
}
- KProcessAddress GetProcessLocalRegionAddress() const {
- return m_plr_address;
+ u64 GetProcessId() const {
+ return m_process_id;
}
- /// Gets the current status of the process
State GetState() const {
return m_state;
}
- /// Gets the unique ID that identifies this particular process.
- u64 GetProcessId() const {
- return m_process_id;
+ u64 GetCoreMask() const {
+ return m_capabilities.GetCoreMask();
+ }
+ u64 GetPhysicalCoreMask() const {
+ return m_capabilities.GetPhysicalCoreMask();
+ }
+ u64 GetPriorityMask() const {
+ return m_capabilities.GetPriorityMask();
}
- /// Gets the program ID corresponding to this process.
- u64 GetProgramId() const {
- return m_program_id;
+ s32 GetIdealCoreId() const {
+ return m_ideal_core_id;
+ }
+ void SetIdealCoreId(s32 core_id) {
+ m_ideal_core_id = core_id;
}
- KProcessAddress GetEntryPoint() const {
- return m_code_address;
+ bool CheckThreadPriority(s32 prio) const {
+ return ((1ULL << prio) & this->GetPriorityMask()) != 0;
}
- /// Gets the resource limit descriptor for this process
- KResourceLimit* GetResourceLimit() const;
+ u32 GetCreateProcessFlags() const {
+ return static_cast<u32>(m_flags);
+ }
- /// Gets the ideal CPU core ID for this process
- u8 GetIdealCoreId() const {
- return m_ideal_core;
+ bool Is64Bit() const {
+ return True(m_flags & Svc::CreateProcessFlag::Is64Bit);
}
- /// Checks if the specified thread priority is valid.
- bool CheckThreadPriority(s32 prio) const {
- return ((1ULL << prio) & GetPriorityMask()) != 0;
+ KProcessAddress GetEntryPoint() const {
+ return m_code_address;
}
- /// Gets the bitmask of allowed cores that this process' threads can run on.
- u64 GetCoreMask() const {
- return m_capabilities.GetCoreMask();
+ size_t GetMainStackSize() const {
+ return m_main_thread_stack_size;
}
- /// Gets the bitmask of allowed thread priorities.
- u64 GetPriorityMask() const {
- return m_capabilities.GetPriorityMask();
+ KMemoryManager::Pool GetMemoryPool() const {
+ return m_memory_pool;
}
- /// Gets the amount of secure memory to allocate for memory management.
- u32 GetSystemResourceSize() const {
- return m_system_resource_size;
+ u64 GetRandomEntropy(size_t i) const {
+ return m_entropy[i];
}
- /// Gets the amount of secure memory currently in use for memory management.
- u32 GetSystemResourceUsage() const {
- // On hardware, this returns the amount of system resource memory that has
- // been used by the kernel. This is problematic for Yuzu to emulate, because
- // system resource memory is used for page tables -- and yuzu doesn't really
- // have a way to calculate how much memory is required for page tables for
- // the current process at any given time.
- // TODO: Is this even worth implementing? Games may retrieve this value via
- // an SDK function that gets used + available system resource size for debug
- // or diagnostic purposes. However, it seems unlikely that a game would make
- // decisions based on how much system memory is dedicated to its page tables.
- // Is returning a value other than zero wise?
- return 0;
+ bool IsApplication() const {
+ return m_is_application;
}
- /// Whether this process is an AArch64 or AArch32 process.
- bool Is64BitProcess() const {
- return m_is_64bit_process;
+ bool IsDefaultApplicationSystemResource() const {
+ return m_is_default_application_system_resource;
}
bool IsSuspended() const {
return m_is_suspended;
}
-
void SetSuspended(bool suspended) {
m_is_suspended = suspended;
}
- /// Gets the total running time of the process instance in ticks.
- u64 GetCPUTimeTicks() const {
- return m_total_process_running_time_ticks;
+ Result Terminate();
+
+ bool IsTerminated() const {
+ return m_state == State::Terminated;
}
- /// Updates the total running time, adding the given ticks to it.
- void UpdateCPUTimeTicks(u64 ticks) {
- m_total_process_running_time_ticks += ticks;
+ bool IsPermittedSvc(u32 svc_id) const {
+ return m_capabilities.IsPermittedSvc(svc_id);
}
- /// Gets the process schedule count, used for thread yielding
- s64 GetScheduledCount() const {
- return m_schedule_count;
+ bool IsPermittedInterrupt(s32 interrupt_id) const {
+ return m_capabilities.IsPermittedInterrupt(interrupt_id);
}
- /// Increments the process schedule count, used for thread yielding.
- void IncrementScheduledCount() {
- ++m_schedule_count;
+ bool IsPermittedDebug() const {
+ return m_capabilities.IsPermittedDebug();
}
- void IncrementRunningThreadCount();
- void DecrementRunningThreadCount();
+ bool CanForceDebug() const {
+ return m_capabilities.CanForceDebug();
+ }
- void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
- m_running_threads[core] = thread;
- m_running_thread_idle_counts[core] = idle_count;
+ bool IsHbl() const {
+ return m_is_hbl;
}
- void ClearRunningThread(KThread* thread) {
- for (size_t i = 0; i < m_running_threads.size(); ++i) {
- if (m_running_threads[i] == thread) {
- m_running_threads[i] = nullptr;
- }
- }
+ u32 GetAllocateOption() const {
+ return m_page_table.GetAllocateOption();
}
- [[nodiscard]] KThread* GetRunningThread(s32 core) const {
- return m_running_threads[core];
+ ThreadList& GetThreadList() {
+ return m_thread_list;
+ }
+ const ThreadList& GetThreadList() const {
+ return m_thread_list;
}
+ bool EnterUserException();
+ bool LeaveUserException();
bool ReleaseUserException(KThread* thread);
- [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
+ KThread* GetPinnedThread(s32 core_id) const {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return m_pinned_threads[core_id];
}
- /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
- u64 GetRandomEntropy(std::size_t index) const {
- return m_random_entropy.at(index);
+ const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
+ return m_capabilities.GetSvcPermissions();
}
- /// Retrieves the total physical memory available to this process in bytes.
- u64 GetTotalPhysicalMemoryAvailable();
-
- /// Retrieves the total physical memory available to this process in bytes,
- /// without the size of the personal system resource heap added to it.
- u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
-
- /// Retrieves the total physical memory used by this process in bytes.
- u64 GetTotalPhysicalMemoryUsed();
+ KResourceLimit* GetResourceLimit() const {
+ return m_resource_limit;
+ }
- /// Retrieves the total physical memory used by this process in bytes,
- /// without the size of the personal system resource heap added to it.
- u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
+ bool ReserveResource(Svc::LimitableResource which, s64 value);
+ bool ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout);
+ void ReleaseResource(Svc::LimitableResource which, s64 value);
+ void ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint);
- /// Gets the list of all threads created with this process as their owner.
- std::list<KThread*>& GetThreadList() {
- return m_thread_list;
+ KLightLock& GetStateLock() {
+ return m_state_lock;
+ }
+ KLightLock& GetListLock() {
+ return m_list_lock;
}
- /// Registers a thread as being created under this process,
- /// adding it to this process' thread list.
- void RegisterThread(KThread* thread);
+ KProcessPageTable& GetPageTable() {
+ return m_page_table;
+ }
+ const KProcessPageTable& GetPageTable() const {
+ return m_page_table;
+ }
- /// Unregisters a thread from this process, removing it
- /// from this process' thread list.
- void UnregisterThread(KThread* thread);
+ KHandleTable& GetHandleTable() {
+ return m_handle_table;
+ }
+ const KHandleTable& GetHandleTable() const {
+ return m_handle_table;
+ }
- /// Retrieves the number of available threads for this process.
- u64 GetFreeThreadCount() const;
-
- /// Clears the signaled state of the process if and only if it's signaled.
- ///
- /// @pre The process must not be already terminated. If this is called on a
- /// terminated process, then ResultInvalidState will be returned.
- ///
- /// @pre The process must be in a signaled state. If this is called on a
- /// process instance that is not signaled, ResultInvalidState will be
- /// returned.
- Result Reset();
+ size_t GetUsedUserPhysicalMemorySize() const;
+ size_t GetTotalUserPhysicalMemorySize() const;
+ size_t GetUsedNonSystemUserPhysicalMemorySize() const;
+ size_t GetTotalNonSystemUserPhysicalMemorySize() const;
- /**
- * Loads process-specifics configuration info with metadata provided
- * by an executable.
- *
- * @param metadata The provided metadata to load process specific info from.
- *
- * @returns ResultSuccess if all relevant metadata was able to be
- * loaded and parsed. Otherwise, an error code is returned.
- */
- Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
- bool is_hbl);
-
- /**
- * Starts the main application thread for this process.
- *
- * @param main_thread_priority The priority for the main thread.
- * @param stack_size The stack size for the main thread in bytes.
- */
- void Run(s32 main_thread_priority, u64 stack_size);
-
- /**
- * Prepares a process for termination by stopping all of its threads
- * and clearing any other resources.
- */
- void PrepareForTermination();
+ Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
+ void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
- void LoadModule(CodeSet code_set, KProcessAddress base_addr);
+ Result CreateThreadLocalRegion(KProcessAddress* out);
+ Result DeleteThreadLocalRegion(KProcessAddress addr);
- bool IsInitialized() const override {
- return m_is_initialized;
+ KProcessAddress GetProcessLocalRegionAddress() const {
+ return m_plr_address;
}
- static void PostDestroy(uintptr_t arg) {}
-
- void Finalize() override;
-
- u64 GetId() const override {
- return GetProcessId();
+ KThread* GetExceptionThread() const {
+ return m_exception_thread;
}
- bool IsHbl() const {
- return m_is_hbl;
+ void AddCpuTime(s64 diff) {
+ m_cpu_time += diff;
+ }
+ s64 GetCpuTime() {
+ return m_cpu_time.load();
}
- bool IsSignaled() const override;
-
- void DoWorkerTaskImpl();
+ s64 GetScheduledCount() const {
+ return m_schedule_count;
+ }
+ void IncrementScheduledCount() {
+ ++m_schedule_count;
+ }
- Result SetActivity(ProcessActivity activity);
+ void IncrementRunningThreadCount();
+ void DecrementRunningThreadCount();
- void PinCurrentThread(s32 core_id);
- void UnpinCurrentThread(s32 core_id);
- void UnpinThread(KThread* thread);
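+ // Returns the secure memory required when this process owns a non-default secure system resource; otherwise zero.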
+ size_t GetRequiredSecureMemorySizeNonDefault() const {
+ if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->CalculateRequiredSecureMemorySize();
+ }
- KLightLock& GetStateLock() {
- return m_state_lock;
+ return 0;
}
- Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
- void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
-
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Thread-local storage management
-
- // Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
+ size_t GetRequiredSecureMemorySize() const {
+ if (m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->CalculateRequiredSecureMemorySize();
+ }
- // Frees a used TLS slot identified by the given address
- Result DeleteThreadLocalRegion(KProcessAddress addr);
+ return 0;
+ }
- ///////////////////////////////////////////////////////////////////////////////////////////////
- // Debug watchpoint management
+ size_t GetTotalSystemResourceSize() const {
+ if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->GetSize();
+ }
- // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
- bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
+ return 0;
+ }
- // Attempts to remove the watchpoint specified by the given parameters.
- bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
+ size_t GetUsedSystemResourceSize() const {
+ if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
+ auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
+ return secure_system_resource->GetUsedSize();
+ }
- const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
- return m_watchpoints;
+ return 0;
}
- const std::string& GetName() {
- return name;
+ void SetRunningThread(s32 core, KThread* thread, u64 idle_count, u64 switch_count) {
+ m_running_threads[core] = thread;
+ m_running_thread_idle_counts[core] = idle_count;
+ m_running_thread_switch_counts[core] = switch_count;
}
-private:
- void PinThread(s32 core_id, KThread* thread) {
- ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
- ASSERT(thread != nullptr);
- ASSERT(m_pinned_threads[core_id] == nullptr);
- m_pinned_threads[core_id] = thread;
+ void ClearRunningThread(KThread* thread) {
+ for (size_t i = 0; i < m_running_threads.size(); ++i) {
+ if (m_running_threads[i] == thread) {
+ m_running_threads[i] = nullptr;
+ }
+ }
}
- void UnpinThread(s32 core_id, KThread* thread) {
- ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
- ASSERT(thread != nullptr);
- ASSERT(m_pinned_threads[core_id] == thread);
- m_pinned_threads[core_id] = nullptr;
+ const KSystemResource& GetSystemResource() const {
+ return *m_system_resource;
}
- void FinalizeHandleTable() {
- // Finalize the table.
- m_handle_table.Finalize();
-
- // Note that the table is finalized.
- m_is_handle_table_initialized = false;
+ const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
+ return m_system_resource->GetMemoryBlockSlabManager();
+ }
+ const KBlockInfoManager& GetBlockInfoManager() const {
+ return m_system_resource->GetBlockInfoManager();
+ }
+ const KPageTableManager& GetPageTableManager() const {
+ return m_system_resource->GetPageTableManager();
}
- void ChangeState(State new_state);
-
- /// Allocates the main thread stack for the process, given the stack size in bytes.
- Result AllocateMainThreadStack(std::size_t stack_size);
-
- /// Memory manager for this process
- KPageTable m_page_table;
+ KThread* GetRunningThread(s32 core) const {
+ return m_running_threads[core];
+ }
+ u64 GetRunningThreadIdleCount(s32 core) const {
+ return m_running_thread_idle_counts[core];
+ }
+ u64 GetRunningThreadSwitchCount(s32 core) const {
+ return m_running_thread_switch_counts[core];
+ }
- /// Current status of the process
- State m_state{};
+ void RegisterThread(KThread* thread);
+ void UnregisterThread(KThread* thread);
- /// The ID of this process
- u64 m_process_id = 0;
+ Result Run(s32 priority, size_t stack_size);
- /// Title ID corresponding to the process
- u64 m_program_id = 0;
+ Result Reset();
- /// Specifies additional memory to be reserved for the process's memory management by the
- /// system. When this is non-zero, secure memory is allocated and used for page table allocation
- /// instead of using the normal global page tables/memory block management.
- u32 m_system_resource_size = 0;
+ void SetDebugBreak() {
+ if (m_state == State::RunningAttached) {
+ this->ChangeState(State::DebugBreak);
+ }
+ }
- /// Resource limit descriptor for this process
- KResourceLimit* m_resource_limit{};
+ void SetAttached() {
+ if (m_state == State::DebugBreak) {
+ this->ChangeState(State::RunningAttached);
+ }
+ }
- KVirtualAddress m_system_resource_address{};
+ Result SetActivity(Svc::ProcessActivity activity);
- /// The ideal CPU core for this process, threads are scheduled on this core by default.
- u8 m_ideal_core = 0;
+ void PinCurrentThread();
+ void UnpinCurrentThread();
+ void UnpinThread(KThread* thread);
- /// Contains the parsed process capability descriptors.
- ProcessCapabilities m_capabilities;
+ void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
+ return m_cond_var.Signal(cv_key, count);
+ }
- /// Whether or not this process is AArch64, or AArch32.
- /// By default, we currently assume this is true, unless otherwise
- /// specified by metadata provided to the process during loading.
- bool m_is_64bit_process = true;
+ Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
+ R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
+ }
- /// Total running time for the process in ticks.
- std::atomic<u64> m_total_process_running_time_ticks = 0;
+ Result SignalAddressArbiter(uintptr_t address, Svc::SignalType signal_type, s32 value,
+ s32 count) {
+ R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
+ }
- /// Per-process handle table for storing created object handles in.
- KHandleTable m_handle_table;
+ Result WaitAddressArbiter(uintptr_t address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
+ R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+ }
- /// Per-process address arbiter.
- KAddressArbiter m_address_arbiter;
+ Result GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids, s32 max_out_count);
- /// The per-process mutex lock instance used for handling various
- /// forms of services, such as lock arbitration, and condition
- /// variable related facilities.
- KConditionVariable m_condition_var;
+ static void Switch(KProcess* cur_process, KProcess* next_process);
- /// Address indicating the location of the process' dedicated TLS region.
- KProcessAddress m_plr_address = 0;
+#ifdef HAS_NCE
+ std::unordered_map<u64, u64>& GetPostHandlers() noexcept {
+ return m_post_handlers;
+ }
+#endif
- /// Address indicating the location of the process's entry point.
- KProcessAddress m_code_address = 0;
+public:
+ // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
+ bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
- /// Random values for svcGetInfo RandomEntropy
- std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
+ // Attempts to remove the watchpoint specified by the given parameters.
+ bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
- /// List of threads that are running with this process as their owner.
- std::list<KThread*> m_thread_list;
+ const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
+ return m_watchpoints;
+ }
- /// List of shared memory that are running with this process as their owner.
- std::list<KSharedMemoryInfo*> m_shared_memory_list;
+public:
+ Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
+ KProcessAddress aslr_space_start, bool is_hbl);
- /// Address of the top of the main thread's stack
- KProcessAddress m_main_thread_stack_top{};
+ void LoadModule(CodeSet code_set, KProcessAddress base_addr);
- /// Size of the main thread's stack
- std::size_t m_main_thread_stack_size{};
+ Core::Memory::Memory& GetMemory() const;
- /// Memory usage capacity for the process
- std::size_t m_memory_usage_capacity{};
+public:
+ // Overridden parent functions.
+ bool IsInitialized() const override {
+ return m_is_initialized;
+ }
- /// Process total image size
- std::size_t m_image_size{};
+ static void PostDestroy(uintptr_t arg) {}
- /// Schedule count of this process
- s64 m_schedule_count{};
+ void Finalize() override;
- size_t m_memory_release_hint{};
+ u64 GetIdImpl() const {
+ return this->GetProcessId();
+ }
+ u64 GetId() const override {
+ return this->GetIdImpl();
+ }
- std::string name{};
+ bool IsSignaled() const override {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
+ return m_is_signaled;
+ }
- bool m_is_signaled{};
- bool m_is_suspended{};
- bool m_is_immortal{};
- bool m_is_handle_table_initialized{};
- bool m_is_initialized{};
- bool m_is_hbl{};
+ void DoWorkerTaskImpl();
- std::atomic<u16> m_num_running_threads{};
+private:
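+ // Transitions to the new state and signals this process if the state actually changed.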
+ void ChangeState(State new_state) {
+ if (m_state != new_state) {
+ m_state = new_state;
+ m_is_signaled = true;
+ this->NotifyAvailable();
+ }
+ }
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
- std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
- std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
- std::map<KProcessAddress, u64> m_debug_page_refcounts;
+ Result InitializeHandleTable(s32 size) {
+ // Try to initialize the handle table.
+ R_TRY(m_handle_table.Initialize(size));
- KThread* m_exception_thread{};
+ // We succeeded, so note that we did.
+ m_is_handle_table_initialized = true;
+ R_SUCCEED();
+ }
- KLightLock m_state_lock;
- KLightLock m_list_lock;
+ void FinalizeHandleTable() {
+ // Finalize the table.
+ m_handle_table.Finalize();
- using TLPTree =
- Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
- using TLPIterator = TLPTree::iterator;
- TLPTree m_fully_used_tlp_tree;
- TLPTree m_partially_used_tlp_tree;
+ // Note that the table is finalized.
+ m_is_handle_table_initialized = false;
+ }
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process_page_table.h b/src/core/hle/kernel/k_process_page_table.h
new file mode 100644
index 000000000..9e40f68bc
--- /dev/null
+++ b/src/core/hle/kernel/k_process_page_table.h
@@ -0,0 +1,481 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+#include "core/hle/kernel/svc_types.h"
+
+namespace Core {
+class ARM_Interface;
+}
+
+namespace Kernel {
+
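+// Thin wrapper that forwards process-facing operations to the underlying KPageTable.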
+class KProcessPageTable {
+private:
+ KPageTable m_page_table;
+
+public:
+ explicit KProcessPageTable(KernelCore& kernel) : m_page_table(kernel) {}
+
+ Result Initialize(Svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge,
+ bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address,
+ size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit, Core::Memory::Memory& memory,
+ KProcessAddress aslr_space_start) {
+ R_RETURN(m_page_table.InitializeForProcess(
+ as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size,
+ system_resource, resource_limit, memory, aslr_space_start));
+ }
+
+ void Finalize() {
+ m_page_table.Finalize();
+ }
+
+ Core::Memory::Memory& GetMemory() {
+ return m_page_table.GetMemory();
+ }
+
+ Core::Memory::Memory& GetMemory() const {
+ return m_page_table.GetMemory();
+ }
+
+ Common::PageTable& GetImpl() {
+ return m_page_table.GetImpl();
+ }
+
+ Common::PageTable& GetImpl() const {
+ return m_page_table.GetImpl();
+ }
+
+ size_t GetNumGuardPages() const {
+ return m_page_table.GetNumGuardPages();
+ }
+
+ KScopedLightLock AcquireDeviceMapLock() {
+ return m_page_table.AcquireDeviceMapLock();
+ }
+
+ Result SetMemoryPermission(KProcessAddress addr, size_t size, Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
+ }
+
+ Result SetProcessMemoryPermission(KProcessAddress addr, size_t size,
+ Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
+ }
+
+ Result SetMemoryAttribute(KProcessAddress addr, size_t size, KMemoryAttribute mask,
+ KMemoryAttribute attr) {
+ R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
+ }
+
+ Result SetHeapSize(KProcessAddress* out, size_t size) {
+ R_RETURN(m_page_table.SetHeapSize(out, size));
+ }
+
+ Result SetMaxHeapSize(size_t size) {
+ R_RETURN(m_page_table.SetMaxHeapSize(size));
+ }
+
+ Result QueryInfo(KMemoryInfo* out_info, Svc::PageInfo* out_page_info,
+ KProcessAddress addr) const {
+ R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
+ }
+
+ Result QueryPhysicalAddress(Svc::lp64::PhysicalMemoryInfo* out, KProcessAddress address) {
+ R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
+ }
+
+ Result QueryStaticMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+ R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
+ }
+
+ Result QueryIoMapping(KProcessAddress* out, KPhysicalAddress address, size_t size) {
+ R_RETURN(m_page_table.QueryIoMapping(out, address, size));
+ }
+
+ Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
+ }
+
+ Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
+ }
+
+ Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
+ }
+
+ Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
+ }
+
+ Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
+ }
+
+ Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping, Svc::MemoryPermission perm) {
+ R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
+ }
+
+ Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size,
+ Svc::MemoryMapping mapping) {
+ R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
+ }
+
+ Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
+ }
+
+ Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapRegion(region_type, perm));
+ }
+
+ Result MapInsecureMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapInsecureMemory(address, size));
+ }
+
+ Result UnmapInsecureMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapInsecureMemory(address, size));
+ }
+
+ Result MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
+ }
+
+ Result UnmapPageGroup(KProcessAddress address, const KPageGroup& pg, KMemoryState state) {
+ R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
+ KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
+ }
+
+ Result MapPages(KProcessAddress* out_addr, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
+ }
+
+ Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
+ }
+
+ Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
+ R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
+ }
+
+ Result MakeAndOpenPageGroup(KPageGroup* out, KProcessAddress address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state,
+ perm_mask, perm, attr_mask, attr));
+ }
+
+ Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
+ }
+
+ Result ReadDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.ReadDebugMemory(dst_address, src_address, size));
+ }
+
+ Result ReadDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state) {
+ R_RETURN(m_page_table.ReadDebugIoMemory(dst_address, src_address, size, state));
+ }
+
+ Result WriteDebugMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
+ R_RETURN(m_page_table.WriteDebugMemory(dst_address, src_address, size));
+ }
+
+ Result WriteDebugIoMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size,
+ KMemoryState state) {
+ R_RETURN(m_page_table.WriteDebugIoMemory(dst_address, src_address, size, state));
+ }
+
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap) {
+ R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm,
+ is_aligned, check_heap));
+ }
+
+ Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
+ R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
+ }
+
+ Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
+ }
+
+ Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
+ }
+
+ Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size,
+ KMemoryPermission perm, bool is_aligned) {
+ R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm,
+ is_aligned));
+ }
+
+ Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
+ }
+
+ Result LockForIpcUserBuffer(KPhysicalAddress* out, KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
+ }
+
+ Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
+ }
+
+ Result LockForTransferMemory(KPageGroup* out, KProcessAddress address, size_t size,
+ KMemoryPermission perm) {
+ R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
+ }
+
+ Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+ R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
+ }
+
+ Result LockForCodeMemory(KPageGroup* out, KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
+ }
+
+ Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup& pg) {
+ R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
+ }
+
+ Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange* out,
+ KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
+ }
+
+ Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask,
+ src_state, src_test_perm, src_attr_mask,
+ src_attr));
+ }
+
+ Result CopyMemoryFromLinearToKernel(void* dst_addr, size_t size, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state,
+ KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask,
+ src_state, src_test_perm, src_attr_mask,
+ src_attr));
+ }
+
+ Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr) {
+ R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state,
+ dst_test_perm, dst_attr_mask, dst_attr,
+ src_addr));
+ }
+
+ Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state,
+ KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ void* src_addr) {
+ R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask,
+ dst_state, dst_test_perm, dst_attr_mask,
+ dst_attr, src_addr));
+ }
+
+ Result CopyMemoryFromHeapToHeap(KProcessPageTable& dst_page_table, KProcessAddress dst_addr,
+ size_t size, KMemoryState dst_state_mask,
+ KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr,
+ KProcessAddress src_addr, KMemoryState src_state_mask,
+ KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(
+ dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+ dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+ src_attr_mask, src_attr));
+ }
+
+ Result CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ KProcessPageTable& dst_page_table, KProcessAddress dst_addr, size_t size,
+ KMemoryState dst_state_mask, KMemoryState dst_state, KMemoryPermission dst_test_perm,
+ KMemoryAttribute dst_attr_mask, KMemoryAttribute dst_attr, KProcessAddress src_addr,
+ KMemoryState src_state_mask, KMemoryState src_state, KMemoryPermission src_test_perm,
+ KMemoryAttribute src_attr_mask, KMemoryAttribute src_attr) {
+ R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(
+ dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm,
+ dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm,
+ src_attr_mask, src_attr));
+ }
+
+ Result SetupForIpc(KProcessAddress* out_dst_addr, size_t size, KProcessAddress src_addr,
+ KProcessPageTable& src_page_table, KMemoryPermission test_perm,
+ KMemoryState dst_state, bool send) {
+ R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table,
+ test_perm, dst_state, send));
+ }
+
+ Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
+ R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
+ }
+
+ Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
+ R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
+ }
+
+ Result MapPhysicalMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapPhysicalMemory(address, size));
+ }
+
+ Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
+ }
+
+ Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
+ }
+
+ Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
+ R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
+ }
+
+ Result UnmapProcessMemory(KProcessAddress dst_address, size_t size,
+ KProcessPageTable& src_page_table, KProcessAddress src_address) {
+ R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table,
+ src_address));
+ }
+
+ bool GetPhysicalAddress(KPhysicalAddress* out, KProcessAddress address) {
+ return m_page_table.GetPhysicalAddress(out, address);
+ }
+
+ bool Contains(KProcessAddress addr, size_t size) const {
+ return m_page_table.Contains(addr, size);
+ }
+
+ bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInAliasRegion(addr, size);
+ }
+ bool IsInHeapRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInHeapRegion(addr, size);
+ }
+ bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
+ return m_page_table.IsInUnsafeAliasRegion(addr, size);
+ }
+
+ bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+ return m_page_table.CanContain(addr, size, state);
+ }
+
+ KProcessAddress GetAddressSpaceStart() const {
+ return m_page_table.GetAddressSpaceStart();
+ }
+ KProcessAddress GetHeapRegionStart() const {
+ return m_page_table.GetHeapRegionStart();
+ }
+ KProcessAddress GetAliasRegionStart() const {
+ return m_page_table.GetAliasRegionStart();
+ }
+ KProcessAddress GetStackRegionStart() const {
+ return m_page_table.GetStackRegionStart();
+ }
+ KProcessAddress GetKernelMapRegionStart() const {
+ return m_page_table.GetKernelMapRegionStart();
+ }
+ KProcessAddress GetCodeRegionStart() const {
+ return m_page_table.GetCodeRegionStart();
+ }
+ KProcessAddress GetAliasCodeRegionStart() const {
+ return m_page_table.GetAliasCodeRegionStart();
+ }
+
+ size_t GetAddressSpaceSize() const {
+ return m_page_table.GetAddressSpaceSize();
+ }
+ size_t GetHeapRegionSize() const {
+ return m_page_table.GetHeapRegionSize();
+ }
+ size_t GetAliasRegionSize() const {
+ return m_page_table.GetAliasRegionSize();
+ }
+ size_t GetStackRegionSize() const {
+ return m_page_table.GetStackRegionSize();
+ }
+ size_t GetKernelMapRegionSize() const {
+ return m_page_table.GetKernelMapRegionSize();
+ }
+ size_t GetCodeRegionSize() const {
+ return m_page_table.GetCodeRegionSize();
+ }
+ size_t GetAliasCodeRegionSize() const {
+ return m_page_table.GetAliasCodeRegionSize();
+ }
+
+ size_t GetNormalMemorySize() const {
+ return m_page_table.GetNormalMemorySize();
+ }
+
+ size_t GetCodeSize() const {
+ return m_page_table.GetCodeSize();
+ }
+ size_t GetCodeDataSize() const {
+ return m_page_table.GetCodeDataSize();
+ }
+
+ size_t GetAliasCodeSize() const {
+ return m_page_table.GetAliasCodeSize();
+ }
+ size_t GetAliasCodeDataSize() const {
+ return m_page_table.GetAliasCodeDataSize();
+ }
+
+ u32 GetAllocateOption() const {
+ return m_page_table.GetAllocateOption();
+ }
+
+ u32 GetAddressSpaceWidth() const {
+ return m_page_table.GetAddressSpaceWidth();
+ }
+
+ KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) {
+ return m_page_table.GetHeapPhysicalAddress(address);
+ }
+
+ u8* GetHeapVirtualPointer(KPhysicalAddress address) {
+ return m_page_table.GetHeapVirtualPointer(address);
+ }
+
+ KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) {
+ return m_page_table.GetHeapVirtualAddress(address);
+ }
+
+ KBlockInfoManager* GetBlockInfoManager() {
+ return m_page_table.GetBlockInfoManager();
+ }
+
+ KPageTable& GetBasePageTable() {
+ return m_page_table;
+ }
+
+ const KPageTable& GetBasePageTable() const {
+ return m_page_table;
+ }
+};
+
+} // namespace Kernel
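The class above is a thin forwarding facade: every operation is delegated verbatim to the wrapped m_page_table, so kernel code can hold a KProcessPageTable without caring about the underlying page-table implementation. A minimal caller sketch, for illustration only — CheckHeapRange is an invented helper, and the result codes are assumed from the kernel's standard set:

    // Hypothetical helper validating a user-supplied heap range through the
    // wrapper's query methods before doing any real work with it.
    Result CheckHeapRange(KProcessPageTable& pt, KProcessAddress addr, size_t size) {
        // Reject ranges that fall outside the process address space.
        R_UNLESS(pt.Contains(addr, size), ResultInvalidCurrentMemory);
        // Require the range to lie entirely within the heap region.
        R_UNLESS(pt.IsInHeapRegion(addr, size), ResultInvalidMemoryRegion);
        R_SUCCEED();
    }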
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d8143c650..1bce63a56 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -190,7 +190,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
if (m_state.should_count_idle) {
if (highest_thread != nullptr) [[likely]] {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
- process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
+ process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, 0);
}
} else {
m_state.idle_count++;
@@ -356,7 +356,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(m_core_id, tick_diff);
if (cur_process != nullptr) {
- cur_process->UpdateCPUTimeTicks(tick_diff);
+ cur_process->AddCpuTime(tick_diff);
}
m_last_context_switch_time = cur_tick;
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index c64ceb530..3ea653163 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -383,7 +383,7 @@ Result KServerSession::SendReply(bool is_hle) {
if (event != nullptr) {
// // Get the client process/page table.
// KProcess *client_process = client_thread->GetOwnerProcess();
- // KPageTable *client_page_table = std::addressof(client_process->PageTable());
+ // KProcessPageTable *client_page_table = std::addressof(client_process->PageTable());
// // If we need to, reply with an async error.
// if (R_FAILED(client_result)) {
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index e6c8d589a..b51941faf 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -1,25 +1,100 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "core/core.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_resource.h"
namespace Kernel {
Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
KMemoryManager::Pool pool) {
- // Unimplemented
- UNREACHABLE();
+ // Set members.
+ m_resource_limit = resource_limit;
+ m_resource_size = size;
+ m_resource_pool = pool;
+
+ // Determine required size for our secure resource.
+ const size_t secure_size = this->CalculateRequiredSecureMemorySize();
+
+ // Reserve memory for our secure resource.
+ KScopedResourceReservation memory_reservation(
+ m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, secure_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate secure memory.
+ R_TRY(KSystemControl::AllocateSecureMemory(m_kernel, std::addressof(m_resource_address),
+ m_resource_size, static_cast<u32>(m_resource_pool)));
+ ASSERT(m_resource_address != 0);
+
+ // Ensure we clean up the secure memory, if we fail past this point.
+ ON_RESULT_FAILURE {
+ KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
+ static_cast<u32>(m_resource_pool));
+ };
+
+ // Check that our allocation is bigger than the reference counts needed for it.
+ const size_t rc_size =
+ Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
+ R_UNLESS(m_resource_size > rc_size, ResultOutOfMemory);
+
+ // Get resource pointer.
+ KPhysicalAddress resource_paddr =
+ KPageTable::GetHeapPhysicalAddress(m_kernel, m_resource_address);
+ auto* resource =
+ m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
+
+ // Initialize slab heaps.
+ m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size,
+ PageSize);
+ m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, resource);
+ m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+ m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+
+ // Initialize managers.
+ m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager),
+ std::addressof(m_page_table_heap));
+ m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager),
+ std::addressof(m_memory_block_heap));
+ m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager),
+ std::addressof(m_block_info_heap));
+
+ // Set our managers.
+ this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);
+
+ // Commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Open reference to our resource limit.
+ m_resource_limit->Open();
+
+ // Set ourselves as initialized.
+ m_is_initialized = true;
+
+ R_SUCCEED();
}
void KSecureSystemResource::Finalize() {
- // Unimplemented
- UNREACHABLE();
+ // Check that we have no outstanding allocations.
+ ASSERT(m_memory_block_slab_manager.GetUsed() == 0);
+ ASSERT(m_block_info_manager.GetUsed() == 0);
+ ASSERT(m_page_table_manager.GetUsed() == 0);
+
+ // Free our secure memory.
+ KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
+ static_cast<u32>(m_resource_pool));
+
+ // Release the memory reservation.
+ m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
+ this->CalculateRequiredSecureMemorySize());
+
+ // Close reference to our resource limit.
+ m_resource_limit->Close();
}
size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
KMemoryManager::Pool pool) {
- // Unimplemented
- UNREACHABLE();
+ return KSystemControl::CalculateRequiredSecureMemorySize(size, static_cast<u32>(pool));
}
} // namespace Kernel
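KSecureSystemResource::Initialize above lays out the allocated secure region as a page-aligned reference-count table at its base, followed by the pool handed to the dynamic page manager, which in turn backs the page-table, memory-block, and block-info slab heaps. A standalone sketch of that size split — PageSize and the per-page reference-count width are assumptions for illustration, not values taken from this patch:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 0x1000; // assumed 4 KiB pages

    constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    // Assumed layout: one 16-bit reference count per page of the resource.
    constexpr std::size_t CalculateReferenceCountSize(std::size_t resource_size) {
        return (resource_size / PageSize) * sizeof(std::uint16_t);
    }

    // The dynamic page manager receives whatever remains after the ref-count
    // table is carved off the front of the secure allocation.
    constexpr std::size_t ManagedSize(std::size_t resource_size) {
        const std::size_t rc_size =
            AlignUp(CalculateReferenceCountSize(resource_size), PageSize);
        return resource_size - rc_size;
    }

    // A 1 MiB secure resource loses exactly one page to reference counts.
    static_assert(ManagedSize(0x100000) == 0x100000 - 0x1000);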
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 7df8fd7f7..a6deb50ec 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -122,16 +122,15 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
case ThreadType::Main:
ASSERT(arg == 0);
[[fallthrough]];
- case ThreadType::HighPriority:
- [[fallthrough]];
- case ThreadType::Dummy:
- [[fallthrough]];
case ThreadType::User:
ASSERT(((owner == nullptr) ||
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
(owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
break;
+ case ThreadType::HighPriority:
+ case ThreadType::Dummy:
+ break;
case ThreadType::Kernel:
UNIMPLEMENTED();
break;
@@ -216,6 +215,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
// Setup the TLS, if needed.
if (type == ThreadType::User) {
R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
+ owner->GetMemory().ZeroBlock(m_tls_address, Svc::ThreadLocalRegionSize);
}
m_parent = owner;
@@ -403,7 +403,7 @@ void KThread::StartTermination() {
if (m_parent != nullptr) {
m_parent->ReleaseUserException(this);
if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
- m_parent->UnpinCurrentThread(m_core_id);
+ m_parent->UnpinCurrentThread();
}
}
@@ -415,10 +415,6 @@ void KThread::StartTermination() {
m_parent->ClearRunningThread(this);
}
- // Signal.
- m_signaled = true;
- KSynchronizationObject::NotifyAvailable();
-
// Clear previous thread in KScheduler.
KScheduler::ClearPreviousThread(m_kernel, this);
@@ -437,6 +433,13 @@ void KThread::FinishTermination() {
}
}
+ // Acquire the scheduler lock.
+ KScopedSchedulerLock sl{m_kernel};
+
+ // Signal.
+ m_signaled = true;
+ KSynchronizationObject::NotifyAvailable();
+
// Close the thread.
this->Close();
}
@@ -820,7 +823,7 @@ void KThread::CloneFpuStatus() {
ASSERT(this->GetOwnerProcess() != nullptr);
ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
- if (this->GetOwnerProcess()->Is64BitProcess()) {
+ if (this->GetOwnerProcess()->Is64Bit()) {
// Clone FPSR and FPCR.
ThreadContext64 cur_ctx{};
m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
@@ -923,7 +926,7 @@ Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
// If we're not terminating, get the thread's user context.
if (!this->IsTerminationRequested()) {
- if (m_parent->Is64BitProcess()) {
+ if (m_parent->Is64Bit()) {
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
auto context = GetContext64();
context.pstate &= 0xFF0FFE20;
@@ -1174,6 +1177,9 @@ Result KThread::Run() {
owner->IncrementRunningThreadCount();
}
+ // Open a reference, now that we're running.
+ this->Open();
+
// Set our state and finish.
this->SetState(ThreadState::Runnable);
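Taken together with the svc_thread.cpp hunk further down (which drops the Open()/RegisterInUseObject bookkeeping from StartThread), the k_thread.cpp changes above move the thread's self-reference into the thread itself: Run() opens a reference once the thread is runnable, and FinishTermination() signals waiters under the scheduler lock and then closes that reference. A consolidation sketch, descriptive rather than literal kernel code:

    // Before: the SVC layer pinned the started thread itself.
    //     R_TRY(thread->Run());
    //     thread->Open();
    //     kernel.RegisterInUseObject(thread.GetPointerUnsafe());
    //
    // After: KThread::Run() performs this->Open() once the thread is runnable,
    // and KThread::FinishTermination() wakes waiters under the scheduler lock
    // and then calls this->Close(), releasing the reference taken in Run().
    // StartThread() therefore reduces to:
    R_TRY(thread->Run());
    R_SUCCEED();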
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index d178c2453..e9ca5dfca 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -655,6 +655,21 @@ public:
return m_stack_top;
}
+public:
+ // TODO: This shouldn't be defined in kernel namespace
+ struct NativeExecutionParameters {
+ u64 tpidr_el0{};
+ u64 tpidrro_el0{};
+ void* native_context{};
+ std::atomic<u32> lock{1};
+ bool is_running{};
+ u32 magic{Common::MakeMagic('Y', 'U', 'Z', 'U')};
+ };
+
+ NativeExecutionParameters& GetNativeExecutionParameters() {
+ return m_native_execution_parameters;
+ }
+
private:
KThread* RemoveWaiterByKey(bool* out_has_waiters, KProcessAddress key,
bool is_kernel_address_key);
@@ -721,6 +736,7 @@ private:
// For core KThread implementation
ThreadContext32 m_thread_context_32{};
ThreadContext64 m_thread_context_64{};
+ Common::IntrusiveListNode m_process_list_node;
Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
s32 m_priority{};
using ConditionVariableThreadTreeTraits =
@@ -913,6 +929,7 @@ private:
ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
uintptr_t m_argument{};
KProcessAddress m_stack_top{};
+ NativeExecutionParameters m_native_execution_parameters{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
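NativeExecutionParameters is the per-thread block consumed by the new NCE backend (arm_nce.* in the diffstat); reading the struct, the magic field appears to give native code a cheap way to verify that a TLS pointer really refers to a guest thread's parameter block — that purpose is an inference, not something this hunk states. A standalone reproduction with such a check, reimplementing Common::MakeMagic locally for illustration:

    #include <atomic>
    #include <cstdint>

    // Same little-endian packing as Common::MakeMagic.
    constexpr std::uint32_t MakeMagic(char a, char b, char c, char d) {
        return static_cast<std::uint32_t>(a) | (static_cast<std::uint32_t>(b) << 8) |
               (static_cast<std::uint32_t>(c) << 16) | (static_cast<std::uint32_t>(d) << 24);
    }

    struct NativeExecutionParameters {
        std::uint64_t tpidr_el0{};
        std::uint64_t tpidrro_el0{};
        void* native_context{};
        std::atomic<std::uint32_t> lock{1};
        bool is_running{};
        std::uint32_t magic{MakeMagic('Y', 'U', 'Z', 'U')};
    };

    // A host-side handler could reject parameter blocks without the magic.
    bool IsGuestThreadParams(const NativeExecutionParameters* tls) {
        return tls != nullptr && tls->magic == MakeMagic('Y', 'U', 'Z', 'U');
    }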
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
index 2c45b4232..a632d1634 100644
--- a/src/core/hle/kernel/k_thread_local_page.cpp
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -37,8 +37,8 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
- const KPhysicalAddress phys_addr = m_owner->GetPageTable().GetPhysicalAddr(m_virt_addr);
- ASSERT(phys_addr);
+ KPhysicalAddress phys_addr{};
+ ASSERT(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), m_virt_addr));
// Unmap the page.
R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
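The Finalize() hunk above also shows the reshaped translation API: the old GetPhysicalAddr() returned the address directly, while KProcessPageTable::GetPhysicalAddress() writes through an out pointer and reports success via its bool return (presumably false when the virtual address is not mapped). Call-site sketch for the case where failure is handled rather than asserted:

    KPhysicalAddress phys_addr{};
    if (!page_table.GetPhysicalAddress(std::addressof(phys_addr), virt_addr)) {
        // Translation failed; bail out instead of asserting as the
        // kernel-internal caller above does.
        R_THROW(ResultInvalidCurrentMemory);
    }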
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 24433d32b..4a1559291 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -101,35 +101,31 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id]->Initialize((*application_process).Is64BitProcess());
+ cores[core_id]->Initialize((*application_process).Is64Bit());
system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
}
}
- void CloseApplicationProcess() {
- KProcess* old_process = application_process.exchange(nullptr);
- if (old_process == nullptr) {
- return;
- }
-
- // old_process->Close();
- // TODO: The process should be destroyed based on accurate ref counting after
- // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- old_process->Finalize();
- old_process->Destroy();
+ void TerminateApplicationProcess() {
+ application_process.load()->Terminate();
}
void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
- process_list.clear();
-
CloseServices();
+ auto* old_process = application_process.exchange(nullptr);
+ if (old_process) {
+ old_process->Close();
+ }
+
+ process_list.clear();
+
next_object_id = 0;
- next_kernel_process_id = KProcess::InitialKIPIDMin;
- next_user_process_id = KProcess::ProcessIDMin;
+ next_kernel_process_id = KProcess::InitialProcessIdMin;
+ next_user_process_id = KProcess::ProcessIdMin;
next_thread_id = 1;
global_handle_table->Finalize();
@@ -176,8 +172,6 @@ struct KernelCore::Impl {
}
}
- CloseApplicationProcess();
-
// Track kernel objects that were not freed on shutdown
{
std::scoped_lock lk{registered_objects_lock};
@@ -344,6 +338,8 @@ struct KernelCore::Impl {
// Create the system page table managers.
app_system_resource = std::make_unique<KSystemResource>(kernel);
sys_system_resource = std::make_unique<KSystemResource>(kernel);
+ KAutoObject::Create(std::addressof(*app_system_resource));
+ KAutoObject::Create(std::addressof(*sys_system_resource));
// Set the managers for the system resources.
app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
@@ -792,8 +788,8 @@ struct KernelCore::Impl {
std::mutex registered_in_use_objects_lock;
std::atomic<u32> next_object_id{0};
- std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
- std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
+ std::atomic<u64> next_kernel_process_id{KProcess::InitialProcessIdMin};
+ std::atomic<u64> next_user_process_id{KProcess::ProcessIdMin};
std::atomic<u64> next_thread_id{1};
// Lists all processes that exist in the current session.
@@ -924,10 +920,6 @@ const KProcess* KernelCore::ApplicationProcess() const {
return impl->application_process;
}
-void KernelCore::CloseApplicationProcess() {
- impl->CloseApplicationProcess();
-}
-
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list;
}
@@ -1128,8 +1120,8 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
std::function<void()> func) {
// Make a new process.
KProcess* process = KProcess::Create(*this);
- ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
- GetSystemResourceLimit())));
+ ASSERT(R_SUCCEEDED(
+ process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
SCOPE_EXIT({ process->Close(); });
@@ -1156,8 +1148,8 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
// Make a new process.
KProcess* process = KProcess::Create(*this);
- ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
- GetSystemResourceLimit())));
+ ASSERT(R_SUCCEEDED(
+ process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
SCOPE_EXIT({ process->Close(); });
@@ -1266,7 +1258,8 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
void KernelCore::SuspendApplication(bool suspended) {
const bool should_suspend{exception_exited || suspended};
- const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
+ const auto activity =
+ should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
// Get the application process.
KScopedAutoObject<KProcess> process = ApplicationProcess();
@@ -1300,6 +1293,8 @@ void KernelCore::SuspendApplication(bool suspended) {
}
void KernelCore::ShutdownCores() {
+ impl->TerminateApplicationProcess();
+
KScopedSchedulerLock lk{*this};
for (auto* thread : impl->shutdown_threads) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index d5b08eeb5..d8086c0ea 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -134,9 +134,6 @@ public:
/// Retrieves a const pointer to the application process.
const KProcess* ApplicationProcess() const;
- /// Closes the application process.
- void CloseApplicationProcess();
-
/// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const;
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 5ee869fa2..073039825 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -1,8 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/settings.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#ifdef HAS_NCE
+#include "core/arm/nce/arm_nce.h"
+#endif
#include "core/core.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
@@ -14,7 +18,8 @@ PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KSchedu
: m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
- // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
+ // an NCE interface or a 32-bit instance of Dynarmic. This should be abstracted out to a CPU
+ // manager.
auto& kernel = system.Kernel();
m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
system, kernel.IsMulticore(),
@@ -28,6 +33,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KSchedu
PhysicalCore::~PhysicalCore() = default;
void PhysicalCore::Initialize(bool is_64_bit) {
+#if defined(HAS_NCE)
+ if (Settings::IsNceEnabled()) {
+ m_arm_interface = std::make_unique<Core::ARM_NCE>(m_system, m_system.Kernel().IsMulticore(),
+ m_core_index);
+ return;
+ }
+#endif
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
auto& kernel = m_system.Kernel();
if (!is_64_bit) {
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
deleted file mode 100644
index 773319ad8..000000000
--- a/src/core/hle/kernel/process_capability.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <bit>
-
-#include "common/bit_util.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
-#include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/svc_results.h"
-
-namespace Kernel {
-namespace {
-
-// clang-format off
-
-// Shift offsets for kernel capability types.
-enum : u32 {
- CapabilityOffset_PriorityAndCoreNum = 3,
- CapabilityOffset_Syscall = 4,
- CapabilityOffset_MapPhysical = 6,
- CapabilityOffset_MapIO = 7,
- CapabilityOffset_MapRegion = 10,
- CapabilityOffset_Interrupt = 11,
- CapabilityOffset_ProgramType = 13,
- CapabilityOffset_KernelVersion = 14,
- CapabilityOffset_HandleTableSize = 15,
- CapabilityOffset_Debug = 16,
-};
-
-// Combined mask of all parameters that may be initialized only once.
-constexpr u32 InitializeOnceMask = (1U << CapabilityOffset_PriorityAndCoreNum) |
- (1U << CapabilityOffset_ProgramType) |
- (1U << CapabilityOffset_KernelVersion) |
- (1U << CapabilityOffset_HandleTableSize) |
- (1U << CapabilityOffset_Debug);
-
-// Packed kernel version indicating 10.4.0
-constexpr u32 PackedKernelVersion = 0x520000;
-
-// Indicates possible types of capabilities that can be specified.
-enum class CapabilityType : u32 {
- Unset = 0U,
- PriorityAndCoreNum = (1U << CapabilityOffset_PriorityAndCoreNum) - 1,
- Syscall = (1U << CapabilityOffset_Syscall) - 1,
- MapPhysical = (1U << CapabilityOffset_MapPhysical) - 1,
- MapIO = (1U << CapabilityOffset_MapIO) - 1,
- MapRegion = (1U << CapabilityOffset_MapRegion) - 1,
- Interrupt = (1U << CapabilityOffset_Interrupt) - 1,
- ProgramType = (1U << CapabilityOffset_ProgramType) - 1,
- KernelVersion = (1U << CapabilityOffset_KernelVersion) - 1,
- HandleTableSize = (1U << CapabilityOffset_HandleTableSize) - 1,
- Debug = (1U << CapabilityOffset_Debug) - 1,
- Ignorable = 0xFFFFFFFFU,
-};
-
-// clang-format on
-
-constexpr CapabilityType GetCapabilityType(u32 value) {
- return static_cast<CapabilityType>((~value & (value + 1)) - 1);
-}
-
-u32 GetFlagBitOffset(CapabilityType type) {
- const auto value = static_cast<u32>(type);
- return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
-}
-
-} // Anonymous namespace
-
-Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
- Clear();
-
- // Allow all cores and priorities.
- core_mask = 0xF;
- priority_mask = 0xFFFFFFFFFFFFFFFF;
- kernel_version = PackedKernelVersion;
-
- return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
- Clear();
-
- return ParseCapabilities(capabilities, num_capabilities, page_table);
-}
-
-void ProcessCapabilities::InitializeForMetadatalessProcess() {
- // Allow all cores and priorities
- core_mask = 0xF;
- priority_mask = 0xFFFFFFFFFFFFFFFF;
- kernel_version = PackedKernelVersion;
-
- // Allow all system calls and interrupts.
- svc_capabilities.set();
- interrupt_capabilities.set();
-
- // Allow using the maximum possible amount of handles
- handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize);
-
- // Allow all debugging capabilities.
- is_debuggable = true;
- can_force_debug = true;
-}
-
-Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table) {
- u32 set_flags = 0;
- u32 set_svc_bits = 0;
-
- for (std::size_t i = 0; i < num_capabilities; ++i) {
- const u32 descriptor = capabilities[i];
- const auto type = GetCapabilityType(descriptor);
-
- if (type == CapabilityType::MapPhysical) {
- i++;
-
- // The MapPhysical type uses two descriptor flags for its parameters.
- // If there's only one, then there's a problem.
- if (i >= num_capabilities) {
- LOG_ERROR(Kernel, "Invalid combination! i={}", i);
- return ResultInvalidCombination;
- }
-
- const auto size_flags = capabilities[i];
- if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
- LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
- return ResultInvalidCombination;
- }
-
- const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
- if (result.IsError()) {
- LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
- descriptor, size_flags);
- return result;
- }
- } else {
- const auto result =
- ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
- if (result.IsError()) {
- LOG_ERROR(
- Kernel,
- "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
- set_flags, set_svc_bits, descriptor);
- return result;
- }
- }
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table) {
- const auto type = GetCapabilityType(flag);
-
- if (type == CapabilityType::Unset) {
- return ResultInvalidArgument;
- }
-
- // Bail early on ignorable entries, as one would expect,
- // ignorable descriptors can be ignored.
- if (type == CapabilityType::Ignorable) {
- return ResultSuccess;
- }
-
- // Ensure that the given flag hasn't already been initialized before.
- // If it has been, then bail.
- const u32 flag_length = GetFlagBitOffset(type);
- const u32 set_flag = 1U << flag_length;
- if ((set_flag & set_flags & InitializeOnceMask) != 0) {
- LOG_ERROR(Kernel,
- "Attempted to initialize flags that may only be initialized once. set_flags={}",
- set_flags);
- return ResultInvalidCombination;
- }
- set_flags |= set_flag;
-
- switch (type) {
- case CapabilityType::PriorityAndCoreNum:
- return HandlePriorityCoreNumFlags(flag);
- case CapabilityType::Syscall:
- return HandleSyscallFlags(set_svc_bits, flag);
- case CapabilityType::MapIO:
- return HandleMapIOFlags(flag, page_table);
- case CapabilityType::MapRegion:
- return HandleMapRegionFlags(flag, page_table);
- case CapabilityType::Interrupt:
- return HandleInterruptFlags(flag);
- case CapabilityType::ProgramType:
- return HandleProgramTypeFlags(flag);
- case CapabilityType::KernelVersion:
- return HandleKernelVersionFlags(flag);
- case CapabilityType::HandleTableSize:
- return HandleHandleTableFlags(flag);
- case CapabilityType::Debug:
- return HandleDebugFlags(flag);
- default:
- break;
- }
-
- LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
- return ResultInvalidArgument;
-}
-
-void ProcessCapabilities::Clear() {
- svc_capabilities.reset();
- interrupt_capabilities.reset();
-
- core_mask = 0;
- priority_mask = 0;
-
- handle_table_size = 0;
- kernel_version = 0;
-
- program_type = ProgramType::SysModule;
-
- is_debuggable = false;
- can_force_debug = false;
-}
-
-Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
- if (priority_mask != 0 || core_mask != 0) {
- LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
- priority_mask, core_mask);
- return ResultInvalidArgument;
- }
-
- const u32 core_num_min = (flags >> 16) & 0xFF;
- const u32 core_num_max = (flags >> 24) & 0xFF;
- if (core_num_min > core_num_max) {
- LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
- core_num_min, core_num_max);
- return ResultInvalidCombination;
- }
-
- const u32 priority_min = (flags >> 10) & 0x3F;
- const u32 priority_max = (flags >> 4) & 0x3F;
- if (priority_min > priority_max) {
- LOG_ERROR(Kernel,
- "Priority min is greater than priority max! priority_min={}, priority_max={}",
- core_num_min, priority_max);
- return ResultInvalidCombination;
- }
-
- // The switch only has 4 usable cores.
- if (core_num_max >= 4) {
- LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
- return ResultInvalidCoreId;
- }
-
- const auto make_mask = [](u64 min, u64 max) {
- const u64 range = max - min + 1;
- const u64 mask = (1ULL << range) - 1;
-
- return mask << min;
- };
-
- core_mask = make_mask(core_num_min, core_num_max);
- priority_mask = make_mask(priority_min, priority_max);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
- const u32 index = flags >> 29;
- const u32 svc_bit = 1U << index;
-
- // If we've already set this svc before, bail.
- if ((set_svc_bits & svc_bit) != 0) {
- return ResultInvalidCombination;
- }
- set_svc_bits |= svc_bit;
-
- const u32 svc_mask = (flags >> 5) & 0xFFFFFF;
- for (u32 i = 0; i < 24; ++i) {
- const u32 svc_number = index * 24 + i;
-
- if ((svc_mask & (1U << i)) == 0) {
- continue;
- }
-
- svc_capabilities[svc_number] = true;
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
- KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
- // TODO(Lioncache): Implement once the memory manager can handle this.
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
- constexpr u32 interrupt_ignore_value = 0x3FF;
- const u32 interrupt0 = (flags >> 12) & 0x3FF;
- const u32 interrupt1 = (flags >> 22) & 0x3FF;
-
- for (u32 interrupt : {interrupt0, interrupt1}) {
- if (interrupt == interrupt_ignore_value) {
- continue;
- }
-
- // NOTE:
- // This should be checking a generic interrupt controller value
- // as part of the calculation, however, given we don't currently
- // emulate that, it's sufficient to mark every interrupt as defined.
-
- if (interrupt >= interrupt_capabilities.size()) {
- LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
- interrupt);
- return ResultOutOfRange;
- }
-
- interrupt_capabilities[interrupt] = true;
- }
-
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
- const u32 reserved = flags >> 17;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
- // Yes, the internal member variable is checked in the actual kernel here.
- // This might look odd for options that are only allowed to be initialized
- // just once, however the kernel has a separate initialization function for
- // kernel processes and userland processes. The kernel variant sets this
- // member variable ahead of time.
-
- const u32 major_version = kernel_version >> 19;
-
- if (major_version != 0 || flags < 0x80000) {
- LOG_ERROR(Kernel,
- "Kernel version is non zero or flags are too small! major_version={}, flags={}",
- major_version, flags);
- return ResultInvalidArgument;
- }
-
- kernel_version = flags;
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
- const u32 reserved = flags >> 26;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
- return ResultSuccess;
-}
-
-Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
- const u32 reserved = flags >> 19;
- if (reserved != 0) {
- LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
- return ResultReservedUsed;
- }
-
- is_debuggable = (flags & 0x20000) != 0;
- can_force_debug = (flags & 0x40000) != 0;
- return ResultSuccess;
-}
-
-} // namespace Kernel
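The decoder deleted above — and kept with the same encoding by the KCapabilities replacement earlier in this diff — identifies a descriptor's type from the run of set bits at its bottom: (~value & (value + 1)) isolates the lowest clear bit, and subtracting one recovers the type mask. A standalone worked example:

    #include <cstdint>

    constexpr std::uint32_t GetCapabilityTypeRaw(std::uint32_t value) {
        return (~value & (value + 1)) - 1;
    }

    // A descriptor ending in binary 0111 decodes to PriorityAndCoreNum (0b0111),
    // one ending in 01111 decodes to Syscall (0b1111), and an all-ones word
    // wraps around to the Ignorable sentinel (0xFFFFFFFF).
    static_assert(GetCapabilityTypeRaw(0b1100'0111) == 0b0111);
    static_assert(GetCapabilityTypeRaw(0b1010'1111) == 0b1111);
    static_assert(GetCapabilityTypeRaw(0xFFFFFFFF) == 0xFFFFFFFF);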
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
deleted file mode 100644
index ff05dc5ff..000000000
--- a/src/core/hle/kernel/process_capability.h
+++ /dev/null
@@ -1,266 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <bitset>
-
-#include "common/common_types.h"
-
-union Result;
-
-namespace Kernel {
-
-class KPageTable;
-
-/// The possible types of programs that may be indicated
-/// by the program type capability descriptor.
-enum class ProgramType {
- SysModule,
- Application,
- Applet,
-};
-
-/// Handles kernel capability descriptors that are provided by
-/// application metadata. These descriptors provide information
-/// that alters certain parameters for kernel process instance
-/// that will run said application (or applet).
-///
-/// Capabilities are a sequence of flag descriptors, that indicate various
-/// configurations and constraints for a particular process.
-///
-/// Flag types are indicated by a sequence of set low bits. E.g. the
-/// types are indicated with the low bits as follows (where x indicates "don't care"):
-///
-/// - Priority and core mask : 0bxxxxxxxxxxxx0111
-/// - Allowed service call mask: 0bxxxxxxxxxxx01111
-/// - Map physical memory : 0bxxxxxxxxx0111111
-/// - Map IO memory : 0bxxxxxxxx01111111
-/// - Interrupts : 0bxxxx011111111111
-/// - Application type : 0bxx01111111111111
-/// - Kernel version : 0bx011111111111111
-/// - Handle table size : 0b0111111111111111
-/// - Debugger flags : 0b1111111111111111
-///
-/// These are essentially a bit offset subtracted by 1 to create a mask.
-/// e.g. The first entry in the above list is simply bit 3 (value 8 -> 0b1000)
-/// subtracted by one (7 -> 0b0111)
-///
-/// An example of a bit layout (using the map physical layout):
-/// <example>
-/// The MapPhysical type indicates a sequence entry pair of:
-///
-/// [initial, memory_flags], where:
-///
-/// initial:
-/// bits:
-/// 7-24: Starting page to map memory at.
-/// 25 : Indicates if the memory should be mapped as read only.
-///
-/// memory_flags:
-/// bits:
-/// 7-20 : Number of pages to map
-/// 21-25: Seems to be reserved (still checked against though)
-/// 26 : Whether or not the memory being mapped is IO memory, or physical memory
-/// </example>
-///
-class ProcessCapabilities {
-public:
- using InterruptCapabilities = std::bitset<1024>;
- using SyscallCapabilities = std::bitset<192>;
-
- ProcessCapabilities() = default;
- ProcessCapabilities(const ProcessCapabilities&) = delete;
- ProcessCapabilities(ProcessCapabilities&&) = default;
-
- ProcessCapabilities& operator=(const ProcessCapabilities&) = delete;
- ProcessCapabilities& operator=(ProcessCapabilities&&) = default;
-
- /// Initializes this process capabilities instance for a kernel process.
- ///
- /// @param capabilities The capabilities to parse
- /// @param num_capabilities The number of capabilities to parse.
- /// @param page_table The memory manager to use for handling any mapping-related
- /// operations (such as mapping IO memory, etc).
- ///
- /// @returns ResultSuccess if this capabilities instance was able to be initialized,
- /// otherwise, an error code upon failure.
- ///
- Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Initializes this process capabilities instance for a userland process.
- ///
- /// @param capabilities The capabilities to parse.
- /// @param num_capabilities The total number of capabilities to parse.
- /// @param page_table The memory manager to use for handling any mapping-related
- /// operations (such as mapping IO memory, etc).
- ///
- /// @returns ResultSuccess if this capabilities instance was able to be initialized,
- /// otherwise, an error code upon failure.
- ///
- Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Initializes this process capabilities instance for a process that does not
- /// have any metadata to parse.
- ///
- /// This is necessary, as we allow running raw executables, and the internal
- /// kernel process capabilities also determine what CPU cores the process is
- /// allowed to run on, and what priorities are allowed for threads. It also
- /// determines the max handle table size, what the program type is, whether or
- /// not the process can be debugged, or whether it's possible for a process to
- /// forcibly debug another process.
- ///
- /// Given the above, this essentially enables all capabilities across the board
- /// for the process. It allows the process to:
- ///
- /// - Run on any core
- /// - Use any thread priority
- /// - Use the maximum amount of handles a process is allowed to.
- /// - Be debuggable
- /// - Forcibly debug other processes.
- ///
- /// Note that this is not a behavior that the kernel allows a process to do via
- /// a single function like this. This is yuzu-specific behavior to handle
- /// executables with no capability descriptors whatsoever to derive behavior from.
- /// It being yuzu-specific is why this is also not the default behavior and not
- /// done by default in the constructor.
- ///
- void InitializeForMetadatalessProcess();
-
- /// Gets the allowable core mask
- u64 GetCoreMask() const {
- return core_mask;
- }
-
- /// Gets the allowable priority mask
- u64 GetPriorityMask() const {
- return priority_mask;
- }
-
- /// Gets the SVC access permission bits
- const SyscallCapabilities& GetServiceCapabilities() const {
- return svc_capabilities;
- }
-
- /// Gets the valid interrupt bits.
- const InterruptCapabilities& GetInterruptCapabilities() const {
- return interrupt_capabilities;
- }
-
- /// Gets the program type for this process.
- ProgramType GetProgramType() const {
- return program_type;
- }
-
- /// Gets the number of total allowable handles for the process' handle table.
- s32 GetHandleTableSize() const {
- return handle_table_size;
- }
-
- /// Gets the kernel version value.
- u32 GetKernelVersion() const {
- return kernel_version;
- }
-
- /// Whether or not this process can be debugged.
- bool IsDebuggable() const {
- return is_debuggable;
- }
-
- /// Whether or not this process can forcibly debug another
- /// process, even if that process is not considered debuggable.
- bool CanForceDebug() const {
- return can_force_debug;
- }
-
-private:
- /// Attempts to parse a given sequence of capability descriptors.
- ///
- /// @param capabilities The sequence of capability descriptors to parse.
- /// @param num_capabilities The number of descriptors within the given sequence.
- /// @param page_table The memory manager that will perform any memory
- /// mapping if necessary.
- ///
- /// @return ResultSuccess if no errors occur, otherwise an error code.
- ///
- Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
-
- /// Attempts to parse a capability descriptor that is only represented by a
- /// single flag set.
- ///
- /// @param set_flags Running set of flags that are used to catch
- /// flags being initialized more than once when they shouldn't be.
- /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
- /// @param flag The flag to attempt to parse.
- /// @param page_table The memory manager that will perform any memory
- /// mapping if necessary.
- ///
- /// @return ResultSuccess if no errors occurred, otherwise an error code.
- ///
- Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table);
-
- /// Clears the internal state of this process capability instance. Necessary,
- /// to have a sane starting point due to us allowing running executables without
- /// configuration metadata. We assume a process is not going to have metadata,
- /// and if it turns out that the process does, in fact, have metadata, then
- /// we attempt to parse it. Thus, we need this to reset data members back to
- /// a good state.
- ///
- /// DO NOT ever make this a public member function. This isn't an invariant
- /// anything external should depend upon (and if anything comes to rely on it,
- /// you should immediately be questioning the design of that thing, not this
- /// class. If the kernel itself can run without depending on behavior like that,
- /// then so can yuzu).
- ///
- void Clear();
-
- /// Handles flags related to the priority and core number capability flags.
- Result HandlePriorityCoreNumFlags(u32 flags);
-
- /// Handles flags related to determining the allowable SVC mask.
- Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
-
- /// Handles flags related to mapping physical memory pages.
- Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
-
- /// Handles flags related to mapping IO pages.
- Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
-
- /// Handles flags related to mapping physical memory regions.
- Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
-
- /// Handles flags related to the interrupt capability flags.
- Result HandleInterruptFlags(u32 flags);
-
- /// Handles flags related to the program type.
- Result HandleProgramTypeFlags(u32 flags);
-
- /// Handles flags related to the handle table size.
- Result HandleHandleTableFlags(u32 flags);
-
- /// Handles flags related to the kernel version capability flags.
- Result HandleKernelVersionFlags(u32 flags);
-
- /// Handles flags related to debug-specific capabilities.
- Result HandleDebugFlags(u32 flags);
-
- SyscallCapabilities svc_capabilities;
- InterruptCapabilities interrupt_capabilities;
-
- u64 core_mask = 0;
- u64 priority_mask = 0;
-
- s32 handle_table_size = 0;
- u32 kernel_version = 0;
-
- ProgramType program_type = ProgramType::SysModule;
-
- bool is_debuggable = false;
- bool can_force_debug = false;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 871d541d4..b76683969 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -4426,7 +4426,7 @@ void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+ if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
Call64(system, imm);
} else {
Call32(system, imm);
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index f99964028..ada998772 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -86,20 +86,19 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::TotalMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailable();
+ *result = process->GetTotalUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::UsedMemorySize:
- *result = process->GetTotalPhysicalMemoryUsed();
+ *result = process->GetUsedUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::SystemResourceSizeTotal:
- *result = process->GetSystemResourceSize();
+ *result = process->GetTotalSystemResourceSize();
R_SUCCEED();
case InfoType::SystemResourceSizeUsed:
- LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
- *result = process->GetSystemResourceUsage();
+ *result = process->GetUsedSystemResourceSize();
R_SUCCEED();
case InfoType::ProgramId:
@@ -111,20 +110,29 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::TotalNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+ *result = process->GetTotalNonSystemUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::UsedNonSystemMemorySize:
- *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
+ *result = process->GetUsedNonSystemUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::IsApplication:
LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
- *result = true;
+ *result = process->IsApplication();
R_SUCCEED();
case InfoType::FreeThreadCount:
- *result = process->GetFreeThreadCount();
+ if (KResourceLimit* resource_limit = process->GetResourceLimit();
+ resource_limit != nullptr) {
+ const auto current_value =
+ resource_limit->GetCurrentValue(Svc::LimitableResource::ThreadCountMax);
+ const auto limit_value =
+ resource_limit->GetLimitValue(Svc::LimitableResource::ThreadCountMax);
+ *result = limit_value - current_value;
+ } else {
+ *result = 0;
+ }
R_SUCCEED();
default:
@@ -161,7 +169,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
case InfoType::RandomEntropy:
R_UNLESS(handle == 0, ResultInvalidHandle);
- R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);
+ R_UNLESS(info_sub_id < 4, ResultInvalidCombination);
*result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
R_SUCCEED();
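FreeThreadCount is now derived from the process resource limit instead of a dedicated counter: the free count is the thread limit minus the threads currently charged against it. With invented numbers, purely for illustration:

    // resource_limit->GetLimitValue(ThreadCountMax)   -> e.g. 700
    // resource_limit->GetCurrentValue(ThreadCountMax) -> e.g. 42
    const s64 limit_value = 700;
    const s64 current_value = 42;
    *result = static_cast<u64>(limit_value - current_value); // 658 threads free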
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
index 1d7bc4246..5f0833fcb 100644
--- a/src/core/hle/kernel/svc/svc_lock.cpp
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -17,7 +17,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u3
R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
- R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
+ R_RETURN(KConditionVariable::WaitForAddress(system.Kernel(), thread_handle, address, tag));
}
/// Unlock a mutex
@@ -28,7 +28,7 @@ Result ArbitrateUnlock(Core::System& system, u64 address) {
R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
- R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
+ R_RETURN(KConditionVariable::SignalToAddress(system.Kernel(), address));
}
Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 97f1210de..4ca62860d 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -29,7 +29,8 @@ constexpr bool IsValidAddressRange(u64 address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 src_addr, u64 size) {
+Result MapUnmapMemorySanityChecks(const KProcessPageTable& manager, u64 dst_addr, u64 src_addr,
+ u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
R_THROW(ResultInvalidAddress);
@@ -123,7 +124,8 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
// Set the memory attribute.
- R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
+ R_RETURN(page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
+ static_cast<KMemoryAttribute>(attr)));
}
/// Maps a memory range into a different range.
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index d3545f232..793e9f8d0 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -16,7 +16,14 @@ Result SetHeapSize(Core::System& system, u64* out_address, u64 size) {
R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
// Set the heap size.
- R_RETURN(GetCurrentProcess(system.Kernel()).GetPageTable().SetHeapSize(out_address, size));
+ KProcessAddress address{};
+ R_TRY(GetCurrentProcess(system.Kernel())
+ .GetPageTable()
+ .SetHeapSize(std::addressof(address), size));
+
+ // We succeeded.
+ *out_address = GetInteger(address);
+ R_SUCCEED();
}
/// Maps memory at a desired address
@@ -46,7 +53,7 @@ Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
auto& page_table{current_process->GetPageTable()};
- if (current_process->GetSystemResourceSize() == 0) {
+ if (current_process->GetTotalSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
R_THROW(ResultInvalidState);
}
@@ -95,7 +102,7 @@ Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
auto& page_table{current_process->GetPageTable()};
- if (current_process->GetSystemResourceSize() == 0) {
+ if (current_process->GetTotalSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
R_THROW(ResultInvalidState);
}
diff --git a/src/core/hle/kernel/svc/svc_process_memory.cpp b/src/core/hle/kernel/svc/svc_process_memory.cpp
index 07cd48175..e1427947b 100644
--- a/src/core/hle/kernel/svc/svc_process_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_process_memory.cpp
@@ -247,8 +247,7 @@ Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 d
R_THROW(ResultInvalidCurrentMemory);
}
- R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size,
- KPageTable::ICacheInvalidationStrategy::InvalidateAll));
+ R_RETURN(page_table.UnmapCodeMemory(dst_address, src_address, size));
}
Result SetProcessMemoryPermission64(Core::System& system, Handle process_handle, uint64_t address,
diff --git a/src/core/hle/kernel/svc/svc_query_memory.cpp b/src/core/hle/kernel/svc/svc_query_memory.cpp
index 51af06e97..816dcb8d0 100644
--- a/src/core/hle/kernel/svc/svc_query_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_query_memory.cpp
@@ -31,12 +31,12 @@ Result QueryProcessMemory(Core::System& system, uint64_t out_memory_info, PageIn
}
auto& current_memory{GetCurrentMemory(system.Kernel())};
- const auto memory_info{process->GetPageTable().QueryInfo(address).GetSvcMemoryInfo()};
- current_memory.WriteBlock(out_memory_info, std::addressof(memory_info), sizeof(memory_info));
+ KMemoryInfo mem_info;
+ R_TRY(process->GetPageTable().QueryInfo(std::addressof(mem_info), out_page_info, address));
- //! This is supposed to be part of the QueryInfo call.
- *out_page_info = {};
+ const auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+ current_memory.WriteBlock(out_memory_info, std::addressof(svc_mem_info), sizeof(svc_mem_info));
R_SUCCEED();
}
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 8ebc1bd1c..6c79cfd8d 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -132,7 +132,7 @@ void SynchronizePreemptionState(Core::System& system) {
GetCurrentThread(kernel).ClearInterruptFlag();
// Unpin the current thread.
- cur_process->UnpinCurrentThread(core_id);
+ cur_process->UnpinCurrentThread();
}
}
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 933b82e30..755fd62b5 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -85,10 +85,6 @@ Result StartThread(Core::System& system, Handle thread_handle) {
// Try to start the thread.
R_TRY(thread->Run());
- // If we succeeded, persist a reference to the thread.
- thread->Open();
- system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
-
R_SUCCEED();
}
@@ -99,7 +95,6 @@ void ExitThread(Core::System& system) {
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
- system.Kernel().UnregisterInUseObject(current_thread);
}
/// Sleep the current thread
@@ -260,7 +255,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_
auto list_iter = thread_list.cbegin();
for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
- memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
+ memory.Write64(out_thread_ids, list_iter->GetThreadId());
out_thread_ids += sizeof(u64);
}
diff --git a/src/core/hle/kernel/svc_generator.py b/src/core/hle/kernel/svc_generator.py
index 7fcbb1ba1..5531faac6 100644
--- a/src/core/hle/kernel/svc_generator.py
+++ b/src/core/hle/kernel/svc_generator.py
@@ -592,7 +592,7 @@ void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
+ if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
Call64(system, imm);
} else {
Call32(system, imm);
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 251e6013c..50de02e36 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -604,13 +604,57 @@ enum class ProcessActivity : u32 {
Paused,
};
+enum class CreateProcessFlag : u32 {
+ // Is 64 bit?
+ Is64Bit = (1 << 0),
+
+ // What kind of address space?
+ AddressSpaceShift = 1,
+ AddressSpaceMask = (7 << AddressSpaceShift),
+ AddressSpace32Bit = (0 << AddressSpaceShift),
+ AddressSpace64BitDeprecated = (1 << AddressSpaceShift),
+ AddressSpace32BitWithoutAlias = (2 << AddressSpaceShift),
+ AddressSpace64Bit = (3 << AddressSpaceShift),
+
+ // Should JIT debug be done on crash?
+ EnableDebug = (1 << 4),
+
+ // Should ASLR be enabled for the process?
+ EnableAslr = (1 << 5),
+
+ // Is the process an application?
+ IsApplication = (1 << 6),
+
+ // 4.x deprecated: Should use secure memory?
+ DeprecatedUseSecureMemory = (1 << 7),
+
+ // 5.x+ Pool partition type.
+ PoolPartitionShift = 7,
+ PoolPartitionMask = (0xF << PoolPartitionShift),
+ PoolPartitionApplication = (0 << PoolPartitionShift),
+ PoolPartitionApplet = (1 << PoolPartitionShift),
+ PoolPartitionSystem = (2 << PoolPartitionShift),
+ PoolPartitionSystemNonSecure = (3 << PoolPartitionShift),
+
+ // 7.x+ Should memory allocation be optimized? This requires IsApplication.
+ OptimizeMemoryAllocation = (1 << 11),
+
+ // 11.x+ DisableDeviceAddressSpaceMerge.
+ DisableDeviceAddressSpaceMerge = (1 << 12),
+
+ // Mask of all flags.
+ All = Is64Bit | AddressSpaceMask | EnableDebug | EnableAslr | IsApplication |
+ PoolPartitionMask | OptimizeMemoryAllocation | DisableDeviceAddressSpaceMerge,
+};
+DECLARE_ENUM_FLAG_OPERATORS(CreateProcessFlag);
+
struct CreateProcessParameter {
std::array<char, 12> name;
u32 version;
u64 program_id;
u64 code_address;
s32 code_num_pages;
- u32 flags;
+ CreateProcessFlag flags;
Handle reslimit;
s32 system_resource_num_pages;
};
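With the field now strongly typed, callers compose CreateProcessFlag values through the operators generated by DECLARE_ENUM_FLAG_OPERATORS. Illustration only — the combination below is what a 64-bit application title would plausibly request, not a value taken from this patch:

    using Kernel::Svc::CreateProcessFlag;

    // 64-bit code, the 64-bit address-space type, ASLR enabled, application
    // pool partition. PoolPartitionApplication is the zero value of that
    // field, so OR-ing it in documents intent without changing the bits.
    constexpr CreateProcessFlag ApplicationFlags =
        CreateProcessFlag::Is64Bit | CreateProcessFlag::AddressSpace64Bit |
        CreateProcessFlag::EnableAslr | CreateProcessFlag::IsApplication |
        CreateProcessFlag::PoolPartitionApplication;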
diff --git a/src/core/hle/result.h b/src/core/hle/result.h
index dd0b27f47..749f51f69 100644
--- a/src/core/hle/result.h
+++ b/src/core/hle/result.h
@@ -407,3 +407,34 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
/// Evaluates a boolean expression, and succeeds if that expression is true.
#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), ResultSuccess)
+
+#define R_TRY_CATCH(res_expr) \
+ { \
+ const auto R_CURRENT_RESULT = (res_expr); \
+ if (R_FAILED(R_CURRENT_RESULT)) { \
+ if (false)
+
+#define R_END_TRY_CATCH \
+ else if (R_FAILED(R_CURRENT_RESULT)) { \
+ R_THROW(R_CURRENT_RESULT); \
+ } \
+ } \
+ }
+
+#define R_CATCH_ALL() \
+ } \
+ else if (R_FAILED(R_CURRENT_RESULT)) { \
+ if (true)
+
+#define R_CATCH(res_expr) \
+ } \
+ else if ((res_expr) == (R_CURRENT_RESULT)) { \
+ if (true)
+
+#define R_CONVERT(catch_type, convert_type) \
+ R_CATCH(catch_type) { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_CONVERT_ALL(convert_type) \
+ R_CATCH_ALL() { R_THROW(static_cast<Result>(convert_type)); }
+
+#define R_ASSERT(res_expr) ASSERT(R_SUCCEEDED(res_expr))
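The new macros provide a result try/catch idiom: R_TRY_CATCH evaluates the expression once, R_CATCH / R_CONVERT / R_CATCH_ALL handle specific failures, and R_END_TRY_CATCH rethrows anything left unhandled. A hypothetical usage sketch — DoMapping() is invented, and the particular result codes are just examples from the kernel's standard set:

    Result MapWithFallback() {
        R_TRY_CATCH(DoMapping()) {
            // Translate resource exhaustion into the error this caller reports.
            R_CONVERT(ResultLimitReached, ResultOutOfMemory)
            R_CATCH(ResultInvalidCurrentMemory) {
                // Swallow this one failure and fall through to success.
                LOG_WARNING(Kernel, "mapping target went away, ignoring");
            }
        } R_END_TRY_CATCH; // any other failure propagates to the caller here

        R_SUCCEED();
    }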
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index 1b1c8190e..f21553644 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -3,11 +3,13 @@
#include <algorithm>
#include <array>
+
#include "common/common_types.h"
#include "common/fs/file.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/polyfill_ranges.h"
+#include "common/stb.h"
#include "common/string_util.h"
#include "common/swap.h"
#include "core/constants.h"
@@ -38,9 +40,36 @@ static std::filesystem::path GetImagePath(const Common::UUID& uuid) {
fmt::format("system/save/8000000000000010/su/avators/{}.jpg", uuid.FormattedString());
}
-static constexpr u32 SanitizeJPEGSize(std::size_t size) {
+static void JPGToMemory(void* context, void* data, int len) {
+ std::vector<u8>* jpg_image = static_cast<std::vector<u8>*>(context);
+ unsigned char* jpg = static_cast<unsigned char*>(data);
+ jpg_image->insert(jpg_image->end(), jpg, jpg + len);
+}
+
+static void SanitizeJPEGImageSize(std::vector<u8>& image) {
constexpr std::size_t max_jpeg_image_size = 0x20000;
- return static_cast<u32>(std::min(size, max_jpeg_image_size));
+ constexpr int profile_dimensions = 256;
+ int original_width, original_height, color_channels;
+
+ const auto plain_image =
+ stbi_load_from_memory(image.data(), static_cast<int>(image.size()), &original_width,
+ &original_height, &color_channels, STBI_rgb);
+
+ if (plain_image == nullptr) {
+ LOG_ERROR(Service_ACC, "Failed to decode the user provided image.");
+ image.clear();
+ return;
+ }
+
+ // Resize image to match 256x256
+ if (original_width != profile_dimensions || original_height != profile_dimensions) {
+ // Use vector instead of array to avoid overflowing the stack
+ std::vector<u8> out_image(profile_dimensions * profile_dimensions * STBI_rgb);
+ stbir_resize_uint8_srgb(plain_image, original_width, original_height, 0, out_image.data(),
+ profile_dimensions, profile_dimensions, 0, STBI_rgb, 0,
+ STBIR_FILTER_BOX);
+ image.clear();
+ if (!stbi_write_jpg_to_func(JPGToMemory, &image, profile_dimensions, profile_dimensions,
+ STBI_rgb, out_image.data(), 0)) {
+ LOG_ERROR(Service_ACC, "Failed to resize the user provided image.");
+ }
+ }
+
+ // Free the stbi allocation to avoid leaking the decoded pixels.
+ stbi_image_free(plain_image);
+ image.resize(std::min(image.size(), max_jpeg_image_size));
}
class IManagerForSystemService final : public ServiceFramework<IManagerForSystemService> {
@@ -339,19 +368,20 @@ protected:
LOG_WARNING(Service_ACC,
"Failed to load user provided image! Falling back to built-in backup...");
ctx.WriteBuffer(Core::Constants::ACCOUNT_BACKUP_JPEG);
- rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
+ rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
return;
}
- const u32 size = SanitizeJPEGSize(image.GetSize());
- std::vector<u8> buffer(size);
+ std::vector<u8> buffer(image.GetSize());
if (image.Read(buffer) != buffer.size()) {
LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image.");
}
+ SanitizeJPEGImageSize(buffer);
+
ctx.WriteBuffer(buffer);
- rb.Push<u32>(size);
+ rb.Push(static_cast<u32>(buffer.size()));
}
void GetImageSize(HLERequestContext& ctx) {
@@ -365,10 +395,18 @@ protected:
if (!image.IsOpen()) {
LOG_WARNING(Service_ACC,
"Failed to load user provided image! Falling back to built-in backup...");
- rb.Push(SanitizeJPEGSize(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
- } else {
- rb.Push(SanitizeJPEGSize(image.GetSize()));
+ rb.Push(static_cast<u32>(Core::Constants::ACCOUNT_BACKUP_JPEG.size()));
+ return;
}
+
+ std::vector<u8> buffer(image.GetSize());
+
+ if (image.Read(buffer) != buffer.size()) {
+ LOG_ERROR(Service_ACC, "Failed to read all the bytes in the user provided image.");
+ }
+
+ SanitizeJPEGImageSize(buffer);
+ rb.Push(static_cast<u32>(buffer.size()));
}
void Store(HLERequestContext& ctx) {
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 98765b81a..a266d7c21 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -13,6 +13,7 @@
#include "core/file_sys/patch_manager.h"
#include "core/file_sys/registered_cache.h"
#include "core/file_sys/savedata_factory.h"
+#include "core/hid/hid_types.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/result.h"
@@ -21,8 +22,10 @@
#include "core/hle/service/am/applet_ae.h"
#include "core/hle/service/am/applet_oe.h"
#include "core/hle/service/am/applets/applet_cabinet.h"
+#include "core/hle/service/am/applets/applet_controller.h"
#include "core/hle/service/am/applets/applet_mii_edit_types.h"
#include "core/hle/service/am/applets/applet_profile_select.h"
+#include "core/hle/service/am/applets/applet_software_keyboard_types.h"
#include "core/hle/service/am/applets/applet_web_browser.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/am/idle.h"
@@ -31,8 +34,10 @@
#include "core/hle/service/apm/apm_controller.h"
#include "core/hle/service/apm/apm_interface.h"
#include "core/hle/service/bcat/backend/backend.h"
+#include "core/hle/service/caps/caps_su.h"
#include "core/hle/service/caps/caps_types.h"
#include "core/hle/service/filesystem/filesystem.h"
+#include "core/hle/service/hid/controllers/npad.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/hle/service/ns/ns.h"
#include "core/hle/service/nvnflinger/fb_share_buffer_manager.h"
@@ -71,7 +76,7 @@ IWindowController::IWindowController(Core::System& system_)
static const FunctionInfo functions[] = {
{0, nullptr, "CreateWindow"},
{1, &IWindowController::GetAppletResourceUserId, "GetAppletResourceUserId"},
- {2, nullptr, "GetAppletResourceUserIdOfCallerApplet"},
+ {2, &IWindowController::GetAppletResourceUserIdOfCallerApplet, "GetAppletResourceUserIdOfCallerApplet"},
{10, &IWindowController::AcquireForegroundRights, "AcquireForegroundRights"},
{11, nullptr, "ReleaseForegroundRights"},
{12, nullptr, "RejectToChangeIntoBackground"},
@@ -95,6 +100,16 @@ void IWindowController::GetAppletResourceUserId(HLERequestContext& ctx) {
rb.Push<u64>(process_id);
}
+void IWindowController::GetAppletResourceUserIdOfCallerApplet(HLERequestContext& ctx) {
+ const u64 process_id = 0;
+
+ LOG_WARNING(Service_AM, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push<u64>(process_id);
+}
+
void IWindowController::AcquireForegroundRights(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
@@ -702,9 +717,17 @@ void ISelfController::SetAlbumImageTakenNotificationEnabled(HLERequestContext& c
void ISelfController::SaveCurrentScreenshot(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
- const auto album_report_option = rp.PopEnum<Capture::AlbumReportOption>();
+ const auto report_option = rp.PopEnum<Capture::AlbumReportOption>();
+
+ LOG_INFO(Service_AM, "called, report_option={}", report_option);
+
+ const auto screenshot_service =
+ system.ServiceManager().GetService<Service::Capture::IScreenShotApplicationService>(
+ "caps:su");
- LOG_WARNING(Service_AM, "(STUBBED) called. album_report_option={}", album_report_option);
+ if (screenshot_service) {
+ screenshot_service->CaptureAndSaveScreenshot(report_option);
+ }
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
@@ -796,7 +819,9 @@ ILockAccessor::ILockAccessor(Core::System& system_)
lock_event = service_context.CreateEvent("ILockAccessor::LockEvent");
}
-ILockAccessor::~ILockAccessor() = default;
+ILockAccessor::~ILockAccessor() {
+ service_context.CloseEvent(lock_event);
+}
void ILockAccessor::TryLock(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
@@ -909,7 +934,9 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system_,
msg_queue->PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoForeground);
}
-ICommonStateGetter::~ICommonStateGetter() = default;
+ICommonStateGetter::~ICommonStateGetter() {
+ service_context.CloseEvent(sleep_lock_event);
+}
void ICommonStateGetter::GetBootMode(HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called");
@@ -1551,14 +1578,14 @@ ILibraryAppletSelfAccessor::ILibraryAppletSelfAccessor(Core::System& system_)
{6, nullptr, "GetPopInteractiveInDataEvent"},
{10, &ILibraryAppletSelfAccessor::ExitProcessAndReturn, "ExitProcessAndReturn"},
{11, &ILibraryAppletSelfAccessor::GetLibraryAppletInfo, "GetLibraryAppletInfo"},
- {12, nullptr, "GetMainAppletIdentityInfo"},
+ {12, &ILibraryAppletSelfAccessor::GetMainAppletIdentityInfo, "GetMainAppletIdentityInfo"},
{13, nullptr, "CanUseApplicationCore"},
{14, &ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo, "GetCallerAppletIdentityInfo"},
{15, nullptr, "GetMainAppletApplicationControlProperty"},
{16, nullptr, "GetMainAppletStorageId"},
{17, nullptr, "GetCallerAppletIdentityInfoStack"},
{18, nullptr, "GetNextReturnDestinationAppletIdentityInfo"},
- {19, nullptr, "GetDesirableKeyboardLayout"},
+ {19, &ILibraryAppletSelfAccessor::GetDesirableKeyboardLayout, "GetDesirableKeyboardLayout"},
{20, nullptr, "PopExtraStorage"},
{25, nullptr, "GetPopExtraStorageEvent"},
{30, nullptr, "UnpopInData"},
@@ -1577,7 +1604,7 @@ ILibraryAppletSelfAccessor::ILibraryAppletSelfAccessor(Core::System& system_)
{120, nullptr, "GetLaunchStorageInfoForDebug"},
{130, nullptr, "GetGpuErrorDetectedSystemEvent"},
{140, nullptr, "SetApplicationMemoryReservation"},
- {150, nullptr, "ShouldSetGpuTimeSliceManually"},
+ {150, &ILibraryAppletSelfAccessor::ShouldSetGpuTimeSliceManually, "ShouldSetGpuTimeSliceManually"},
};
// clang-format on
RegisterHandlers(functions);
@@ -1592,6 +1619,12 @@ ILibraryAppletSelfAccessor::ILibraryAppletSelfAccessor(Core::System& system_)
case Applets::AppletId::PhotoViewer:
PushInShowAlbum();
break;
+ case Applets::AppletId::SoftwareKeyboard:
+ PushInShowSoftwareKeyboard();
+ break;
+ case Applets::AppletId::Controller:
+ PushInShowController();
+ break;
default:
break;
}
@@ -1649,13 +1682,33 @@ void ILibraryAppletSelfAccessor::GetLibraryAppletInfo(HLERequestContext& ctx) {
rb.PushRaw(applet_info);
}
-void ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo(HLERequestContext& ctx) {
+void ILibraryAppletSelfAccessor::GetMainAppletIdentityInfo(HLERequestContext& ctx) {
struct AppletIdentityInfo {
Applets::AppletId applet_id;
INSERT_PADDING_BYTES(0x4);
u64 application_id;
};
+ static_assert(sizeof(AppletIdentityInfo) == 0x10, "AppletIdentityInfo has incorrect size.");
+
+ LOG_WARNING(Service_AM, "(STUBBED) called");
+
+ const AppletIdentityInfo applet_info{
+ .applet_id = Applets::AppletId::QLaunch,
+ .application_id = 0x0100000000001000ull,
+ };
+
+ IPC::ResponseBuilder rb{ctx, 6};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(applet_info);
+}
+void ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo(HLERequestContext& ctx) {
+ struct AppletIdentityInfo {
+ Applets::AppletId applet_id;
+ INSERT_PADDING_BYTES(0x4);
+ u64 application_id;
+ };
+ static_assert(sizeof(AppletIdentityInfo) == 0x10, "AppletIdentityInfo has incorrect size.");
LOG_WARNING(Service_AM, "(STUBBED) called");
const AppletIdentityInfo applet_info{
@@ -1668,6 +1721,14 @@ void ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo(HLERequestContext&
rb.PushRaw(applet_info);
}
+void ILibraryAppletSelfAccessor::GetDesirableKeyboardLayout(HLERequestContext& ctx) {
+ LOG_WARNING(Service_AM, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push<u32>(0);
+}
+
void ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers(HLERequestContext& ctx) {
const Service::Account::ProfileManager manager{};
bool is_empty{true};
@@ -1687,6 +1748,14 @@ void ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers(HLERequestContext&
rb.Push(user_count);
}
+void ILibraryAppletSelfAccessor::ShouldSetGpuTimeSliceManually(HLERequestContext& ctx) {
+ LOG_WARNING(Service_AM, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push<u8>(0);
+}
+
void ILibraryAppletSelfAccessor::PushInShowAlbum() {
const Applets::CommonArguments arguments{
.arguments_version = Applets::CommonArgumentVersion::Version3,
@@ -1704,6 +1773,55 @@ void ILibraryAppletSelfAccessor::PushInShowAlbum() {
queue_data.emplace_back(std::move(settings_data));
}
+void ILibraryAppletSelfAccessor::PushInShowController() {
+ const Applets::CommonArguments common_args = {
+ .arguments_version = Applets::CommonArgumentVersion::Version3,
+ .size = Applets::CommonArgumentSize::Version3,
+ .library_version = static_cast<u32>(Applets::ControllerAppletVersion::Version8),
+ .theme_color = Applets::ThemeColor::BasicBlack,
+ .play_startup_sound = true,
+ .system_tick = system.CoreTiming().GetClockTicks(),
+ };
+
+ Applets::ControllerSupportArgNew user_args = {
+ .header = {.player_count_min = 1,
+ .player_count_max = 4,
+ .enable_take_over_connection = true,
+ .enable_left_justify = false,
+ .enable_permit_joy_dual = true,
+ .enable_single_mode = false,
+ .enable_identification_color = false},
+ .identification_colors = {},
+ .enable_explain_text = false,
+ .explain_text = {},
+ };
+
+ Applets::ControllerSupportArgPrivate private_args = {
+ .arg_private_size = sizeof(Applets::ControllerSupportArgPrivate),
+ .arg_size = sizeof(Applets::ControllerSupportArgNew),
+ .is_home_menu = true,
+ .flag_1 = true,
+ .mode = Applets::ControllerSupportMode::ShowControllerSupport,
+ .caller = Applets::ControllerSupportCaller::
+ Application, // switchbrew: Always zero except with
+ // ShowControllerFirmwareUpdateForSystem/ShowControllerKeyRemappingForSystem,
+ // which sets this to the input param
+ .style_set = Core::HID::NpadStyleSet::None,
+ .joy_hold_type = 0,
+ };
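+ // The controller applet expects the storages in this order: common arguments, private
+ // arguments, then user arguments.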
+ std::vector<u8> common_args_data(sizeof(common_args));
+ std::vector<u8> private_args_data(sizeof(private_args));
+ std::vector<u8> user_args_data(sizeof(user_args));
+
+ std::memcpy(common_args_data.data(), &common_args, sizeof(common_args));
+ std::memcpy(private_args_data.data(), &private_args, sizeof(private_args));
+ std::memcpy(user_args_data.data(), &user_args, sizeof(user_args));
+
+ queue_data.emplace_back(std::move(common_args_data));
+ queue_data.emplace_back(std::move(private_args_data));
+ queue_data.emplace_back(std::move(user_args_data));
+}
+
void ILibraryAppletSelfAccessor::PushInShowCabinetData() {
const Applets::CommonArguments arguments{
.arguments_version = Applets::CommonArgumentVersion::Version3,
@@ -1755,6 +1873,61 @@ void ILibraryAppletSelfAccessor::PushInShowMiiEditData() {
queue_data.emplace_back(std::move(argument_data));
}
+void ILibraryAppletSelfAccessor::PushInShowSoftwareKeyboard() {
+ const Applets::CommonArguments arguments{
+ .arguments_version = Applets::CommonArgumentVersion::Version3,
+ .size = Applets::CommonArgumentSize::Version3,
+ .library_version = static_cast<u32>(Applets::SwkbdAppletVersion::Version524301),
+ .theme_color = Applets::ThemeColor::BasicBlack,
+ .play_startup_sound = true,
+ .system_tick = system.CoreTiming().GetClockTicks(),
+ };
+
+ std::vector<char16_t> initial_string(0);
+
+ const Applets::SwkbdConfigCommon swkbd_config{
+ .type = Applets::SwkbdType::Qwerty,
+ .ok_text{},
+ .left_optional_symbol_key{},
+ .right_optional_symbol_key{},
+ .use_prediction = false,
+ .key_disable_flags{},
+ .initial_cursor_position = Applets::SwkbdInitialCursorPosition::Start,
+ .header_text{},
+ .sub_text{},
+ .guide_text{},
+ .max_text_length = 500,
+ .min_text_length = 0,
+ .password_mode = Applets::SwkbdPasswordMode::Disabled,
+ .text_draw_type = Applets::SwkbdTextDrawType::Box,
+ .enable_return_button = true,
+ .use_utf8 = false,
+ .use_blur_background = true,
+ .initial_string_offset{},
+ .initial_string_length = static_cast<u32>(initial_string.size()),
+ .user_dictionary_offset{},
+ .user_dictionary_entries{},
+ .use_text_check = false,
+ };
+
+ Applets::SwkbdConfigNew swkbd_config_new{};
+
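+ // The keyboard applet expects three storages: the common arguments, the combined
+ // SwkbdConfigCommon/SwkbdConfigNew block, and the initial-string work buffer.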
+ std::vector<u8> argument_data(sizeof(arguments));
+ std::vector<u8> swkbd_data(sizeof(swkbd_config) + sizeof(swkbd_config_new));
+ std::vector<u8> work_buffer(swkbd_config.initial_string_length * sizeof(char16_t));
+
+ std::memcpy(argument_data.data(), &arguments, sizeof(arguments));
+ std::memcpy(swkbd_data.data(), &swkbd_config, sizeof(swkbd_config));
+ std::memcpy(swkbd_data.data() + sizeof(swkbd_config), &swkbd_config_new,
+ sizeof(Applets::SwkbdConfigNew));
+ std::memcpy(work_buffer.data(), initial_string.data(),
+ swkbd_config.initial_string_length * sizeof(char16_t));
+
+ queue_data.emplace_back(std::move(argument_data));
+ queue_data.emplace_back(std::move(swkbd_data));
+ queue_data.emplace_back(std::move(work_buffer));
+}
+
IAppletCommonFunctions::IAppletCommonFunctions(Core::System& system_)
: ServiceFramework{system_, "IAppletCommonFunctions"} {
// clang-format off
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 64b3f3fe2..905a71b9f 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -87,6 +87,7 @@ public:
private:
void GetAppletResourceUserId(HLERequestContext& ctx);
+ void GetAppletResourceUserIdOfCallerApplet(HLERequestContext& ctx);
void AcquireForegroundRights(HLERequestContext& ctx);
};
@@ -345,13 +346,18 @@ private:
void PopInData(HLERequestContext& ctx);
void PushOutData(HLERequestContext& ctx);
void GetLibraryAppletInfo(HLERequestContext& ctx);
+ void GetMainAppletIdentityInfo(HLERequestContext& ctx);
void ExitProcessAndReturn(HLERequestContext& ctx);
void GetCallerAppletIdentityInfo(HLERequestContext& ctx);
+ void GetDesirableKeyboardLayout(HLERequestContext& ctx);
void GetMainAppletAvailableUsers(HLERequestContext& ctx);
+ void ShouldSetGpuTimeSliceManually(HLERequestContext& ctx);
void PushInShowAlbum();
void PushInShowCabinetData();
void PushInShowMiiEditData();
+ void PushInShowSoftwareKeyboard();
+ void PushInShowController();
std::deque<std::vector<u8>> queue_data;
};
diff --git a/src/core/hle/service/am/applets/applet_cabinet.cpp b/src/core/hle/service/am/applets/applet_cabinet.cpp
index 19ed184e8..3906c0fa4 100644
--- a/src/core/hle/service/am/applets/applet_cabinet.cpp
+++ b/src/core/hle/service/am/applets/applet_cabinet.cpp
@@ -25,7 +25,9 @@ Cabinet::Cabinet(Core::System& system_, LibraryAppletMode applet_mode_,
service_context.CreateEvent("CabinetApplet:AvailabilityChangeEvent");
}
-Cabinet::~Cabinet() = default;
+Cabinet::~Cabinet() {
+ service_context.CloseEvent(availability_change_event);
+}
void Cabinet::Initialize() {
Applet::Initialize();
@@ -120,7 +122,8 @@ void Cabinet::DisplayCompleted(bool apply_changes, std::string_view amiibo_name)
Service::NFP::RegisterInfoPrivate register_info{};
std::memcpy(register_info.amiibo_name.data(), amiibo_name.data(),
std::min(amiibo_name.size(), register_info.amiibo_name.size() - 1));
-
+ register_info.mii_store_data.BuildRandom(Mii::Age::All, Mii::Gender::All, Mii::Race::All);
+ register_info.mii_store_data.SetNickname({u'y', u'u', u'z', u'u'});
nfp_device->SetRegisterInfoPrivate(register_info);
break;
}
@@ -128,7 +131,7 @@ void Cabinet::DisplayCompleted(bool apply_changes, std::string_view amiibo_name)
nfp_device->DeleteApplicationArea();
break;
case Service::NFP::CabinetMode::StartRestorer:
- nfp_device->RestoreAmiibo();
+ nfp_device->Restore();
break;
case Service::NFP::CabinetMode::StartFormatter:
nfp_device->Format();
diff --git a/src/core/hle/service/am/applets/applet_controller.h b/src/core/hle/service/am/applets/applet_controller.h
index f6c64f633..9f839f3d7 100644
--- a/src/core/hle/service/am/applets/applet_controller.h
+++ b/src/core/hle/service/am/applets/applet_controller.h
@@ -56,7 +56,7 @@ enum class ControllerSupportResult : u32 {
struct ControllerSupportArgPrivate {
u32 arg_private_size{};
u32 arg_size{};
- bool flag_0{};
+ bool is_home_menu{};
bool flag_1{};
ControllerSupportMode mode{};
ControllerSupportCaller caller{};
diff --git a/src/core/hle/service/am/applets/applet_web_browser.cpp b/src/core/hle/service/am/applets/applet_web_browser.cpp
index 1c9a1dc29..b0ea2b381 100644
--- a/src/core/hle/service/am/applets/applet_web_browser.cpp
+++ b/src/core/hle/service/am/applets/applet_web_browser.cpp
@@ -330,8 +330,7 @@ void WebBrowser::ExtractOfflineRomFS() {
LOG_DEBUG(Service_AM, "Extracting RomFS to {}",
Common::FS::PathToUTF8String(offline_cache_dir));
- const auto extracted_romfs_dir =
- FileSys::ExtractRomFS(offline_romfs, FileSys::RomFSExtractionType::SingleDiscard);
+ const auto extracted_romfs_dir = FileSys::ExtractRomFS(offline_romfs);
const auto temp_dir = system.GetFilesystem()->CreateDirectory(
Common::FS::PathToUTF8String(offline_cache_dir), FileSys::Mode::ReadWrite);
diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h
index f02bbc450..0bf2598b7 100644
--- a/src/core/hle/service/am/applets/applets.h
+++ b/src/core/hle/service/am/applets/applets.h
@@ -69,6 +69,30 @@ enum class AppletId : u32 {
MyPage = 0x1A,
};
+enum class AppletProgramId : u64 {
+ QLaunch = 0x0100000000001000ull,
+ Auth = 0x0100000000001001ull,
+ Cabinet = 0x0100000000001002ull,
+ Controller = 0x0100000000001003ull,
+ DataErase = 0x0100000000001004ull,
+ Error = 0x0100000000001005ull,
+ NetConnect = 0x0100000000001006ull,
+ ProfileSelect = 0x0100000000001007ull,
+ SoftwareKeyboard = 0x0100000000001008ull,
+ MiiEdit = 0x0100000000001009ull,
+ Web = 0x010000000000100Aull,
+ Shop = 0x010000000000100Bull,
+ OverlayDisplay = 0x010000000000100Cull,
+ PhotoViewer = 0x010000000000100Dull,
+ Settings = 0x010000000000100Eull,
+ OfflineWeb = 0x010000000000100Full,
+ LoginShare = 0x0100000000001010ull,
+ WebAuth = 0x0100000000001011ull,
+ Starter = 0x0100000000001012ull,
+ MyPage = 0x0100000000001013ull,
+ MaxProgramId = 0x0100000000001FFFull,
+};
+
enum class LibraryAppletMode : u32 {
AllForeground = 0,
Background = 1,
diff --git a/src/core/hle/service/btm/btm.cpp b/src/core/hle/service/btm/btm.cpp
index 8069f75b7..c65e32489 100644
--- a/src/core/hle/service/btm/btm.cpp
+++ b/src/core/hle/service/btm/btm.cpp
@@ -127,7 +127,7 @@ public:
private:
void GetCore(HLERequestContext& ctx) {
- LOG_DEBUG(Service_BTM, "called");
+ LOG_WARNING(Service_BTM, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
@@ -263,13 +263,13 @@ public:
explicit IBtmSystemCore(Core::System& system_) : ServiceFramework{system_, "IBtmSystemCore"} {
// clang-format off
static const FunctionInfo functions[] = {
- {0, nullptr, "StartGamepadPairing"},
- {1, nullptr, "CancelGamepadPairing"},
+ {0, &IBtmSystemCore::StartGamepadPairing, "StartGamepadPairing"},
+ {1, &IBtmSystemCore::CancelGamepadPairing, "CancelGamepadPairing"},
{2, nullptr, "ClearGamepadPairingDatabase"},
{3, nullptr, "GetPairedGamepadCount"},
{4, nullptr, "EnableRadio"},
{5, nullptr, "DisableRadio"},
- {6, nullptr, "GetRadioOnOff"},
+ {6, &IBtmSystemCore::IsRadioEnabled, "IsRadioEnabled"},
{7, nullptr, "AcquireRadioEvent"},
{8, nullptr, "AcquireGamepadPairingEvent"},
{9, nullptr, "IsGamepadPairingStarted"},
@@ -280,18 +280,58 @@ public:
{14, nullptr, "AcquireAudioDeviceConnectionEvent"},
{15, nullptr, "ConnectAudioDevice"},
{16, nullptr, "IsConnectingAudioDevice"},
- {17, nullptr, "GetConnectedAudioDevices"},
+ {17, &IBtmSystemCore::GetConnectedAudioDevices, "GetConnectedAudioDevices"},
{18, nullptr, "DisconnectAudioDevice"},
{19, nullptr, "AcquirePairedAudioDeviceInfoChangedEvent"},
{20, nullptr, "GetPairedAudioDevices"},
{21, nullptr, "RemoveAudioDevicePairing"},
- {22, nullptr, "RequestAudioDeviceConnectionRejection"},
- {23, nullptr, "CancelAudioDeviceConnectionRejection"}
+ {22, &IBtmSystemCore::RequestAudioDeviceConnectionRejection, "RequestAudioDeviceConnectionRejection"},
+ {23, &IBtmSystemCore::CancelAudioDeviceConnectionRejection, "CancelAudioDeviceConnectionRejection"}
};
// clang-format on
RegisterHandlers(functions);
}
+
+private:
+ void IsRadioEnabled(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_BTM, "(STUBBED) called"); // Spams a lot when controller applet is running
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(true);
+ }
+
+ void StartGamepadPairing(HLERequestContext& ctx) {
+ LOG_WARNING(Service_BTM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
+
+ void CancelGamepadPairing(HLERequestContext& ctx) {
+ LOG_WARNING(Service_BTM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
+
+ void CancelAudioDeviceConnectionRejection(HLERequestContext& ctx) {
+ LOG_WARNING(Service_BTM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
+
+ void GetConnectedAudioDevices(HLERequestContext& ctx) {
+ LOG_WARNING(Service_BTM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push<u32>(0);
+ }
+
+ void RequestAudioDeviceConnectionRejection(HLERequestContext& ctx) {
+ LOG_WARNING(Service_BTM, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
};
class BTM_SYS final : public ServiceFramework<BTM_SYS> {
@@ -308,7 +348,7 @@ public:
private:
void GetCore(HLERequestContext& ctx) {
- LOG_DEBUG(Service_BTM, "called");
+ LOG_WARNING(Service_BTM, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/caps/caps_manager.cpp b/src/core/hle/service/caps/caps_manager.cpp
index 7d733eb54..96b225d5f 100644
--- a/src/core/hle/service/caps/caps_manager.cpp
+++ b/src/core/hle/service/caps/caps_manager.cpp
@@ -228,12 +228,14 @@ Result AlbumManager::LoadAlbumScreenShotThumbnail(
Result AlbumManager::SaveScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
- std::span<const u8> image_data, u64 aruid) {
- return SaveScreenShot(out_entry, attribute, {}, image_data, aruid);
+ AlbumReportOption report_option, std::span<const u8> image_data,
+ u64 aruid) {
+ return SaveScreenShot(out_entry, attribute, report_option, {}, image_data, aruid);
}
Result AlbumManager::SaveScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
+ AlbumReportOption report_option,
const ApplicationData& app_data, std::span<const u8> image_data,
u64 aruid) {
const u64 title_id = system.GetApplicationProcessProgramID();
@@ -407,10 +409,14 @@ Result AlbumManager::LoadImage(std::span<u8> out_image, const std::filesystem::p
return ResultSuccess;
}
-static void PNGToMemory(void* context, void* png, int len) {
+void AlbumManager::FlipVerticallyOnWrite(bool flip) {
+ stbi_flip_vertically_on_write(flip);
+}
+
+static void PNGToMemory(void* context, void* data, int len) {
std::vector<u8>* png_image = static_cast<std::vector<u8>*>(context);
- png_image->reserve(len);
- std::memcpy(png_image->data(), png, len);
+ unsigned char* png = static_cast<unsigned char*>(data);
+ png_image->insert(png_image->end(), png, png + len);
}
Result AlbumManager::SaveImage(ApplicationAlbumEntry& out_entry, std::span<const u8> image,
diff --git a/src/core/hle/service/caps/caps_manager.h b/src/core/hle/service/caps/caps_manager.h
index 44d85117f..e20c70c7b 100644
--- a/src/core/hle/service/caps/caps_manager.h
+++ b/src/core/hle/service/caps/caps_manager.h
@@ -59,14 +59,17 @@ public:
const ScreenShotDecodeOption& decoder_options) const;
Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
- std::span<const u8> image_data, u64 aruid);
- Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
- const ApplicationData& app_data, std::span<const u8> image_data,
+ AlbumReportOption report_option, std::span<const u8> image_data,
u64 aruid);
+ Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
+ AlbumReportOption report_option, const ApplicationData& app_data,
+ std::span<const u8> image_data, u64 aruid);
Result SaveEditedScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute, const AlbumFileId& file_id,
std::span<const u8> image_data);
+ void FlipVerticallyOnWrite(bool flip);
+
private:
static constexpr std::size_t NandAlbumFileLimit = 1000;
static constexpr std::size_t SdAlbumFileLimit = 10000;
diff --git a/src/core/hle/service/caps/caps_ss.cpp b/src/core/hle/service/caps/caps_ss.cpp
index 1ba2b7972..eab023568 100644
--- a/src/core/hle/service/caps/caps_ss.cpp
+++ b/src/core/hle/service/caps/caps_ss.cpp
@@ -34,7 +34,7 @@ void IScreenShotService::SaveScreenShotEx0(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
- u32 report_option{};
+ AlbumReportOption report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
@@ -49,13 +49,16 @@ void IScreenShotService::SaveScreenShotEx0(HLERequestContext& ctx) {
parameters.applet_resource_user_id);
ApplicationAlbumEntry entry{};
- const auto result = manager->SaveScreenShot(entry, parameters.attribute, image_data_buffer,
- parameters.applet_resource_user_id);
+ manager->FlipVerticallyOnWrite(false);
+ const auto result =
+ manager->SaveScreenShot(entry, parameters.attribute, parameters.report_option,
+ image_data_buffer, parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
+
void IScreenShotService::SaveEditedScreenShotEx1(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
@@ -83,6 +86,7 @@ void IScreenShotService::SaveEditedScreenShotEx1(HLERequestContext& ctx) {
image_data_buffer.size(), thumbnail_image_data_buffer.size());
ApplicationAlbumEntry entry{};
+ manager->FlipVerticallyOnWrite(false);
const auto result = manager->SaveEditedScreenShot(entry, parameters.attribute,
parameters.file_id, image_data_buffer);
diff --git a/src/core/hle/service/caps/caps_su.cpp b/src/core/hle/service/caps/caps_su.cpp
index e85625ee4..296b07b00 100644
--- a/src/core/hle/service/caps/caps_su.cpp
+++ b/src/core/hle/service/caps/caps_su.cpp
@@ -2,10 +2,12 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/logging/log.h"
+#include "core/core.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_su.h"
#include "core/hle/service/caps/caps_types.h"
#include "core/hle/service/ipc_helpers.h"
+#include "video_core/renderer_base.h"
namespace Service::Capture {
@@ -58,8 +60,10 @@ void IScreenShotApplicationService::SaveScreenShotEx0(HLERequestContext& ctx) {
parameters.applet_resource_user_id);
ApplicationAlbumEntry entry{};
- const auto result = manager->SaveScreenShot(entry, parameters.attribute, image_data_buffer,
- parameters.applet_resource_user_id);
+ manager->FlipVerticallyOnWrite(false);
+ const auto result =
+ manager->SaveScreenShot(entry, parameters.attribute, parameters.report_option,
+ image_data_buffer, parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
@@ -88,13 +92,43 @@ void IScreenShotApplicationService::SaveScreenShotEx1(HLERequestContext& ctx) {
ApplicationAlbumEntry entry{};
ApplicationData app_data{};
std::memcpy(&app_data, app_data_buffer.data(), sizeof(ApplicationData));
+ manager->FlipVerticallyOnWrite(false);
const auto result =
- manager->SaveScreenShot(entry, parameters.attribute, app_data, image_data_buffer,
- parameters.applet_resource_user_id);
+ manager->SaveScreenShot(entry, parameters.attribute, parameters.report_option, app_data,
+ image_data_buffer, parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
+void IScreenShotApplicationService::CaptureAndSaveScreenshot(AlbumReportOption report_option) {
+ auto& renderer = system.Renderer();
+ Layout::FramebufferLayout layout =
+ Layout::DefaultFrameLayout(screenshot_width, screenshot_height);
+
+ const Capture::ScreenShotAttribute attribute{
+ .unknown_0{},
+ .orientation = Capture::AlbumImageOrientation::None,
+ .unknown_1{},
+ .unknown_2{},
+ };
+
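+ // Request a capture from the renderer; the callback runs once the frame data is available
+ // and saves it through the album manager.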
+ renderer.RequestScreenshot(
+ image_data.data(),
+ [attribute, report_option, this](bool invert_y) {
+ // Convert from BGRA to RGBA
+ for (std::size_t i = 0; i < image_data.size(); i += bytes_per_pixel) {
+ const u8 temp = image_data[i];
+ image_data[i] = image_data[i + 2];
+ image_data[i + 2] = temp;
+ }
+
+ Capture::ApplicationAlbumEntry entry{};
+ manager->FlipVerticallyOnWrite(invert_y);
+ manager->SaveScreenShot(entry, attribute, report_option, image_data, {});
+ },
+ layout);
+}
+
} // namespace Service::Capture
diff --git a/src/core/hle/service/caps/caps_su.h b/src/core/hle/service/caps/caps_su.h
index 89e71f506..21912e95f 100644
--- a/src/core/hle/service/caps/caps_su.h
+++ b/src/core/hle/service/caps/caps_su.h
@@ -10,6 +10,7 @@ class System;
}
namespace Service::Capture {
+enum class AlbumReportOption : s32;
class AlbumManager;
class IScreenShotApplicationService final : public ServiceFramework<IScreenShotApplicationService> {
@@ -18,11 +19,19 @@ public:
std::shared_ptr<AlbumManager> album_manager);
~IScreenShotApplicationService() override;
+ void CaptureAndSaveScreenshot(AlbumReportOption report_option);
+
private:
+ static constexpr std::size_t screenshot_width = 1280;
+ static constexpr std::size_t screenshot_height = 720;
+ static constexpr std::size_t bytes_per_pixel = 4;
+
void SetShimLibraryVersion(HLERequestContext& ctx);
void SaveScreenShotEx0(HLERequestContext& ctx);
void SaveScreenShotEx1(HLERequestContext& ctx);
+ std::array<u8, screenshot_width * screenshot_height * bytes_per_pixel> image_data;
+
std::shared_ptr<AlbumManager> manager;
};
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 9d05f9801..0507b14e7 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -32,7 +32,7 @@ public:
{10200, nullptr, "SendFriendRequestForApplication"},
{10211, nullptr, "AddFacedFriendRequestForApplication"},
{10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"},
- {10420, nullptr, "IsBlockedUserListCacheAvailable"},
+ {10420, &IFriendService::CheckBlockedUserListAvailability, "CheckBlockedUserListAvailability"},
{10421, nullptr, "EnsureBlockedUserListAvailable"},
{10500, nullptr, "GetProfileList"},
{10600, nullptr, "DeclareOpenOnlinePlaySession"},
@@ -206,6 +206,17 @@ private:
rb.Push(true);
}
+ void CheckBlockedUserListAvailability(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto uuid{rp.PopRaw<Common::UUID>()};
+
+ LOG_WARNING(Service_Friend, "(STUBBED) called, uuid=0x{}", uuid.RawString());
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(true);
+ }
+
KernelHelpers::ServiceContext service_context;
Kernel::KEvent* completion_event;
diff --git a/src/core/hle/service/hid/controllers/console_six_axis.cpp b/src/core/hle/service/hid/controllers/console_six_axis.cpp
new file mode 100644
index 000000000..b2bf1d78d
--- /dev/null
+++ b/src/core/hle/service/hid/controllers/console_six_axis.cpp
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hid/emulated_console.h"
+#include "core/hid/hid_core.h"
+#include "core/hle/service/hid/controllers/console_six_axis.h"
+#include "core/memory.h"
+
+namespace Service::HID {
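+// Byte offset of the console six-axis sensor block within HID shared memory.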
+constexpr std::size_t SHARED_MEMORY_OFFSET = 0x3C200;
+
+ConsoleSixAxis::ConsoleSixAxis(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
+ : ControllerBase{hid_core_} {
+ console = hid_core.GetEmulatedConsole();
+ static_assert(SHARED_MEMORY_OFFSET + sizeof(ConsoleSharedMemory) < shared_memory_size,
+ "ConsoleSharedMemory is bigger than the shared memory");
+ shared_memory = std::construct_at(
+ reinterpret_cast<ConsoleSharedMemory*>(raw_shared_memory_ + SHARED_MEMORY_OFFSET));
+}
+
+ConsoleSixAxis::~ConsoleSixAxis() = default;
+
+void ConsoleSixAxis::OnInit() {}
+
+void ConsoleSixAxis::OnRelease() {}
+
+void ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+ if (!IsControllerActivated()) {
+ return;
+ }
+
+ const auto motion_status = console->GetMotion();
+
+ shared_memory->sampling_number++;
+ shared_memory->is_seven_six_axis_sensor_at_rest = motion_status.is_at_rest;
+ shared_memory->verticalization_error = motion_status.verticalization_error;
+ shared_memory->gyro_bias = motion_status.gyro_bias;
+}
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/controllers/console_six_axis.h b/src/core/hle/service/hid/controllers/console_six_axis.h
new file mode 100644
index 000000000..5b7c6a29a
--- /dev/null
+++ b/src/core/hle/service/hid/controllers/console_six_axis.h
@@ -0,0 +1,43 @@
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/vector_math.h"
+#include "core/hle/service/hid/controllers/controller_base.h"
+
+namespace Core::HID {
+class EmulatedConsole;
+} // namespace Core::HID
+
+namespace Service::HID {
+class ConsoleSixAxis final : public ControllerBase {
+public:
+ explicit ConsoleSixAxis(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~ConsoleSixAxis() override;
+
+ // Called when the controller is initialized
+ void OnInit() override;
+
+ // When the controller is released
+ void OnRelease() override;
+
+ // When the controller is requesting an update for the shared memory
+ void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
+
+private:
+ // This is nn::hid::detail::ConsoleSixAxisSensorSharedMemoryFormat
+ struct ConsoleSharedMemory {
+ u64 sampling_number{};
+ bool is_seven_six_axis_sensor_at_rest{};
+ INSERT_PADDING_BYTES(3); // padding
+ f32 verticalization_error{};
+ Common::Vec3f gyro_bias{};
+ INSERT_PADDING_BYTES(4); // padding
+ };
+ static_assert(sizeof(ConsoleSharedMemory) == 0x20, "ConsoleSharedMemory is an invalid size");
+
+ ConsoleSharedMemory* shared_memory = nullptr;
+ Core::HID::EmulatedConsole* console = nullptr;
+};
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/controllers/controller_base.cpp b/src/core/hle/service/hid/controllers/controller_base.cpp
index c58d67d7d..0bcd87062 100644
--- a/src/core/hle/service/hid/controllers/controller_base.cpp
+++ b/src/core/hle/service/hid/controllers/controller_base.cpp
@@ -8,12 +8,17 @@ namespace Service::HID {
ControllerBase::ControllerBase(Core::HID::HIDCore& hid_core_) : hid_core(hid_core_) {}
ControllerBase::~ControllerBase() = default;
-void ControllerBase::ActivateController() {
+Result ControllerBase::Activate() {
if (is_activated) {
- return;
+ return ResultSuccess;
}
is_activated = true;
OnInit();
+ return ResultSuccess;
+}
+
+Result ControllerBase::Activate(u64 aruid) {
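+ // The applet resource user id is not used here; defer to the common activation path.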
+ return Activate();
}
void ControllerBase::DeactivateController() {
diff --git a/src/core/hle/service/hid/controllers/controller_base.h b/src/core/hle/service/hid/controllers/controller_base.h
index d6f7a5073..9a44ee41e 100644
--- a/src/core/hle/service/hid/controllers/controller_base.h
+++ b/src/core/hle/service/hid/controllers/controller_base.h
@@ -4,6 +4,7 @@
#pragma once
#include "common/common_types.h"
+#include "core/hle/result.h"
namespace Core::Timing {
class CoreTiming;
@@ -31,7 +32,8 @@ public:
// When the controller is requesting a motion update for the shared memory
virtual void OnMotionUpdate(const Core::Timing::CoreTiming& core_timing) {}
- void ActivateController();
+ Result Activate();
+ Result Activate(u64 aruid);
void DeactivateController();
diff --git a/src/core/hle/service/hid/controllers/debug_pad.cpp b/src/core/hle/service/hid/controllers/debug_pad.cpp
index 8ec9f4a95..9de19ebfc 100644
--- a/src/core/hle/service/hid/controllers/debug_pad.cpp
+++ b/src/core/hle/service/hid/controllers/debug_pad.cpp
@@ -13,7 +13,7 @@
namespace Service::HID {
constexpr std::size_t SHARED_MEMORY_OFFSET = 0x00000;
-Controller_DebugPad::Controller_DebugPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
+DebugPad::DebugPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
: ControllerBase{hid_core_} {
static_assert(SHARED_MEMORY_OFFSET + sizeof(DebugPadSharedMemory) < shared_memory_size,
"DebugPadSharedMemory is bigger than the shared memory");
@@ -22,13 +22,13 @@ Controller_DebugPad::Controller_DebugPad(Core::HID::HIDCore& hid_core_, u8* raw_
controller = hid_core.GetEmulatedController(Core::HID::NpadIdType::Other);
}
-Controller_DebugPad::~Controller_DebugPad() = default;
+DebugPad::~DebugPad() = default;
-void Controller_DebugPad::OnInit() {}
+void DebugPad::OnInit() {}
-void Controller_DebugPad::OnRelease() {}
+void DebugPad::OnRelease() {}
-void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
shared_memory->debug_pad_lifo.buffer_count = 0;
shared_memory->debug_pad_lifo.buffer_tail = 0;
diff --git a/src/core/hle/service/hid/controllers/debug_pad.h b/src/core/hle/service/hid/controllers/debug_pad.h
index 68ff0ea79..5566dba77 100644
--- a/src/core/hle/service/hid/controllers/debug_pad.h
+++ b/src/core/hle/service/hid/controllers/debug_pad.h
@@ -15,10 +15,10 @@ struct AnalogStickState;
} // namespace Core::HID
namespace Service::HID {
-class Controller_DebugPad final : public ControllerBase {
+class DebugPad final : public ControllerBase {
public:
- explicit Controller_DebugPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
- ~Controller_DebugPad() override;
+ explicit DebugPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~DebugPad() override;
// Called when the controller is initialized
void OnInit() override;
diff --git a/src/core/hle/service/hid/controllers/gesture.cpp b/src/core/hle/service/hid/controllers/gesture.cpp
index 63eecd42b..59b2ec73c 100644
--- a/src/core/hle/service/hid/controllers/gesture.cpp
+++ b/src/core/hle/service/hid/controllers/gesture.cpp
@@ -23,7 +23,7 @@ constexpr f32 Square(s32 num) {
return static_cast<f32>(num * num);
}
-Controller_Gesture::Controller_Gesture(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
+Gesture::Gesture(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
: ControllerBase(hid_core_) {
static_assert(SHARED_MEMORY_OFFSET + sizeof(GestureSharedMemory) < shared_memory_size,
"GestureSharedMemory is bigger than the shared memory");
@@ -31,17 +31,17 @@ Controller_Gesture::Controller_Gesture(Core::HID::HIDCore& hid_core_, u8* raw_sh
reinterpret_cast<GestureSharedMemory*>(raw_shared_memory_ + SHARED_MEMORY_OFFSET));
console = hid_core.GetEmulatedConsole();
}
-Controller_Gesture::~Controller_Gesture() = default;
+Gesture::~Gesture() = default;
-void Controller_Gesture::OnInit() {
+void Gesture::OnInit() {
shared_memory->gesture_lifo.buffer_count = 0;
shared_memory->gesture_lifo.buffer_tail = 0;
force_update = true;
}
-void Controller_Gesture::OnRelease() {}
+void Gesture::OnRelease() {}
-void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
shared_memory->gesture_lifo.buffer_count = 0;
shared_memory->gesture_lifo.buffer_tail = 0;
@@ -64,7 +64,7 @@ void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
UpdateGestureSharedMemory(gesture, time_difference);
}
-void Controller_Gesture::ReadTouchInput() {
+void Gesture::ReadTouchInput() {
if (!Settings::values.touchscreen.enabled) {
fingers = {};
return;
@@ -76,8 +76,7 @@ void Controller_Gesture::ReadTouchInput() {
}
}
-bool Controller_Gesture::ShouldUpdateGesture(const GestureProperties& gesture,
- f32 time_difference) {
+bool Gesture::ShouldUpdateGesture(const GestureProperties& gesture, f32 time_difference) {
const auto& last_entry = GetLastGestureEntry();
if (force_update) {
force_update = false;
@@ -100,8 +99,7 @@ bool Controller_Gesture::ShouldUpdateGesture(const GestureProperties& gesture,
return false;
}
-void Controller_Gesture::UpdateGestureSharedMemory(GestureProperties& gesture,
- f32 time_difference) {
+void Gesture::UpdateGestureSharedMemory(GestureProperties& gesture, f32 time_difference) {
GestureType type = GestureType::Idle;
GestureAttribute attributes{};
@@ -138,8 +136,8 @@ void Controller_Gesture::UpdateGestureSharedMemory(GestureProperties& gesture,
shared_memory->gesture_lifo.WriteNextEntry(next_state);
}
-void Controller_Gesture::NewGesture(GestureProperties& gesture, GestureType& type,
- GestureAttribute& attributes) {
+void Gesture::NewGesture(GestureProperties& gesture, GestureType& type,
+ GestureAttribute& attributes) {
const auto& last_entry = GetLastGestureEntry();
gesture.detection_count++;
@@ -152,8 +150,8 @@ void Controller_Gesture::NewGesture(GestureProperties& gesture, GestureType& typ
}
}
-void Controller_Gesture::UpdateExistingGesture(GestureProperties& gesture, GestureType& type,
- f32 time_difference) {
+void Gesture::UpdateExistingGesture(GestureProperties& gesture, GestureType& type,
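+ // Decode and sanitize the image here as well so the reported size matches what GetImage returns.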
+ f32 time_difference) {
const auto& last_entry = GetLastGestureEntry();
// Promote to pan type if touch moved
@@ -186,9 +184,8 @@ void Controller_Gesture::UpdateExistingGesture(GestureProperties& gesture, Gestu
}
}
-void Controller_Gesture::EndGesture(GestureProperties& gesture,
- GestureProperties& last_gesture_props, GestureType& type,
- GestureAttribute& attributes, f32 time_difference) {
+void Gesture::EndGesture(GestureProperties& gesture, GestureProperties& last_gesture_props,
+ GestureType& type, GestureAttribute& attributes, f32 time_difference) {
const auto& last_entry = GetLastGestureEntry();
if (last_gesture_props.active_points != 0) {
@@ -222,9 +219,8 @@ void Controller_Gesture::EndGesture(GestureProperties& gesture,
}
}
-void Controller_Gesture::SetTapEvent(GestureProperties& gesture,
- GestureProperties& last_gesture_props, GestureType& type,
- GestureAttribute& attributes) {
+void Gesture::SetTapEvent(GestureProperties& gesture, GestureProperties& last_gesture_props,
+ GestureType& type, GestureAttribute& attributes) {
type = GestureType::Tap;
gesture = last_gesture_props;
force_update = true;
@@ -236,9 +232,8 @@ void Controller_Gesture::SetTapEvent(GestureProperties& gesture,
}
}
-void Controller_Gesture::UpdatePanEvent(GestureProperties& gesture,
- GestureProperties& last_gesture_props, GestureType& type,
- f32 time_difference) {
+void Gesture::UpdatePanEvent(GestureProperties& gesture, GestureProperties& last_gesture_props,
+ GestureType& type, f32 time_difference) {
const auto& last_entry = GetLastGestureEntry();
next_state.delta = gesture.mid_point - last_entry.pos;
@@ -263,9 +258,8 @@ void Controller_Gesture::UpdatePanEvent(GestureProperties& gesture,
}
}
-void Controller_Gesture::EndPanEvent(GestureProperties& gesture,
- GestureProperties& last_gesture_props, GestureType& type,
- f32 time_difference) {
+void Gesture::EndPanEvent(GestureProperties& gesture, GestureProperties& last_gesture_props,
+ GestureType& type, f32 time_difference) {
const auto& last_entry = GetLastGestureEntry();
next_state.vel_x =
static_cast<f32>(last_entry.delta.x) / (last_pan_time_difference + time_difference);
@@ -287,8 +281,8 @@ void Controller_Gesture::EndPanEvent(GestureProperties& gesture,
force_update = true;
}
-void Controller_Gesture::SetSwipeEvent(GestureProperties& gesture,
- GestureProperties& last_gesture_props, GestureType& type) {
+void Gesture::SetSwipeEvent(GestureProperties& gesture, GestureProperties& last_gesture_props,
+ GestureType& type) {
const auto& last_entry = GetLastGestureEntry();
type = GestureType::Swipe;
@@ -311,11 +305,11 @@ void Controller_Gesture::SetSwipeEvent(GestureProperties& gesture,
next_state.direction = GestureDirection::Up;
}
-const Controller_Gesture::GestureState& Controller_Gesture::GetLastGestureEntry() const {
+const Gesture::GestureState& Gesture::GetLastGestureEntry() const {
return shared_memory->gesture_lifo.ReadCurrentEntry().state;
}
-Controller_Gesture::GestureProperties Controller_Gesture::GetGestureProperties() {
+Gesture::GestureProperties Gesture::GetGestureProperties() {
GestureProperties gesture;
std::array<Core::HID::TouchFinger, MAX_POINTS> active_fingers;
const auto end_iter = std::copy_if(fingers.begin(), fingers.end(), active_fingers.begin(),
diff --git a/src/core/hle/service/hid/controllers/gesture.h b/src/core/hle/service/hid/controllers/gesture.h
index 0d6099ea0..4c6f8ee07 100644
--- a/src/core/hle/service/hid/controllers/gesture.h
+++ b/src/core/hle/service/hid/controllers/gesture.h
@@ -12,10 +12,10 @@
#include "core/hle/service/hid/ring_lifo.h"
namespace Service::HID {
-class Controller_Gesture final : public ControllerBase {
+class Gesture final : public ControllerBase {
public:
- explicit Controller_Gesture(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
- ~Controller_Gesture() override;
+ explicit Gesture(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~Gesture() override;
// Called when the controller is initialized
void OnInit() override;
diff --git a/src/core/hle/service/hid/controllers/keyboard.cpp b/src/core/hle/service/hid/controllers/keyboard.cpp
index 117d87433..ddb1b0ba4 100644
--- a/src/core/hle/service/hid/controllers/keyboard.cpp
+++ b/src/core/hle/service/hid/controllers/keyboard.cpp
@@ -12,7 +12,7 @@
namespace Service::HID {
constexpr std::size_t SHARED_MEMORY_OFFSET = 0x3800;
-Controller_Keyboard::Controller_Keyboard(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
+Keyboard::Keyboard(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
: ControllerBase{hid_core_} {
static_assert(SHARED_MEMORY_OFFSET + sizeof(KeyboardSharedMemory) < shared_memory_size,
"KeyboardSharedMemory is bigger than the shared memory");
@@ -21,13 +21,13 @@ Controller_Keyboard::Controller_Keyboard(Core::HID::HIDCore& hid_core_, u8* raw_
emulated_devices = hid_core.GetEmulatedDevices();
}
-Controller_Keyboard::~Controller_Keyboard() = default;
+Keyboard::~Keyboard() = default;
-void Controller_Keyboard::OnInit() {}
+void Keyboard::OnInit() {}
-void Controller_Keyboard::OnRelease() {}
+void Keyboard::OnRelease() {}
-void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
shared_memory->keyboard_lifo.buffer_count = 0;
shared_memory->keyboard_lifo.buffer_tail = 0;
diff --git a/src/core/hle/service/hid/controllers/keyboard.h b/src/core/hle/service/hid/controllers/keyboard.h
index 7532f53c6..172ec1309 100644
--- a/src/core/hle/service/hid/controllers/keyboard.h
+++ b/src/core/hle/service/hid/controllers/keyboard.h
@@ -14,10 +14,10 @@ struct KeyboardKey;
} // namespace Core::HID
namespace Service::HID {
-class Controller_Keyboard final : public ControllerBase {
+class Keyboard final : public ControllerBase {
public:
- explicit Controller_Keyboard(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
- ~Controller_Keyboard() override;
+ explicit Keyboard(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~Keyboard() override;
// Called when the controller is initialized
void OnInit() override;
diff --git a/src/core/hle/service/hid/controllers/mouse.cpp b/src/core/hle/service/hid/controllers/mouse.cpp
index 0afc66681..6e5a04e34 100644
--- a/src/core/hle/service/hid/controllers/mouse.cpp
+++ b/src/core/hle/service/hid/controllers/mouse.cpp
@@ -12,8 +12,7 @@
namespace Service::HID {
constexpr std::size_t SHARED_MEMORY_OFFSET = 0x3400;
-Controller_Mouse::Controller_Mouse(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
- : ControllerBase{hid_core_} {
+Mouse::Mouse(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_) : ControllerBase{hid_core_} {
static_assert(SHARED_MEMORY_OFFSET + sizeof(MouseSharedMemory) < shared_memory_size,
"MouseSharedMemory is bigger than the shared memory");
shared_memory = std::construct_at(
@@ -21,12 +20,12 @@ Controller_Mouse::Controller_Mouse(Core::HID::HIDCore& hid_core_, u8* raw_shared
emulated_devices = hid_core.GetEmulatedDevices();
}
-Controller_Mouse::~Controller_Mouse() = default;
+Mouse::~Mouse() = default;
-void Controller_Mouse::OnInit() {}
-void Controller_Mouse::OnRelease() {}
+void Mouse::OnInit() {}
+void Mouse::OnRelease() {}
-void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
shared_memory->mouse_lifo.buffer_count = 0;
shared_memory->mouse_lifo.buffer_tail = 0;
diff --git a/src/core/hle/service/hid/controllers/mouse.h b/src/core/hle/service/hid/controllers/mouse.h
index 733d00577..a80f3823f 100644
--- a/src/core/hle/service/hid/controllers/mouse.h
+++ b/src/core/hle/service/hid/controllers/mouse.h
@@ -14,10 +14,10 @@ struct AnalogStickState;
} // namespace Core::HID
namespace Service::HID {
-class Controller_Mouse final : public ControllerBase {
+class Mouse final : public ControllerBase {
public:
- explicit Controller_Mouse(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
- ~Controller_Mouse() override;
+ explicit Mouse(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~Mouse() override;
// Called when the controller is initialized
void OnInit() override;
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index bc822f19e..08ee9de9c 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -18,6 +18,7 @@
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/service/hid/controllers/npad.h"
#include "core/hle/service/hid/errors.h"
+#include "core/hle/service/hid/hid_util.h"
#include "core/hle/service/kernel_helpers.h"
namespace Service::HID {
@@ -29,60 +30,8 @@ constexpr std::array<Core::HID::NpadIdType, 10> npad_id_list{
Core::HID::NpadIdType::Handheld,
};
-bool Controller_NPad::IsNpadIdValid(Core::HID::NpadIdType npad_id) {
- switch (npad_id) {
- case Core::HID::NpadIdType::Player1:
- case Core::HID::NpadIdType::Player2:
- case Core::HID::NpadIdType::Player3:
- case Core::HID::NpadIdType::Player4:
- case Core::HID::NpadIdType::Player5:
- case Core::HID::NpadIdType::Player6:
- case Core::HID::NpadIdType::Player7:
- case Core::HID::NpadIdType::Player8:
- case Core::HID::NpadIdType::Other:
- case Core::HID::NpadIdType::Handheld:
- return true;
- default:
- LOG_ERROR(Service_HID, "Invalid npad id {}", npad_id);
- return false;
- }
-}
-
-Result Controller_NPad::IsDeviceHandleValid(const Core::HID::VibrationDeviceHandle& device_handle) {
- const auto npad_id = IsNpadIdValid(static_cast<Core::HID::NpadIdType>(device_handle.npad_id));
- const bool npad_type = device_handle.npad_type < Core::HID::NpadStyleIndex::MaxNpadType;
- const bool device_index = device_handle.device_index < Core::HID::DeviceIndex::MaxDeviceIndex;
-
- if (!npad_type) {
- return VibrationInvalidStyleIndex;
- }
- if (!npad_id) {
- return VibrationInvalidNpadId;
- }
- if (!device_index) {
- return VibrationDeviceIndexOutOfRange;
- }
-
- return ResultSuccess;
-}
-
-Result Controller_NPad::VerifyValidSixAxisSensorHandle(
- const Core::HID::SixAxisSensorHandle& device_handle) {
- const auto npad_id = IsNpadIdValid(static_cast<Core::HID::NpadIdType>(device_handle.npad_id));
- const bool device_index = device_handle.device_index < Core::HID::DeviceIndex::MaxDeviceIndex;
-
- if (!npad_id) {
- return InvalidNpadId;
- }
- if (!device_index) {
- return NpadDeviceIndexOutOfRange;
- }
-
- return ResultSuccess;
-}
-
-Controller_NPad::Controller_NPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
- KernelHelpers::ServiceContext& service_context_)
+NPad::NPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
+ KernelHelpers::ServiceContext& service_context_)
: ControllerBase{hid_core_}, service_context{service_context_} {
static_assert(NPAD_OFFSET + (NPAD_COUNT * sizeof(NpadInternalState)) < shared_memory_size);
for (std::size_t i = 0; i < controller_data.size(); ++i) {
@@ -103,7 +52,7 @@ Controller_NPad::Controller_NPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_m
}
}
-Controller_NPad::~Controller_NPad() {
+NPad::~NPad() {
for (std::size_t i = 0; i < controller_data.size(); ++i) {
auto& controller = controller_data[i];
controller.device->DeleteCallback(controller.callback_key);
@@ -111,8 +60,7 @@ Controller_NPad::~Controller_NPad() {
OnRelease();
}
-void Controller_NPad::ControllerUpdate(Core::HID::ControllerTriggerType type,
- std::size_t controller_idx) {
+void NPad::ControllerUpdate(Core::HID::ControllerTriggerType type, std::size_t controller_idx) {
if (type == Core::HID::ControllerTriggerType::All) {
ControllerUpdate(Core::HID::ControllerTriggerType::Connected, controller_idx);
ControllerUpdate(Core::HID::ControllerTriggerType::Battery, controller_idx);
@@ -150,7 +98,7 @@ void Controller_NPad::ControllerUpdate(Core::HID::ControllerTriggerType type,
}
}
-void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
+void NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
auto& controller = GetControllerFromNpadIdType(npad_id);
if (!IsControllerSupported(controller.device->GetNpadStyleIndex())) {
return;
@@ -344,12 +292,13 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
controller.device->SetPollingMode(Core::HID::EmulatedDeviceIndex::AllDevices,
Common::Input::PollingMode::Active);
}
+
SignalStyleSetChangedEvent(npad_id);
WriteEmptyEntry(controller.shared_memory);
hid_core.SetLastActiveController(npad_id);
}
-void Controller_NPad::OnInit() {
+void NPad::OnInit() {
if (!IsControllerActivated()) {
return;
}
@@ -383,7 +332,7 @@ void Controller_NPad::OnInit() {
}
}
-void Controller_NPad::WriteEmptyEntry(NpadInternalState* npad) {
+void NPad::WriteEmptyEntry(NpadInternalState* npad) {
NPadGenericState dummy_pad_state{};
NpadGcTriggerState dummy_gc_state{};
dummy_pad_state.sampling_number = npad->fullkey_lifo.ReadCurrentEntry().sampling_number + 1;
@@ -404,7 +353,7 @@ void Controller_NPad::WriteEmptyEntry(NpadInternalState* npad) {
npad->gc_trigger_lifo.WriteNextEntry(dummy_gc_state);
}
-void Controller_NPad::OnRelease() {
+void NPad::OnRelease() {
is_controller_initialized = false;
for (std::size_t i = 0; i < controller_data.size(); ++i) {
auto& controller = controller_data[i];
@@ -415,7 +364,7 @@ void Controller_NPad::OnRelease() {
}
}
-void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
+void NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
std::scoped_lock lock{mutex};
auto& controller = GetControllerFromNpadIdType(npad_id);
const auto controller_type = controller.device->GetNpadStyleIndex();
@@ -457,12 +406,14 @@ void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
pad_entry.l_stick = stick_state.left;
}
- if (controller_type == Core::HID::NpadStyleIndex::JoyconLeft) {
+ if (controller_type == Core::HID::NpadStyleIndex::JoyconLeft ||
+ controller_type == Core::HID::NpadStyleIndex::JoyconDual) {
pad_entry.npad_buttons.left_sl.Assign(button_state.left_sl);
pad_entry.npad_buttons.left_sr.Assign(button_state.left_sr);
}
- if (controller_type == Core::HID::NpadStyleIndex::JoyconRight) {
+ if (controller_type == Core::HID::NpadStyleIndex::JoyconRight ||
+ controller_type == Core::HID::NpadStyleIndex::JoyconDual) {
pad_entry.npad_buttons.right_sl.Assign(button_state.right_sl);
pad_entry.npad_buttons.right_sr.Assign(button_state.right_sr);
}
@@ -482,7 +433,7 @@ void Controller_NPad::RequestPadStateUpdate(Core::HID::NpadIdType npad_id) {
}
}
-void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
return;
}
@@ -612,134 +563,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
}
}
-void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing) {
- if (!IsControllerActivated()) {
- return;
- }
-
- for (std::size_t i = 0; i < controller_data.size(); ++i) {
- auto& controller = controller_data[i];
-
- const auto& controller_type = controller.device->GetNpadStyleIndex();
-
- if (controller_type == Core::HID::NpadStyleIndex::None ||
- !controller.device->IsConnected()) {
- continue;
- }
-
- auto* npad = controller.shared_memory;
- const auto& motion_state = controller.device->GetMotions();
- auto& sixaxis_fullkey_state = controller.sixaxis_fullkey_state;
- auto& sixaxis_handheld_state = controller.sixaxis_handheld_state;
- auto& sixaxis_dual_left_state = controller.sixaxis_dual_left_state;
- auto& sixaxis_dual_right_state = controller.sixaxis_dual_right_state;
- auto& sixaxis_left_lifo_state = controller.sixaxis_left_lifo_state;
- auto& sixaxis_right_lifo_state = controller.sixaxis_right_lifo_state;
-
- // Clear previous state
- sixaxis_fullkey_state = {};
- sixaxis_handheld_state = {};
- sixaxis_dual_left_state = {};
- sixaxis_dual_right_state = {};
- sixaxis_left_lifo_state = {};
- sixaxis_right_lifo_state = {};
-
- if (controller.sixaxis_sensor_enabled && Settings::values.motion_enabled.GetValue()) {
- controller.sixaxis_at_rest = true;
- for (std::size_t e = 0; e < motion_state.size(); ++e) {
- controller.sixaxis_at_rest =
- controller.sixaxis_at_rest && motion_state[e].is_at_rest;
- }
- }
-
- const auto set_motion_state = [&](SixAxisSensorState& state,
- const Core::HID::ControllerMotion& hid_state) {
- using namespace std::literals::chrono_literals;
- static constexpr SixAxisSensorState default_motion_state = {
- .delta_time = std::chrono::nanoseconds(5ms).count(),
- .accel = {0, 0, -1.0f},
- .orientation =
- {
- Common::Vec3f{1.0f, 0, 0},
- Common::Vec3f{0, 1.0f, 0},
- Common::Vec3f{0, 0, 1.0f},
- },
- .attribute = {1},
- };
- if (!controller.sixaxis_sensor_enabled) {
- state = default_motion_state;
- return;
- }
- if (!Settings::values.motion_enabled.GetValue()) {
- state = default_motion_state;
- return;
- }
- state.attribute.is_connected.Assign(1);
- state.delta_time = std::chrono::nanoseconds(5ms).count();
- state.accel = hid_state.accel;
- state.gyro = hid_state.gyro;
- state.rotation = hid_state.rotation;
- state.orientation = hid_state.orientation;
- };
-
- switch (controller_type) {
- case Core::HID::NpadStyleIndex::None:
- ASSERT(false);
- break;
- case Core::HID::NpadStyleIndex::ProController:
- set_motion_state(sixaxis_fullkey_state, motion_state[0]);
- break;
- case Core::HID::NpadStyleIndex::Handheld:
- set_motion_state(sixaxis_handheld_state, motion_state[0]);
- break;
- case Core::HID::NpadStyleIndex::JoyconDual:
- set_motion_state(sixaxis_dual_left_state, motion_state[0]);
- set_motion_state(sixaxis_dual_right_state, motion_state[1]);
- break;
- case Core::HID::NpadStyleIndex::JoyconLeft:
- set_motion_state(sixaxis_left_lifo_state, motion_state[0]);
- break;
- case Core::HID::NpadStyleIndex::JoyconRight:
- set_motion_state(sixaxis_right_lifo_state, motion_state[1]);
- break;
- case Core::HID::NpadStyleIndex::Pokeball:
- using namespace std::literals::chrono_literals;
- set_motion_state(sixaxis_fullkey_state, motion_state[0]);
- sixaxis_fullkey_state.delta_time = std::chrono::nanoseconds(15ms).count();
- break;
- default:
- break;
- }
-
- sixaxis_fullkey_state.sampling_number =
- npad->sixaxis_fullkey_lifo.ReadCurrentEntry().state.sampling_number + 1;
- sixaxis_handheld_state.sampling_number =
- npad->sixaxis_handheld_lifo.ReadCurrentEntry().state.sampling_number + 1;
- sixaxis_dual_left_state.sampling_number =
- npad->sixaxis_dual_left_lifo.ReadCurrentEntry().state.sampling_number + 1;
- sixaxis_dual_right_state.sampling_number =
- npad->sixaxis_dual_right_lifo.ReadCurrentEntry().state.sampling_number + 1;
- sixaxis_left_lifo_state.sampling_number =
- npad->sixaxis_left_lifo.ReadCurrentEntry().state.sampling_number + 1;
- sixaxis_right_lifo_state.sampling_number =
- npad->sixaxis_right_lifo.ReadCurrentEntry().state.sampling_number + 1;
-
- if (Core::HID::IndexToNpadIdType(i) == Core::HID::NpadIdType::Handheld) {
- // This buffer only is updated on handheld on HW
- npad->sixaxis_handheld_lifo.WriteNextEntry(sixaxis_handheld_state);
- } else {
- // Handheld doesn't update this buffer on HW
- npad->sixaxis_fullkey_lifo.WriteNextEntry(sixaxis_fullkey_state);
- }
-
- npad->sixaxis_dual_left_lifo.WriteNextEntry(sixaxis_dual_left_state);
- npad->sixaxis_dual_right_lifo.WriteNextEntry(sixaxis_dual_right_state);
- npad->sixaxis_left_lifo.WriteNextEntry(sixaxis_left_lifo_state);
- npad->sixaxis_right_lifo.WriteNextEntry(sixaxis_right_lifo_state);
- }
-}
-
-void Controller_NPad::SetSupportedStyleSet(Core::HID::NpadStyleTag style_set) {
+void NPad::SetSupportedStyleSet(Core::HID::NpadStyleTag style_set) {
hid_core.SetSupportedStyleTag(style_set);
if (is_controller_initialized) {
@@ -750,14 +574,14 @@ void Controller_NPad::SetSupportedStyleSet(Core::HID::NpadStyleTag style_set) {
is_controller_initialized = true;
}
-Core::HID::NpadStyleTag Controller_NPad::GetSupportedStyleSet() const {
+Core::HID::NpadStyleTag NPad::GetSupportedStyleSet() const {
if (!is_controller_initialized) {
return {Core::HID::NpadStyleSet::None};
}
return hid_core.GetSupportedStyleTag();
}
-Result Controller_NPad::SetSupportedNpadIdTypes(std::span<const u8> data) {
+Result NPad::SetSupportedNpadIdTypes(std::span<const u8> data) {
constexpr std::size_t max_number_npad_ids = 0xa;
const auto length = data.size();
ASSERT(length > 0 && (length % sizeof(u32)) == 0);
@@ -773,17 +597,17 @@ Result Controller_NPad::SetSupportedNpadIdTypes(std::span<const u8> data) {
return ResultSuccess;
}
-void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
+void NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
const auto copy_amount = supported_npad_id_types.size() * sizeof(u32);
ASSERT(max_length <= copy_amount);
std::memcpy(data, supported_npad_id_types.data(), copy_amount);
}
-std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const {
+std::size_t NPad::GetSupportedNpadIdTypesSize() const {
return supported_npad_id_types.size();
}
-void Controller_NPad::SetHoldType(NpadJoyHoldType joy_hold_type) {
+void NPad::SetHoldType(NpadJoyHoldType joy_hold_type) {
if (joy_hold_type != NpadJoyHoldType::Horizontal &&
joy_hold_type != NpadJoyHoldType::Vertical) {
LOG_ERROR(Service_HID, "Npad joy hold type needs to be valid, joy_hold_type={}",
@@ -793,11 +617,11 @@ void Controller_NPad::SetHoldType(NpadJoyHoldType joy_hold_type) {
hold_type = joy_hold_type;
}
-Controller_NPad::NpadJoyHoldType Controller_NPad::GetHoldType() const {
+NPad::NpadJoyHoldType NPad::GetHoldType() const {
return hold_type;
}
-void Controller_NPad::SetNpadHandheldActivationMode(NpadHandheldActivationMode activation_mode) {
+void NPad::SetNpadHandheldActivationMode(NpadHandheldActivationMode activation_mode) {
if (activation_mode >= NpadHandheldActivationMode::MaxActivationMode) {
ASSERT_MSG(false, "Activation mode should be always None, Single or Dual");
return;
@@ -806,21 +630,20 @@ void Controller_NPad::SetNpadHandheldActivationMode(NpadHandheldActivationMode a
handheld_activation_mode = activation_mode;
}
-Controller_NPad::NpadHandheldActivationMode Controller_NPad::GetNpadHandheldActivationMode() const {
+NPad::NpadHandheldActivationMode NPad::GetNpadHandheldActivationMode() const {
return handheld_activation_mode;
}
-void Controller_NPad::SetNpadCommunicationMode(NpadCommunicationMode communication_mode_) {
+void NPad::SetNpadCommunicationMode(NpadCommunicationMode communication_mode_) {
communication_mode = communication_mode_;
}
-Controller_NPad::NpadCommunicationMode Controller_NPad::GetNpadCommunicationMode() const {
+NPad::NpadCommunicationMode NPad::GetNpadCommunicationMode() const {
return communication_mode;
}
-bool Controller_NPad::SetNpadMode(Core::HID::NpadIdType& new_npad_id, Core::HID::NpadIdType npad_id,
- NpadJoyDeviceType npad_device_type,
- NpadJoyAssignmentMode assignment_mode) {
+bool NPad::SetNpadMode(Core::HID::NpadIdType& new_npad_id, Core::HID::NpadIdType npad_id,
+ NpadJoyDeviceType npad_device_type, NpadJoyAssignmentMode assignment_mode) {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
return false;
@@ -889,9 +712,8 @@ bool Controller_NPad::SetNpadMode(Core::HID::NpadIdType& new_npad_id, Core::HID:
return true;
}
-bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
- std::size_t device_index,
- const Core::HID::VibrationValue& vibration_value) {
+bool NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, std::size_t device_index,
+ const Core::HID::VibrationValue& vibration_value) {
auto& controller = GetControllerFromNpadIdType(npad_id);
if (!controller.device->IsConnected()) {
return false;
@@ -935,10 +757,9 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
return controller.device->SetVibration(device_index, vibration);
}
-void Controller_NPad::VibrateController(
- const Core::HID::VibrationDeviceHandle& vibration_device_handle,
- const Core::HID::VibrationValue& vibration_value) {
- if (IsDeviceHandleValid(vibration_device_handle).IsError()) {
+void NPad::VibrateController(const Core::HID::VibrationDeviceHandle& vibration_device_handle,
+ const Core::HID::VibrationValue& vibration_value) {
+ if (IsVibrationHandleValid(vibration_device_handle).IsError()) {
return;
}
@@ -982,7 +803,7 @@ void Controller_NPad::VibrateController(
}
}
-void Controller_NPad::VibrateControllers(
+void NPad::VibrateControllers(
std::span<const Core::HID::VibrationDeviceHandle> vibration_device_handles,
std::span<const Core::HID::VibrationValue> vibration_values) {
if (!Settings::values.vibration_enabled.GetValue() && !permit_vibration_session_enabled) {
@@ -999,9 +820,9 @@ void Controller_NPad::VibrateControllers(
}
}
-Core::HID::VibrationValue Controller_NPad::GetLastVibration(
+Core::HID::VibrationValue NPad::GetLastVibration(
const Core::HID::VibrationDeviceHandle& vibration_device_handle) const {
- if (IsDeviceHandleValid(vibration_device_handle).IsError()) {
+ if (IsVibrationHandleValid(vibration_device_handle).IsError()) {
return {};
}
@@ -1010,9 +831,9 @@ Core::HID::VibrationValue Controller_NPad::GetLastVibration(
return controller.vibration[device_index].latest_vibration_value;
}
-void Controller_NPad::InitializeVibrationDevice(
+void NPad::InitializeVibrationDevice(
const Core::HID::VibrationDeviceHandle& vibration_device_handle) {
- if (IsDeviceHandleValid(vibration_device_handle).IsError()) {
+ if (IsVibrationHandleValid(vibration_device_handle).IsError()) {
return;
}
@@ -1021,8 +842,8 @@ void Controller_NPad::InitializeVibrationDevice(
InitializeVibrationDeviceAtIndex(npad_index, device_index);
}
-void Controller_NPad::InitializeVibrationDeviceAtIndex(Core::HID::NpadIdType npad_id,
- std::size_t device_index) {
+void NPad::InitializeVibrationDeviceAtIndex(Core::HID::NpadIdType npad_id,
+ std::size_t device_index) {
auto& controller = GetControllerFromNpadIdType(npad_id);
if (!Settings::values.vibration_enabled.GetValue()) {
controller.vibration[device_index].device_mounted = false;
@@ -1033,13 +854,13 @@ void Controller_NPad::InitializeVibrationDeviceAtIndex(Core::HID::NpadIdType npa
controller.device->IsVibrationEnabled(device_index);
}
-void Controller_NPad::SetPermitVibrationSession(bool permit_vibration_session) {
+void NPad::SetPermitVibrationSession(bool permit_vibration_session) {
permit_vibration_session_enabled = permit_vibration_session;
}
-bool Controller_NPad::IsVibrationDeviceMounted(
+bool NPad::IsVibrationDeviceMounted(
const Core::HID::VibrationDeviceHandle& vibration_device_handle) const {
- if (IsDeviceHandleValid(vibration_device_handle).IsError()) {
+ if (IsVibrationHandleValid(vibration_device_handle).IsError()) {
return false;
}
@@ -1048,7 +869,7 @@ bool Controller_NPad::IsVibrationDeviceMounted(
return controller.vibration[device_index].device_mounted;
}
-Kernel::KReadableEvent& Controller_NPad::GetStyleSetChangedEvent(Core::HID::NpadIdType npad_id) {
+Kernel::KReadableEvent& NPad::GetStyleSetChangedEvent(Core::HID::NpadIdType npad_id) {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
// Fallback to player 1
@@ -1060,18 +881,17 @@ Kernel::KReadableEvent& Controller_NPad::GetStyleSetChangedEvent(Core::HID::Npad
return controller.styleset_changed_event->GetReadableEvent();
}
-void Controller_NPad::SignalStyleSetChangedEvent(Core::HID::NpadIdType npad_id) const {
+void NPad::SignalStyleSetChangedEvent(Core::HID::NpadIdType npad_id) const {
const auto& controller = GetControllerFromNpadIdType(npad_id);
controller.styleset_changed_event->Signal();
}
-void Controller_NPad::AddNewControllerAt(Core::HID::NpadStyleIndex controller,
- Core::HID::NpadIdType npad_id) {
+void NPad::AddNewControllerAt(Core::HID::NpadStyleIndex controller, Core::HID::NpadIdType npad_id) {
UpdateControllerAt(controller, npad_id, true);
}
-void Controller_NPad::UpdateControllerAt(Core::HID::NpadStyleIndex type,
- Core::HID::NpadIdType npad_id, bool connected) {
+void NPad::UpdateControllerAt(Core::HID::NpadStyleIndex type, Core::HID::NpadIdType npad_id,
+ bool connected) {
auto& controller = GetControllerFromNpadIdType(npad_id);
if (!connected) {
DisconnectNpad(npad_id);
@@ -1082,7 +902,7 @@ void Controller_NPad::UpdateControllerAt(Core::HID::NpadStyleIndex type,
InitNewlyAddedController(npad_id);
}
-Result Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
+Result NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
return InvalidNpadId;
@@ -1108,9 +928,9 @@ Result Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
shared_memory->sixaxis_dual_right_properties.raw = 0;
shared_memory->sixaxis_left_properties.raw = 0;
shared_memory->sixaxis_right_properties.raw = 0;
- shared_memory->battery_level_dual = 0;
- shared_memory->battery_level_left = 0;
- shared_memory->battery_level_right = 0;
+ shared_memory->battery_level_dual = Core::HID::NpadBatteryLevel::Empty;
+ shared_memory->battery_level_left = Core::HID::NpadBatteryLevel::Empty;
+ shared_memory->battery_level_right = Core::HID::NpadBatteryLevel::Empty;
shared_memory->fullkey_color = {
.attribute = ColorAttribute::NoController,
.fullkey = {},
@@ -1131,54 +951,9 @@ Result Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
return ResultSuccess;
}
-Result Controller_NPad::SetGyroscopeZeroDriftMode(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::GyroscopeZeroDriftMode drift_mode) {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- auto& sixaxis = GetSixaxisState(sixaxis_handle);
- auto& controller = GetControllerFromHandle(sixaxis_handle);
- sixaxis.gyroscope_zero_drift_mode = drift_mode;
- controller.device->SetGyroscopeZeroDriftMode(drift_mode);
-
- return ResultSuccess;
-}
-
-Result Controller_NPad::GetGyroscopeZeroDriftMode(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::GyroscopeZeroDriftMode& drift_mode) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- const auto& sixaxis = GetSixaxisState(sixaxis_handle);
- drift_mode = sixaxis.gyroscope_zero_drift_mode;
-
- return ResultSuccess;
-}
-
-Result Controller_NPad::IsSixAxisSensorAtRest(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- bool& is_at_rest) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- const auto& controller = GetControllerFromHandle(sixaxis_handle);
- is_at_rest = controller.sixaxis_at_rest;
- return ResultSuccess;
-}
-
-Result Controller_NPad::IsFirmwareUpdateAvailableForSixAxisSensor(
+Result NPad::IsFirmwareUpdateAvailableForSixAxisSensor(
const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_firmware_available) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
if (is_valid.IsError()) {
LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
return is_valid;
@@ -1189,65 +964,9 @@ Result Controller_NPad::IsFirmwareUpdateAvailableForSixAxisSensor(
return ResultSuccess;
}
-Result Controller_NPad::EnableSixAxisSensorUnalteredPassthrough(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool is_enabled) {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- auto& sixaxis = GetSixaxisState(sixaxis_handle);
- sixaxis.unaltered_passtrough = is_enabled;
- return ResultSuccess;
-}
-
-Result Controller_NPad::IsSixAxisSensorUnalteredPassthroughEnabled(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_enabled) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- const auto& sixaxis = GetSixaxisState(sixaxis_handle);
- is_enabled = sixaxis.unaltered_passtrough;
- return ResultSuccess;
-}
-
-Result Controller_NPad::LoadSixAxisSensorCalibrationParameter(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorCalibrationParameter& calibration) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- // TODO: Request this data to the controller. On error return 0xd8ca
- const auto& sixaxis = GetSixaxisState(sixaxis_handle);
- calibration = sixaxis.calibration;
- return ResultSuccess;
-}
-
-Result Controller_NPad::GetSixAxisSensorIcInformation(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorIcInformation& ic_information) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- // TODO: Request this data to the controller. On error return 0xd8ca
- const auto& sixaxis = GetSixaxisState(sixaxis_handle);
- ic_information = sixaxis.ic_information;
- return ResultSuccess;
-}
-
-Result Controller_NPad::ResetIsSixAxisSensorDeviceNewlyAssigned(
+Result NPad::ResetIsSixAxisSensorDeviceNewlyAssigned(
const Core::HID::SixAxisSensorHandle& sixaxis_handle) {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
if (is_valid.IsError()) {
LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
return is_valid;
@@ -1259,83 +978,32 @@ Result Controller_NPad::ResetIsSixAxisSensorDeviceNewlyAssigned(
return ResultSuccess;
}
-Result Controller_NPad::SetSixAxisEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- bool sixaxis_status) {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- auto& controller = GetControllerFromHandle(sixaxis_handle);
- controller.sixaxis_sensor_enabled = sixaxis_status;
- return ResultSuccess;
+NPad::SixAxisLifo& NPad::GetSixAxisFullkeyLifo(Core::HID::NpadIdType npad_id) {
+ return GetControllerFromNpadIdType(npad_id).shared_memory->sixaxis_fullkey_lifo;
}
-Result Controller_NPad::IsSixAxisSensorFusionEnabled(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_fusion_enabled) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- const auto& sixaxis = GetSixaxisState(sixaxis_handle);
- is_fusion_enabled = sixaxis.is_fusion_enabled;
-
- return ResultSuccess;
+NPad::SixAxisLifo& NPad::GetSixAxisHandheldLifo(Core::HID::NpadIdType npad_id) {
+ return GetControllerFromNpadIdType(npad_id).shared_memory->sixaxis_handheld_lifo;
}
-Result Controller_NPad::SetSixAxisFusionEnabled(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool is_fusion_enabled) {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
- auto& sixaxis = GetSixaxisState(sixaxis_handle);
- sixaxis.is_fusion_enabled = is_fusion_enabled;
-
- return ResultSuccess;
+NPad::SixAxisLifo& NPad::GetSixAxisDualLeftLifo(Core::HID::NpadIdType npad_id) {
+ return GetControllerFromNpadIdType(npad_id).shared_memory->sixaxis_dual_left_lifo;
}
-Result Controller_NPad::SetSixAxisFusionParameters(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorFusionParameters sixaxis_fusion_parameters) {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- const auto param1 = sixaxis_fusion_parameters.parameter1;
- if (param1 < 0.0f || param1 > 1.0f) {
- return InvalidSixAxisFusionRange;
- }
-
- auto& sixaxis = GetSixaxisState(sixaxis_handle);
- sixaxis.fusion = sixaxis_fusion_parameters;
-
- return ResultSuccess;
+NPad::SixAxisLifo& NPad::GetSixAxisDualRightLifo(Core::HID::NpadIdType npad_id) {
+ return GetControllerFromNpadIdType(npad_id).shared_memory->sixaxis_dual_right_lifo;
}
-Result Controller_NPad::GetSixAxisFusionParameters(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorFusionParameters& parameters) const {
- const auto is_valid = VerifyValidSixAxisSensorHandle(sixaxis_handle);
- if (is_valid.IsError()) {
- LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
- return is_valid;
- }
-
- const auto& sixaxis = GetSixaxisState(sixaxis_handle);
- parameters = sixaxis.fusion;
+NPad::SixAxisLifo& NPad::GetSixAxisLeftLifo(Core::HID::NpadIdType npad_id) {
+ return GetControllerFromNpadIdType(npad_id).shared_memory->sixaxis_left_lifo;
+}
- return ResultSuccess;
+NPad::SixAxisLifo& NPad::GetSixAxisRightLifo(Core::HID::NpadIdType npad_id) {
+ return GetControllerFromNpadIdType(npad_id).shared_memory->sixaxis_right_lifo;
}
-Result Controller_NPad::MergeSingleJoyAsDualJoy(Core::HID::NpadIdType npad_id_1,
- Core::HID::NpadIdType npad_id_2) {
+Result NPad::MergeSingleJoyAsDualJoy(Core::HID::NpadIdType npad_id_1,
+ Core::HID::NpadIdType npad_id_2) {
if (!IsNpadIdValid(npad_id_1) || !IsNpadIdValid(npad_id_2)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id_1:{}, npad_id_2:{}", npad_id_1,
npad_id_2);
@@ -1397,18 +1065,17 @@ Result Controller_NPad::MergeSingleJoyAsDualJoy(Core::HID::NpadIdType npad_id_1,
return ResultSuccess;
}
-void Controller_NPad::StartLRAssignmentMode() {
+void NPad::StartLRAssignmentMode() {
// Nothing internally is used for lr assignment mode. Since we have the ability to set the
// controller types from boot, it doesn't really matter about showing a selection screen
is_in_lr_assignment_mode = true;
}
-void Controller_NPad::StopLRAssignmentMode() {
+void NPad::StopLRAssignmentMode() {
is_in_lr_assignment_mode = false;
}
-Result Controller_NPad::SwapNpadAssignment(Core::HID::NpadIdType npad_id_1,
- Core::HID::NpadIdType npad_id_2) {
+Result NPad::SwapNpadAssignment(Core::HID::NpadIdType npad_id_1, Core::HID::NpadIdType npad_id_2) {
if (!IsNpadIdValid(npad_id_1) || !IsNpadIdValid(npad_id_2)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id_1:{}, npad_id_2:{}", npad_id_1,
npad_id_2);
@@ -1439,8 +1106,7 @@ Result Controller_NPad::SwapNpadAssignment(Core::HID::NpadIdType npad_id_1,
return ResultSuccess;
}
-Result Controller_NPad::GetLedPattern(Core::HID::NpadIdType npad_id,
- Core::HID::LedPattern& pattern) const {
+Result NPad::GetLedPattern(Core::HID::NpadIdType npad_id, Core::HID::LedPattern& pattern) const {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
return InvalidNpadId;
@@ -1450,8 +1116,8 @@ Result Controller_NPad::GetLedPattern(Core::HID::NpadIdType npad_id,
return ResultSuccess;
}
-Result Controller_NPad::IsUnintendedHomeButtonInputProtectionEnabled(Core::HID::NpadIdType npad_id,
- bool& is_valid) const {
+Result NPad::IsUnintendedHomeButtonInputProtectionEnabled(Core::HID::NpadIdType npad_id,
+ bool& is_valid) const {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
return InvalidNpadId;
@@ -1461,8 +1127,8 @@ Result Controller_NPad::IsUnintendedHomeButtonInputProtectionEnabled(Core::HID::
return ResultSuccess;
}
-Result Controller_NPad::SetUnintendedHomeButtonInputProtectionEnabled(
- bool is_protection_enabled, Core::HID::NpadIdType npad_id) {
+Result NPad::SetUnintendedHomeButtonInputProtectionEnabled(bool is_protection_enabled,
+ Core::HID::NpadIdType npad_id) {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
return InvalidNpadId;
@@ -1472,11 +1138,11 @@ Result Controller_NPad::SetUnintendedHomeButtonInputProtectionEnabled(
return ResultSuccess;
}
-void Controller_NPad::SetAnalogStickUseCenterClamp(bool use_center_clamp) {
+void NPad::SetAnalogStickUseCenterClamp(bool use_center_clamp) {
analog_stick_use_center_clamp = use_center_clamp;
}
-void Controller_NPad::ClearAllConnectedControllers() {
+void NPad::ClearAllConnectedControllers() {
for (auto& controller : controller_data) {
if (controller.device->IsConnected() &&
controller.device->GetNpadStyleIndex() != Core::HID::NpadStyleIndex::None) {
@@ -1486,13 +1152,13 @@ void Controller_NPad::ClearAllConnectedControllers() {
}
}
-void Controller_NPad::DisconnectAllConnectedControllers() {
+void NPad::DisconnectAllConnectedControllers() {
for (auto& controller : controller_data) {
controller.device->Disconnect();
}
}
-void Controller_NPad::ConnectAllDisconnectedControllers() {
+void NPad::ConnectAllDisconnectedControllers() {
for (auto& controller : controller_data) {
if (controller.device->GetNpadStyleIndex() != Core::HID::NpadStyleIndex::None &&
!controller.device->IsConnected()) {
@@ -1501,18 +1167,18 @@ void Controller_NPad::ConnectAllDisconnectedControllers() {
}
}
-void Controller_NPad::ClearAllControllers() {
+void NPad::ClearAllControllers() {
for (auto& controller : controller_data) {
controller.device->Disconnect();
controller.device->SetNpadStyleIndex(Core::HID::NpadStyleIndex::None);
}
}
-Core::HID::NpadButton Controller_NPad::GetAndResetPressState() {
+Core::HID::NpadButton NPad::GetAndResetPressState() {
return static_cast<Core::HID::NpadButton>(press_state.exchange(0));
}
-void Controller_NPad::ApplyNpadSystemCommonPolicy() {
+void NPad::ApplyNpadSystemCommonPolicy() {
Core::HID::NpadStyleTag styletag{};
styletag.fullkey.Assign(1);
styletag.handheld.Assign(1);
@@ -1537,7 +1203,7 @@ void Controller_NPad::ApplyNpadSystemCommonPolicy() {
supported_npad_id_types[9] = Core::HID::NpadIdType::Handheld;
}
-bool Controller_NPad::IsControllerSupported(Core::HID::NpadStyleIndex controller) const {
+bool NPad::IsControllerSupported(Core::HID::NpadStyleIndex controller) const {
if (controller == Core::HID::NpadStyleIndex::Handheld) {
const bool support_handheld =
std::find(supported_npad_id_types.begin(), supported_npad_id_types.end(),
@@ -1588,51 +1254,50 @@ bool Controller_NPad::IsControllerSupported(Core::HID::NpadStyleIndex controller
return false;
}
-Controller_NPad::NpadControllerData& Controller_NPad::GetControllerFromHandle(
- const Core::HID::SixAxisSensorHandle& device_handle) {
+NPad::NpadControllerData& NPad::GetControllerFromHandle(
+ const Core::HID::VibrationDeviceHandle& device_handle) {
const auto npad_id = static_cast<Core::HID::NpadIdType>(device_handle.npad_id);
return GetControllerFromNpadIdType(npad_id);
}
-const Controller_NPad::NpadControllerData& Controller_NPad::GetControllerFromHandle(
- const Core::HID::SixAxisSensorHandle& device_handle) const {
+const NPad::NpadControllerData& NPad::GetControllerFromHandle(
+ const Core::HID::VibrationDeviceHandle& device_handle) const {
const auto npad_id = static_cast<Core::HID::NpadIdType>(device_handle.npad_id);
return GetControllerFromNpadIdType(npad_id);
}
-Controller_NPad::NpadControllerData& Controller_NPad::GetControllerFromHandle(
- const Core::HID::VibrationDeviceHandle& device_handle) {
+NPad::NpadControllerData& NPad::GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle) {
const auto npad_id = static_cast<Core::HID::NpadIdType>(device_handle.npad_id);
return GetControllerFromNpadIdType(npad_id);
}
-const Controller_NPad::NpadControllerData& Controller_NPad::GetControllerFromHandle(
- const Core::HID::VibrationDeviceHandle& device_handle) const {
+const NPad::NpadControllerData& NPad::GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle) const {
const auto npad_id = static_cast<Core::HID::NpadIdType>(device_handle.npad_id);
return GetControllerFromNpadIdType(npad_id);
}
-Controller_NPad::NpadControllerData& Controller_NPad::GetControllerFromNpadIdType(
- Core::HID::NpadIdType npad_id) {
+NPad::NpadControllerData& NPad::GetControllerFromNpadIdType(Core::HID::NpadIdType npad_id) {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
npad_id = Core::HID::NpadIdType::Player1;
}
- const auto npad_index = Core::HID::NpadIdTypeToIndex(npad_id);
+ const auto npad_index = NpadIdTypeToIndex(npad_id);
return controller_data[npad_index];
}
-const Controller_NPad::NpadControllerData& Controller_NPad::GetControllerFromNpadIdType(
+const NPad::NpadControllerData& NPad::GetControllerFromNpadIdType(
Core::HID::NpadIdType npad_id) const {
if (!IsNpadIdValid(npad_id)) {
LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
npad_id = Core::HID::NpadIdType::Player1;
}
- const auto npad_index = Core::HID::NpadIdTypeToIndex(npad_id);
+ const auto npad_index = NpadIdTypeToIndex(npad_id);
return controller_data[npad_index];
}
-Core::HID::SixAxisSensorProperties& Controller_NPad::GetSixaxisProperties(
+Core::HID::SixAxisSensorProperties& NPad::GetSixaxisProperties(
const Core::HID::SixAxisSensorHandle& sixaxis_handle) {
auto& controller = GetControllerFromHandle(sixaxis_handle);
switch (sixaxis_handle.npad_type) {
@@ -1655,7 +1320,7 @@ Core::HID::SixAxisSensorProperties& Controller_NPad::GetSixaxisProperties(
}
}
-const Core::HID::SixAxisSensorProperties& Controller_NPad::GetSixaxisProperties(
+const Core::HID::SixAxisSensorProperties& NPad::GetSixaxisProperties(
const Core::HID::SixAxisSensorHandle& sixaxis_handle) const {
const auto& controller = GetControllerFromHandle(sixaxis_handle);
switch (sixaxis_handle.npad_type) {
@@ -1678,50 +1343,13 @@ const Core::HID::SixAxisSensorProperties& Controller_NPad::GetSixaxisProperties(
}
}
-Controller_NPad::SixaxisParameters& Controller_NPad::GetSixaxisState(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle) {
- auto& controller = GetControllerFromHandle(sixaxis_handle);
- switch (sixaxis_handle.npad_type) {
- case Core::HID::NpadStyleIndex::ProController:
- case Core::HID::NpadStyleIndex::Pokeball:
- return controller.sixaxis_fullkey;
- case Core::HID::NpadStyleIndex::Handheld:
- return controller.sixaxis_handheld;
- case Core::HID::NpadStyleIndex::JoyconDual:
- if (sixaxis_handle.device_index == Core::HID::DeviceIndex::Left) {
- return controller.sixaxis_dual_left;
- }
- return controller.sixaxis_dual_right;
- case Core::HID::NpadStyleIndex::JoyconLeft:
- return controller.sixaxis_left;
- case Core::HID::NpadStyleIndex::JoyconRight:
- return controller.sixaxis_right;
- default:
- return controller.sixaxis_unknown;
- }
-}
+NPad::AppletDetailedUiType NPad::GetAppletDetailedUiType(Core::HID::NpadIdType npad_id) {
+ const auto& shared_memory = GetControllerFromNpadIdType(npad_id).shared_memory;
-const Controller_NPad::SixaxisParameters& Controller_NPad::GetSixaxisState(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle) const {
- const auto& controller = GetControllerFromHandle(sixaxis_handle);
- switch (sixaxis_handle.npad_type) {
- case Core::HID::NpadStyleIndex::ProController:
- case Core::HID::NpadStyleIndex::Pokeball:
- return controller.sixaxis_fullkey;
- case Core::HID::NpadStyleIndex::Handheld:
- return controller.sixaxis_handheld;
- case Core::HID::NpadStyleIndex::JoyconDual:
- if (sixaxis_handle.device_index == Core::HID::DeviceIndex::Left) {
- return controller.sixaxis_dual_left;
- }
- return controller.sixaxis_dual_right;
- case Core::HID::NpadStyleIndex::JoyconLeft:
- return controller.sixaxis_left;
- case Core::HID::NpadStyleIndex::JoyconRight:
- return controller.sixaxis_right;
- default:
- return controller.sixaxis_unknown;
- }
+ return {
+ .ui_variant = 0,
+ .footer = shared_memory->applet_footer_type,
+ };
}
} // namespace Service::HID
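
Aside from the Controller_NPad -> NPad rename, the hunks above replace the old IsDeviceHandleValid / VerifyValidSixAxisSensorHandle calls with IsVibrationHandleValid / IsSixaxisHandleValid. Below is a minimal standalone sketch of the validation pattern the removed helper body (top of this diff) implemented: check the npad id, then the device index, and return a typed result. The Result enum, handle struct, and the id/index bounds here are simplified stand-ins for illustration, not the yuzu definitions.

```cpp
// Simplified stand-ins; not the yuzu types.
#include <cstdint>

enum class Result { Success, InvalidNpadId, NpadDeviceIndexOutOfRange };

struct VibrationDeviceHandle {
    std::uint32_t npad_id;
    std::uint8_t device_index; // 0 = left, 1 = right in this sketch
};

constexpr bool IsNpadIdValid(std::uint32_t npad_id) {
    // Player1..Player8 are 0..7; Other/Handheld use distinct high values.
    return npad_id <= 7 || npad_id == 0x10 || npad_id == 0x20;
}

constexpr Result IsVibrationHandleValid(const VibrationDeviceHandle& handle) {
    if (!IsNpadIdValid(handle.npad_id)) {
        return Result::InvalidNpadId;
    }
    if (handle.device_index >= 2) { // MaxDeviceIndex stand-in
        return Result::NpadDeviceIndexOutOfRange;
    }
    return Result::Success;
}

static_assert(IsVibrationHandleValid({0, 0}) == Result::Success);
static_assert(IsVibrationHandleValid({9, 0}) == Result::InvalidNpadId);
```

Validating the handle up front is what lets every caller in the diff (VibrateController, GetLastVibration, InitializeVibrationDevice, IsVibrationDeviceMounted) bail out with a single `.IsError()` check before touching controller state.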
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index 949e58a4c..9167c93f0 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -10,7 +10,6 @@
#include "common/bit_field.h"
#include "common/common_types.h"
-#include "common/vector_math.h"
#include "core/hid/hid_types.h"
#include "core/hle/service/hid/controllers/controller_base.h"
@@ -34,11 +33,11 @@ union Result;
namespace Service::HID {
-class Controller_NPad final : public ControllerBase {
+class NPad final : public ControllerBase {
public:
- explicit Controller_NPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
- KernelHelpers::ServiceContext& service_context_);
- ~Controller_NPad() override;
+ explicit NPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
+ KernelHelpers::ServiceContext& service_context_);
+ ~NPad() override;
// Called when the controller is initialized
void OnInit() override;
@@ -49,9 +48,6 @@ public:
// When the controller is requesting an update for the shared memory
void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
- // When the controller is requesting a motion update for the shared memory
- void OnMotionUpdate(const Core::Timing::CoreTiming& core_timing) override;
-
// This is nn::hid::NpadJoyHoldType
enum class NpadJoyHoldType : u64 {
Vertical = 0,
@@ -78,6 +74,46 @@ public:
MaxActivationMode = 3,
};
+ // This is nn::hid::system::AppletFooterUiAttributesSet
+ struct AppletFooterUiAttributes {
+ INSERT_PADDING_BYTES(0x4);
+ };
+
+ // This is nn::hid::system::AppletFooterUiType
+ enum class AppletFooterUiType : u8 {
+ None = 0,
+ HandheldNone = 1,
+ HandheldJoyConLeftOnly = 2,
+ HandheldJoyConRightOnly = 3,
+ HandheldJoyConLeftJoyConRight = 4,
+ JoyDual = 5,
+ JoyDualLeftOnly = 6,
+ JoyDualRightOnly = 7,
+ JoyLeftHorizontal = 8,
+ JoyLeftVertical = 9,
+ JoyRightHorizontal = 10,
+ JoyRightVertical = 11,
+ SwitchProController = 12,
+ CompatibleProController = 13,
+ CompatibleJoyCon = 14,
+ LarkHvc1 = 15,
+ LarkHvc2 = 16,
+ LarkNesLeft = 17,
+ LarkNesRight = 18,
+ Lucia = 19,
+ Verification = 20,
+ Lagon = 21,
+ };
+
+ using AppletFooterUiVariant = u8;
+
+ // This is "nn::hid::system::AppletDetailedUiType".
+ struct AppletDetailedUiType {
+ AppletFooterUiVariant ui_variant;
+ INSERT_PADDING_BYTES(0x2);
+ AppletFooterUiType footer;
+ };
+ static_assert(sizeof(AppletDetailedUiType) == 0x4, "AppletDetailedUiType is an invalid size");
// This is nn::hid::NpadCommunicationMode
enum class NpadCommunicationMode : u64 {
Mode_5ms = 0,
@@ -86,6 +122,15 @@ public:
Default = 3,
};
+ enum class NpadRevision : u32 {
+ Revision0 = 0,
+ Revision1 = 1,
+ Revision2 = 2,
+ Revision3 = 3,
+ };
+
+ using SixAxisLifo = Lifo<Core::HID::SixAxisSensorState, hid_entry_count>;
+
void SetSupportedStyleSet(Core::HID::NpadStyleTag style_set);
Core::HID::NpadStyleTag GetSupportedStyleSet() const;
@@ -138,37 +183,18 @@ public:
Result DisconnectNpad(Core::HID::NpadIdType npad_id);
- Result SetGyroscopeZeroDriftMode(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::GyroscopeZeroDriftMode drift_mode);
- Result GetGyroscopeZeroDriftMode(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::GyroscopeZeroDriftMode& drift_mode) const;
- Result IsSixAxisSensorAtRest(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- bool& is_at_rest) const;
Result IsFirmwareUpdateAvailableForSixAxisSensor(
const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_firmware_available) const;
- Result EnableSixAxisSensorUnalteredPassthrough(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool is_enabled);
- Result IsSixAxisSensorUnalteredPassthroughEnabled(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_enabled) const;
- Result LoadSixAxisSensorCalibrationParameter(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorCalibrationParameter& calibration) const;
- Result GetSixAxisSensorIcInformation(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorIcInformation& ic_information) const;
Result ResetIsSixAxisSensorDeviceNewlyAssigned(
const Core::HID::SixAxisSensorHandle& sixaxis_handle);
- Result SetSixAxisEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- bool sixaxis_status);
- Result IsSixAxisSensorFusionEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- bool& is_fusion_enabled) const;
- Result SetSixAxisFusionEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- bool is_fusion_enabled);
- Result SetSixAxisFusionParameters(
- const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorFusionParameters sixaxis_fusion_parameters);
- Result GetSixAxisFusionParameters(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
- Core::HID::SixAxisSensorFusionParameters& parameters) const;
+
+ SixAxisLifo& GetSixAxisFullkeyLifo(Core::HID::NpadIdType npad_id);
+ SixAxisLifo& GetSixAxisHandheldLifo(Core::HID::NpadIdType npad_id);
+ SixAxisLifo& GetSixAxisDualLeftLifo(Core::HID::NpadIdType npad_id);
+ SixAxisLifo& GetSixAxisDualRightLifo(Core::HID::NpadIdType npad_id);
+ SixAxisLifo& GetSixAxisLeftLifo(Core::HID::NpadIdType npad_id);
+ SixAxisLifo& GetSixAxisRightLifo(Core::HID::NpadIdType npad_id);
+
Result GetLedPattern(Core::HID::NpadIdType npad_id, Core::HID::LedPattern& pattern) const;
Result IsUnintendedHomeButtonInputProtectionEnabled(Core::HID::NpadIdType npad_id,
bool& is_enabled) const;
@@ -192,10 +218,7 @@ public:
void ApplyNpadSystemCommonPolicy();
- static bool IsNpadIdValid(Core::HID::NpadIdType npad_id);
- static Result IsDeviceHandleValid(const Core::HID::VibrationDeviceHandle& device_handle);
- static Result VerifyValidSixAxisSensorHandle(
- const Core::HID::SixAxisSensorHandle& device_handle);
+ AppletDetailedUiType GetAppletDetailedUiType(Core::HID::NpadIdType npad_id);
private:
static constexpr std::size_t NPAD_COUNT = 10;
@@ -254,29 +277,6 @@ private:
};
static_assert(sizeof(NPadGenericState) == 0x28, "NPadGenericState is an invalid size");
- // This is nn::hid::SixAxisSensorAttribute
- struct SixAxisSensorAttribute {
- union {
- u32 raw{};
- BitField<0, 1, u32> is_connected;
- BitField<1, 1, u32> is_interpolated;
- };
- };
- static_assert(sizeof(SixAxisSensorAttribute) == 4, "SixAxisSensorAttribute is an invalid size");
-
- // This is nn::hid::SixAxisSensorState
- struct SixAxisSensorState {
- s64 delta_time{};
- s64 sampling_number{};
- Common::Vec3f accel{};
- Common::Vec3f gyro{};
- Common::Vec3f rotation{};
- std::array<Common::Vec3f, 3> orientation{};
- SixAxisSensorAttribute attribute{};
- INSERT_PADDING_BYTES(4); // Reserved
- };
- static_assert(sizeof(SixAxisSensorState) == 0x60, "SixAxisSensorState is an invalid size");
-
// This is nn::hid::server::NpadGcTriggerState
struct NpadGcTriggerState {
s64 sampling_number{};
@@ -353,37 +353,6 @@ private:
static_assert(sizeof(NfcXcdDeviceHandleStateImpl) == 0x18,
"NfcXcdDeviceHandleStateImpl is an invalid size");
- // This is nn::hid::system::AppletFooterUiAttributesSet
- struct AppletFooterUiAttributes {
- INSERT_PADDING_BYTES(0x4);
- };
-
- // This is nn::hid::system::AppletFooterUiType
- enum class AppletFooterUiType : u8 {
- None = 0,
- HandheldNone = 1,
- HandheldJoyConLeftOnly = 2,
- HandheldJoyConRightOnly = 3,
- HandheldJoyConLeftJoyConRight = 4,
- JoyDual = 5,
- JoyDualLeftOnly = 6,
- JoyDualRightOnly = 7,
- JoyLeftHorizontal = 8,
- JoyLeftVertical = 9,
- JoyRightHorizontal = 10,
- JoyRightVertical = 11,
- SwitchProController = 12,
- CompatibleProController = 13,
- CompatibleJoyCon = 14,
- LarkHvc1 = 15,
- LarkHvc2 = 16,
- LarkNesLeft = 17,
- LarkNesRight = 18,
- Lucia = 19,
- Verification = 20,
- Lagon = 21,
- };
-
// This is nn::hid::NpadLarkType
enum class NpadLarkType : u32 {
Invalid,
@@ -427,12 +396,12 @@ private:
Lifo<NPadGenericState, hid_entry_count> joy_right_lifo{};
Lifo<NPadGenericState, hid_entry_count> palma_lifo{};
Lifo<NPadGenericState, hid_entry_count> system_ext_lifo{};
- Lifo<SixAxisSensorState, hid_entry_count> sixaxis_fullkey_lifo{};
- Lifo<SixAxisSensorState, hid_entry_count> sixaxis_handheld_lifo{};
- Lifo<SixAxisSensorState, hid_entry_count> sixaxis_dual_left_lifo{};
- Lifo<SixAxisSensorState, hid_entry_count> sixaxis_dual_right_lifo{};
- Lifo<SixAxisSensorState, hid_entry_count> sixaxis_left_lifo{};
- Lifo<SixAxisSensorState, hid_entry_count> sixaxis_right_lifo{};
+ Lifo<Core::HID::SixAxisSensorState, hid_entry_count> sixaxis_fullkey_lifo{};
+ Lifo<Core::HID::SixAxisSensorState, hid_entry_count> sixaxis_handheld_lifo{};
+ Lifo<Core::HID::SixAxisSensorState, hid_entry_count> sixaxis_dual_left_lifo{};
+ Lifo<Core::HID::SixAxisSensorState, hid_entry_count> sixaxis_dual_right_lifo{};
+ Lifo<Core::HID::SixAxisSensorState, hid_entry_count> sixaxis_left_lifo{};
+ Lifo<Core::HID::SixAxisSensorState, hid_entry_count> sixaxis_right_lifo{};
DeviceType device_type{};
INSERT_PADDING_BYTES(0x4); // Reserved
NPadSystemProperties system_properties{};
@@ -466,16 +435,6 @@ private:
std::chrono::steady_clock::time_point last_vibration_timepoint{};
};
- struct SixaxisParameters {
- bool is_fusion_enabled{true};
- bool unaltered_passtrough{false};
- Core::HID::SixAxisSensorFusionParameters fusion{};
- Core::HID::SixAxisSensorCalibrationParameter calibration{};
- Core::HID::SixAxisSensorIcInformation ic_information{};
- Core::HID::GyroscopeZeroDriftMode gyroscope_zero_drift_mode{
- Core::HID::GyroscopeZeroDriftMode::Standard};
- };
-
struct NpadControllerData {
Kernel::KEvent* styleset_changed_event{};
NpadInternalState* shared_memory = nullptr;
@@ -489,27 +448,10 @@ private:
bool is_dual_left_connected{true};
bool is_dual_right_connected{true};
- // Motion parameters
- bool sixaxis_at_rest{true};
- bool sixaxis_sensor_enabled{true};
- SixaxisParameters sixaxis_fullkey{};
- SixaxisParameters sixaxis_handheld{};
- SixaxisParameters sixaxis_dual_left{};
- SixaxisParameters sixaxis_dual_right{};
- SixaxisParameters sixaxis_left{};
- SixaxisParameters sixaxis_right{};
- SixaxisParameters sixaxis_unknown{};
-
// Current pad state
NPadGenericState npad_pad_state{};
NPadGenericState npad_libnx_state{};
NpadGcTriggerState npad_trigger_state{};
- SixAxisSensorState sixaxis_fullkey_state{};
- SixAxisSensorState sixaxis_handheld_state{};
- SixAxisSensorState sixaxis_dual_left_state{};
- SixAxisSensorState sixaxis_dual_right_state{};
- SixAxisSensorState sixaxis_left_lifo_state{};
- SixAxisSensorState sixaxis_right_lifo_state{};
int callback_key{};
};
@@ -520,13 +462,13 @@ private:
void WriteEmptyEntry(NpadInternalState* npad);
NpadControllerData& GetControllerFromHandle(
- const Core::HID::SixAxisSensorHandle& device_handle);
- const NpadControllerData& GetControllerFromHandle(
- const Core::HID::SixAxisSensorHandle& device_handle) const;
- NpadControllerData& GetControllerFromHandle(
const Core::HID::VibrationDeviceHandle& device_handle);
const NpadControllerData& GetControllerFromHandle(
const Core::HID::VibrationDeviceHandle& device_handle) const;
+ NpadControllerData& GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle);
+ const NpadControllerData& GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle) const;
NpadControllerData& GetControllerFromNpadIdType(Core::HID::NpadIdType npad_id);
const NpadControllerData& GetControllerFromNpadIdType(Core::HID::NpadIdType npad_id) const;
@@ -534,9 +476,6 @@ private:
const Core::HID::SixAxisSensorHandle& device_handle);
const Core::HID::SixAxisSensorProperties& GetSixaxisProperties(
const Core::HID::SixAxisSensorHandle& device_handle) const;
- SixaxisParameters& GetSixaxisState(const Core::HID::SixAxisSensorHandle& device_handle);
- const SixaxisParameters& GetSixaxisState(
- const Core::HID::SixAxisSensorHandle& device_handle) const;
std::atomic<u64> press_state{};
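
The header hunks above move AppletFooterUiType from the private section into the public API and add the 4-byte AppletDetailedUiType returned by GetAppletDetailedUiType. A compile-only sketch of that layout follows, with a plain uint8_t array standing in for the INSERT_PADDING_BYTES macro and only a few of the enumerators from the diff reproduced; it is an illustration, not part of the tree.

```cpp
#include <cstdint>

enum class AppletFooterUiType : std::uint8_t { None = 0, JoyDual = 5, SwitchProController = 12 };
using AppletFooterUiVariant = std::uint8_t;

struct AppletDetailedUiType {
    AppletFooterUiVariant ui_variant;
    std::uint8_t reserved[2]; // INSERT_PADDING_BYTES(0x2) in the real header
    AppletFooterUiType footer;
};
static_assert(sizeof(AppletDetailedUiType) == 0x4, "AppletDetailedUiType is an invalid size");
```

With one-byte members and two reserved bytes, footer lands at offset 3 and the struct packs into exactly the 0x4 bytes the static_assert in the diff expects.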
diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp
index 14c67e454..588ff9d62 100644
--- a/src/core/hle/service/hid/controllers/palma.cpp
+++ b/src/core/hle/service/hid/controllers/palma.cpp
@@ -12,41 +12,43 @@
namespace Service::HID {
-Controller_Palma::Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
- KernelHelpers::ServiceContext& service_context_)
+Palma::Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
+ KernelHelpers::ServiceContext& service_context_)
: ControllerBase{hid_core_}, service_context{service_context_} {
controller = hid_core.GetEmulatedController(Core::HID::NpadIdType::Other);
operation_complete_event = service_context.CreateEvent("hid:PalmaOperationCompleteEvent");
}
-Controller_Palma::~Controller_Palma() = default;
+Palma::~Palma() {
+ service_context.CloseEvent(operation_complete_event);
+};
-void Controller_Palma::OnInit() {}
+void Palma::OnInit() {}
-void Controller_Palma::OnRelease() {}
+void Palma::OnRelease() {}
-void Controller_Palma::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void Palma::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
return;
}
}
-Result Controller_Palma::GetPalmaConnectionHandle(Core::HID::NpadIdType npad_id,
- PalmaConnectionHandle& handle) {
+Result Palma::GetPalmaConnectionHandle(Core::HID::NpadIdType npad_id,
+ PalmaConnectionHandle& handle) {
active_handle.npad_id = npad_id;
handle = active_handle;
return ResultSuccess;
}
-Result Controller_Palma::InitializePalma(const PalmaConnectionHandle& handle) {
+Result Palma::InitializePalma(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
- ActivateController();
+ Activate();
return ResultSuccess;
}
-Kernel::KReadableEvent& Controller_Palma::AcquirePalmaOperationCompleteEvent(
+Kernel::KReadableEvent& Palma::AcquirePalmaOperationCompleteEvent(
const PalmaConnectionHandle& handle) const {
if (handle.npad_id != active_handle.npad_id) {
LOG_ERROR(Service_HID, "Invalid npad id {}", handle.npad_id);
@@ -54,9 +56,9 @@ Kernel::KReadableEvent& Controller_Palma::AcquirePalmaOperationCompleteEvent(
return operation_complete_event->GetReadableEvent();
}
-Result Controller_Palma::GetPalmaOperationInfo(const PalmaConnectionHandle& handle,
- PalmaOperationType& operation_type,
- PalmaOperationData& data) const {
+Result Palma::GetPalmaOperationInfo(const PalmaConnectionHandle& handle,
+ PalmaOperationType& operation_type,
+ PalmaOperationData& data) const {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -65,8 +67,7 @@ Result Controller_Palma::GetPalmaOperationInfo(const PalmaConnectionHandle& hand
return ResultSuccess;
}
-Result Controller_Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle,
- u64 palma_activity) {
+Result Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle, u64 palma_activity) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -77,8 +78,7 @@ Result Controller_Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle,
return ResultSuccess;
}
-Result Controller_Palma::SetPalmaFrModeType(const PalmaConnectionHandle& handle,
- PalmaFrModeType fr_mode_) {
+Result Palma::SetPalmaFrModeType(const PalmaConnectionHandle& handle, PalmaFrModeType fr_mode_) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -86,7 +86,7 @@ Result Controller_Palma::SetPalmaFrModeType(const PalmaConnectionHandle& handle,
return ResultSuccess;
}
-Result Controller_Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) {
+Result Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -97,25 +97,25 @@ Result Controller_Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) {
return ResultSuccess;
}
-Result Controller_Palma::EnablePalmaStep(const PalmaConnectionHandle& handle, bool is_enabled) {
+Result Palma::EnablePalmaStep(const PalmaConnectionHandle& handle, bool is_enabled) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
return ResultSuccess;
}
-Result Controller_Palma::ResetPalmaStep(const PalmaConnectionHandle& handle) {
+Result Palma::ResetPalmaStep(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
return ResultSuccess;
}
-void Controller_Palma::ReadPalmaApplicationSection() {}
+void Palma::ReadPalmaApplicationSection() {}
-void Controller_Palma::WritePalmaApplicationSection() {}
+void Palma::WritePalmaApplicationSection() {}
-Result Controller_Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle) {
+Result Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -126,7 +126,7 @@ Result Controller_Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle
return ResultSuccess;
}
-Result Controller_Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle) {
+Result Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -137,10 +137,9 @@ Result Controller_Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle&
return ResultSuccess;
}
-void Controller_Palma::WritePalmaActivityEntry() {}
+void Palma::WritePalmaActivityEntry() {}
-Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle,
- u64 unknown) {
+Result Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle, u64 unknown) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -151,8 +150,8 @@ Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandl
return ResultSuccess;
}
-Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
- Common::ProcessAddress t_mem, u64 size) {
+Result Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
+ Common::ProcessAddress t_mem, u64 size) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -163,8 +162,8 @@ Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle
return ResultSuccess;
}
-Result Controller_Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle,
- s32 database_id_version_) {
+Result Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle,
+ s32 database_id_version_) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -176,8 +175,7 @@ Result Controller_Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnec
return ResultSuccess;
}
-Result Controller_Palma::GetPalmaDataBaseIdentificationVersion(
- const PalmaConnectionHandle& handle) {
+Result Palma::GetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -189,26 +187,26 @@ Result Controller_Palma::GetPalmaDataBaseIdentificationVersion(
return ResultSuccess;
}
-void Controller_Palma::SuspendPalmaFeature() {}
+void Palma::SuspendPalmaFeature() {}
-Result Controller_Palma::GetPalmaOperationResult(const PalmaConnectionHandle& handle) const {
+Result Palma::GetPalmaOperationResult(const PalmaConnectionHandle& handle) const {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
return operation.result;
}
-void Controller_Palma::ReadPalmaPlayLog() {}
+void Palma::ReadPalmaPlayLog() {}
-void Controller_Palma::ResetPalmaPlayLog() {}
+void Palma::ResetPalmaPlayLog() {}
-void Controller_Palma::SetIsPalmaAllConnectable(bool is_all_connectable) {
+void Palma::SetIsPalmaAllConnectable(bool is_all_connectable) {
// If true controllers are able to be paired
is_connectable = is_all_connectable;
}
-void Controller_Palma::SetIsPalmaPairedConnectable() {}
+void Palma::SetIsPalmaPairedConnectable() {}
-Result Controller_Palma::PairPalma(const PalmaConnectionHandle& handle) {
+Result Palma::PairPalma(const PalmaConnectionHandle& handle) {
if (handle.npad_id != active_handle.npad_id) {
return InvalidPalmaHandle;
}
@@ -216,14 +214,14 @@ Result Controller_Palma::PairPalma(const PalmaConnectionHandle& handle) {
return ResultSuccess;
}
-void Controller_Palma::SetPalmaBoostMode(bool boost_mode) {}
+void Palma::SetPalmaBoostMode(bool boost_mode) {}
-void Controller_Palma::CancelWritePalmaWaveEntry() {}
+void Palma::CancelWritePalmaWaveEntry() {}
-void Controller_Palma::EnablePalmaBoostMode() {}
+void Palma::EnablePalmaBoostMode() {}
-void Controller_Palma::GetPalmaBluetoothAddress() {}
+void Palma::GetPalmaBluetoothAddress() {}
-void Controller_Palma::SetDisallowedPalmaConnection() {}
+void Palma::SetDisallowedPalmaConnection() {}
} // namespace Service::HID
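
One behavioral change in this file: the Palma destructor is no longer defaulted and now closes the operation-complete event that the constructor creates. A self-contained sketch of that create/close pairing is below, with ServiceContext and KEvent reduced to minimal stand-ins for the yuzu kernel helpers.

```cpp
// Minimal stand-ins for KernelHelpers::ServiceContext and Kernel::KEvent.
#include <memory>
#include <string>
#include <vector>

struct KEvent {
    std::string name;
};

class ServiceContext {
public:
    KEvent* CreateEvent(std::string name) {
        events.push_back(std::make_unique<KEvent>());
        events.back()->name = std::move(name);
        return events.back().get();
    }
    void CloseEvent(KEvent* event) {
        std::erase_if(events, [event](const auto& e) { return e.get() == event; });
    }

private:
    std::vector<std::unique_ptr<KEvent>> events;
};

class Palma {
public:
    explicit Palma(ServiceContext& ctx) : service_context{ctx} {
        operation_complete_event = service_context.CreateEvent("hid:PalmaOperationCompleteEvent");
    }
    ~Palma() {
        // Mirrors the diff: release the event instead of leaking it on shutdown.
        service_context.CloseEvent(operation_complete_event);
    }

private:
    ServiceContext& service_context;
    KEvent* operation_complete_event{};
};
```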
diff --git a/src/core/hle/service/hid/controllers/palma.h b/src/core/hle/service/hid/controllers/palma.h
index a0491a819..a6047f36a 100644
--- a/src/core/hle/service/hid/controllers/palma.h
+++ b/src/core/hle/service/hid/controllers/palma.h
@@ -23,7 +23,7 @@ class EmulatedController;
} // namespace Core::HID
namespace Service::HID {
-class Controller_Palma final : public ControllerBase {
+class Palma final : public ControllerBase {
public:
using PalmaOperationData = std::array<u8, 0x140>;
@@ -97,9 +97,9 @@ public:
static_assert(sizeof(PalmaConnectionHandle) == 0x8,
"PalmaConnectionHandle has incorrect size.");
- explicit Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
- KernelHelpers::ServiceContext& service_context_);
- ~Controller_Palma() override;
+ explicit Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
+ KernelHelpers::ServiceContext& service_context_);
+ ~Palma() override;
// Called when the controller is initialized
void OnInit() override;
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.cpp b/src/core/hle/service/hid/controllers/seven_six_axis.cpp
index bcb272eaf..495568484 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.cpp
+++ b/src/core/hle/service/hid/controllers/seven_six_axis.cpp
@@ -1,32 +1,29 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+#include <cstring>
+#include "common/common_types.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/frontend/emu_window.h"
#include "core/hid/emulated_console.h"
+#include "core/hid/emulated_devices.h"
#include "core/hid/hid_core.h"
-#include "core/hle/service/hid/controllers/console_sixaxis.h"
+#include "core/hle/service/hid/controllers/seven_six_axis.h"
#include "core/memory.h"
namespace Service::HID {
-constexpr std::size_t SHARED_MEMORY_OFFSET = 0x3C200;
-
-Controller_ConsoleSixAxis::Controller_ConsoleSixAxis(Core::System& system_, u8* raw_shared_memory_)
+SevenSixAxis::SevenSixAxis(Core::System& system_)
: ControllerBase{system_.HIDCore()}, system{system_} {
console = hid_core.GetEmulatedConsole();
- static_assert(SHARED_MEMORY_OFFSET + sizeof(ConsoleSharedMemory) < shared_memory_size,
- "ConsoleSharedMemory is bigger than the shared memory");
- shared_memory = std::construct_at(
- reinterpret_cast<ConsoleSharedMemory*>(raw_shared_memory_ + SHARED_MEMORY_OFFSET));
}
-Controller_ConsoleSixAxis::~Controller_ConsoleSixAxis() = default;
-
-void Controller_ConsoleSixAxis::OnInit() {}
+SevenSixAxis::~SevenSixAxis() = default;
-void Controller_ConsoleSixAxis::OnRelease() {}
+void SevenSixAxis::OnInit() {}
+void SevenSixAxis::OnRelease() {}
-void Controller_ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void SevenSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated() || transfer_memory == 0) {
seven_sixaxis_lifo.buffer_count = 0;
seven_sixaxis_lifo.buffer_tail = 0;
@@ -53,22 +50,17 @@ void Controller_ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_ti
-motion_status.quaternion.xyz.z,
};
- shared_memory->sampling_number++;
- shared_memory->is_seven_six_axis_sensor_at_rest = motion_status.is_at_rest;
- shared_memory->verticalization_error = motion_status.verticalization_error;
- shared_memory->gyro_bias = motion_status.gyro_bias;
-
- // Update seven six axis transfer memory
seven_sixaxis_lifo.WriteNextEntry(next_seven_sixaxis_state);
system.ApplicationMemory().WriteBlock(transfer_memory, &seven_sixaxis_lifo,
sizeof(seven_sixaxis_lifo));
}
-void Controller_ConsoleSixAxis::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
+void SevenSixAxis::SetTransferMemoryAddress(Common::ProcessAddress t_mem) {
transfer_memory = t_mem;
}
-void Controller_ConsoleSixAxis::ResetTimestamp() {
+void SevenSixAxis::ResetTimestamp() {
last_saved_timestamp = last_global_timestamp;
}
+
} // namespace Service::HID
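With the ConsoleSharedMemory block gone, SevenSixAxis above publishes its data only by copying the whole LIFO into the transfer memory the game registers through SetTransferMemoryAddress. Below is a minimal, self-contained sketch of that pattern; the struct layouts and the WriteLifo helper are stand-ins, not yuzu's actual Lifo or ApplicationMemory types.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <type_traits>
    #include <vector>

    // Hypothetical stand-ins for the state and LIFO types written above.
    struct SevenState {
        std::int64_t sampling_number{};
        float accel[3]{};
    };

    template <typename State, std::size_t N>
    struct Lifo {
        std::int64_t timestamp{};
        std::int64_t buffer_count{};
        std::int64_t buffer_tail{};
        State entries[N]{};
    };

    // Only a trivially copyable layout can be blitted byte-for-byte into guest
    // memory, which is what WriteBlock() amounts to in the real code.
    static_assert(std::is_trivially_copyable_v<Lifo<SevenState, 0x21>>);

    // transfer_memory stands in for the game-mapped buffer registered via
    // SetTransferMemoryAddress().
    bool WriteLifo(std::vector<std::byte>& transfer_memory, const Lifo<SevenState, 0x21>& lifo) {
        if (transfer_memory.size() < sizeof(lifo)) {
            return false; // refuse to overrun the mapping
        }
        std::memcpy(transfer_memory.data(), &lifo, sizeof(lifo));
        return true;
    }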
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.h b/src/core/hle/service/hid/controllers/seven_six_axis.h
index 7015d924c..40e3f5d12 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.h
+++ b/src/core/hle/service/hid/controllers/seven_six_axis.h
@@ -1,10 +1,9 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
#pragma once
-#include <array>
-
+#include "common/common_types.h"
#include "common/quaternion.h"
#include "common/typed_address.h"
#include "core/hle/service/hid/controllers/controller_base.h"
@@ -19,10 +18,10 @@ class EmulatedConsole;
} // namespace Core::HID
namespace Service::HID {
-class Controller_ConsoleSixAxis final : public ControllerBase {
+class SevenSixAxis final : public ControllerBase {
public:
- explicit Controller_ConsoleSixAxis(Core::System& system_, u8* raw_shared_memory_);
- ~Controller_ConsoleSixAxis() override;
+ explicit SevenSixAxis(Core::System& system_);
+ ~SevenSixAxis() override;
// Called when the controller is initialized
void OnInit() override;
@@ -51,28 +50,16 @@ private:
};
static_assert(sizeof(SevenSixAxisState) == 0x48, "SevenSixAxisState is an invalid size");
- // This is nn::hid::detail::ConsoleSixAxisSensorSharedMemoryFormat
- struct ConsoleSharedMemory {
- u64 sampling_number{};
- bool is_seven_six_axis_sensor_at_rest{};
- INSERT_PADDING_BYTES(3); // padding
- f32 verticalization_error{};
- Common::Vec3f gyro_bias{};
- INSERT_PADDING_BYTES(4); // padding
- };
- static_assert(sizeof(ConsoleSharedMemory) == 0x20, "ConsoleSharedMemory is an invalid size");
-
Lifo<SevenSixAxisState, 0x21> seven_sixaxis_lifo{};
static_assert(sizeof(seven_sixaxis_lifo) == 0xA70, "SevenSixAxisState is an invalid size");
+ u64 last_saved_timestamp{};
+ u64 last_global_timestamp{};
+
SevenSixAxisState next_seven_sixaxis_state{};
Common::ProcessAddress transfer_memory{};
- ConsoleSharedMemory* shared_memory = nullptr;
Core::HID::EmulatedConsole* console = nullptr;
- u64 last_saved_timestamp{};
- u64 last_global_timestamp{};
-
Core::System& system;
};
} // namespace Service::HID
diff --git a/src/core/hle/service/hid/controllers/six_axis.cpp b/src/core/hle/service/hid/controllers/six_axis.cpp
new file mode 100644
index 000000000..3d24a5c04
--- /dev/null
+++ b/src/core/hle/service/hid/controllers/six_axis.cpp
@@ -0,0 +1,413 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/common_types.h"
+#include "core/core_timing.h"
+#include "core/hid/emulated_controller.h"
+#include "core/hid/hid_core.h"
+#include "core/hle/service/hid/controllers/npad.h"
+#include "core/hle/service/hid/controllers/six_axis.h"
+#include "core/hle/service/hid/errors.h"
+#include "core/hle/service/hid/hid_util.h"
+
+namespace Service::HID {
+
+SixAxis::SixAxis(Core::HID::HIDCore& hid_core_, std::shared_ptr<NPad> npad_)
+ : ControllerBase{hid_core_}, npad{npad_} {
+ for (std::size_t i = 0; i < controller_data.size(); ++i) {
+ auto& controller = controller_data[i];
+ controller.device = hid_core.GetEmulatedControllerByIndex(i);
+ }
+}
+
+SixAxis::~SixAxis() = default;
+
+void SixAxis::OnInit() {}
+void SixAxis::OnRelease() {}
+
+void SixAxis::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+ if (!IsControllerActivated()) {
+ return;
+ }
+
+ for (std::size_t i = 0; i < controller_data.size(); ++i) {
+ auto& controller = controller_data[i];
+
+ const auto npad_id = IndexToNpadIdType(i);
+ const auto& controller_type = controller.device->GetNpadStyleIndex();
+
+ if (controller_type == Core::HID::NpadStyleIndex::None ||
+ !controller.device->IsConnected()) {
+ continue;
+ }
+
+ const auto& motion_state = controller.device->GetMotions();
+ auto& sixaxis_fullkey_state = controller.sixaxis_fullkey_state;
+ auto& sixaxis_handheld_state = controller.sixaxis_handheld_state;
+ auto& sixaxis_dual_left_state = controller.sixaxis_dual_left_state;
+ auto& sixaxis_dual_right_state = controller.sixaxis_dual_right_state;
+ auto& sixaxis_left_lifo_state = controller.sixaxis_left_lifo_state;
+ auto& sixaxis_right_lifo_state = controller.sixaxis_right_lifo_state;
+
+ auto& sixaxis_fullkey_lifo = npad->GetSixAxisFullkeyLifo(npad_id);
+ auto& sixaxis_handheld_lifo = npad->GetSixAxisHandheldLifo(npad_id);
+ auto& sixaxis_dual_left_lifo = npad->GetSixAxisDualLeftLifo(npad_id);
+ auto& sixaxis_dual_right_lifo = npad->GetSixAxisDualRightLifo(npad_id);
+ auto& sixaxis_left_lifo = npad->GetSixAxisLeftLifo(npad_id);
+ auto& sixaxis_right_lifo = npad->GetSixAxisRightLifo(npad_id);
+
+ // Clear previous state
+ sixaxis_fullkey_state = {};
+ sixaxis_handheld_state = {};
+ sixaxis_dual_left_state = {};
+ sixaxis_dual_right_state = {};
+ sixaxis_left_lifo_state = {};
+ sixaxis_right_lifo_state = {};
+
+ if (controller.sixaxis_sensor_enabled && Settings::values.motion_enabled.GetValue()) {
+ controller.sixaxis_at_rest = true;
+ for (std::size_t e = 0; e < motion_state.size(); ++e) {
+ controller.sixaxis_at_rest =
+ controller.sixaxis_at_rest && motion_state[e].is_at_rest;
+ }
+ }
+
+ const auto set_motion_state = [&](Core::HID::SixAxisSensorState& state,
+ const Core::HID::ControllerMotion& hid_state) {
+ using namespace std::literals::chrono_literals;
+ static constexpr Core::HID::SixAxisSensorState default_motion_state = {
+ .delta_time = std::chrono::nanoseconds(5ms).count(),
+ .accel = {0, 0, -1.0f},
+ .orientation =
+ {
+ Common::Vec3f{1.0f, 0, 0},
+ Common::Vec3f{0, 1.0f, 0},
+ Common::Vec3f{0, 0, 1.0f},
+ },
+ .attribute = {1},
+ };
+ if (!controller.sixaxis_sensor_enabled) {
+ state = default_motion_state;
+ return;
+ }
+ if (!Settings::values.motion_enabled.GetValue()) {
+ state = default_motion_state;
+ return;
+ }
+ state.attribute.is_connected.Assign(1);
+ state.delta_time = std::chrono::nanoseconds(5ms).count();
+ state.accel = hid_state.accel;
+ state.gyro = hid_state.gyro;
+ state.rotation = hid_state.rotation;
+ state.orientation = hid_state.orientation;
+ };
+
+ switch (controller_type) {
+ case Core::HID::NpadStyleIndex::None:
+ ASSERT(false);
+ break;
+ case Core::HID::NpadStyleIndex::ProController:
+ set_motion_state(sixaxis_fullkey_state, motion_state[0]);
+ break;
+ case Core::HID::NpadStyleIndex::Handheld:
+ set_motion_state(sixaxis_handheld_state, motion_state[0]);
+ break;
+ case Core::HID::NpadStyleIndex::JoyconDual:
+ set_motion_state(sixaxis_dual_left_state, motion_state[0]);
+ set_motion_state(sixaxis_dual_right_state, motion_state[1]);
+ break;
+ case Core::HID::NpadStyleIndex::JoyconLeft:
+ set_motion_state(sixaxis_left_lifo_state, motion_state[0]);
+ break;
+ case Core::HID::NpadStyleIndex::JoyconRight:
+ set_motion_state(sixaxis_right_lifo_state, motion_state[1]);
+ break;
+ case Core::HID::NpadStyleIndex::Pokeball:
+ using namespace std::literals::chrono_literals;
+ set_motion_state(sixaxis_fullkey_state, motion_state[0]);
+ sixaxis_fullkey_state.delta_time = std::chrono::nanoseconds(15ms).count();
+ break;
+ default:
+ break;
+ }
+
+ sixaxis_fullkey_state.sampling_number =
+ sixaxis_fullkey_lifo.ReadCurrentEntry().state.sampling_number + 1;
+ sixaxis_handheld_state.sampling_number =
+ sixaxis_handheld_lifo.ReadCurrentEntry().state.sampling_number + 1;
+ sixaxis_dual_left_state.sampling_number =
+ sixaxis_dual_left_lifo.ReadCurrentEntry().state.sampling_number + 1;
+ sixaxis_dual_right_state.sampling_number =
+ sixaxis_dual_right_lifo.ReadCurrentEntry().state.sampling_number + 1;
+ sixaxis_left_lifo_state.sampling_number =
+ sixaxis_left_lifo.ReadCurrentEntry().state.sampling_number + 1;
+ sixaxis_right_lifo_state.sampling_number =
+ sixaxis_right_lifo.ReadCurrentEntry().state.sampling_number + 1;
+
+ if (IndexToNpadIdType(i) == Core::HID::NpadIdType::Handheld) {
+            // This buffer is only updated on handheld on HW
+ sixaxis_handheld_lifo.WriteNextEntry(sixaxis_handheld_state);
+ } else {
+ // Handheld doesn't update this buffer on HW
+ sixaxis_fullkey_lifo.WriteNextEntry(sixaxis_fullkey_state);
+ }
+
+ sixaxis_dual_left_lifo.WriteNextEntry(sixaxis_dual_left_state);
+ sixaxis_dual_right_lifo.WriteNextEntry(sixaxis_dual_right_state);
+ sixaxis_left_lifo.WriteNextEntry(sixaxis_left_lifo_state);
+ sixaxis_right_lifo.WriteNextEntry(sixaxis_right_lifo_state);
+ }
+}
+
+Result SixAxis::SetGyroscopeZeroDriftMode(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::GyroscopeZeroDriftMode drift_mode) {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ auto& controller = GetControllerFromHandle(sixaxis_handle);
+ sixaxis.gyroscope_zero_drift_mode = drift_mode;
+ controller.device->SetGyroscopeZeroDriftMode(drift_mode);
+
+ return ResultSuccess;
+}
+
+Result SixAxis::GetGyroscopeZeroDriftMode(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::GyroscopeZeroDriftMode& drift_mode) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ const auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ drift_mode = sixaxis.gyroscope_zero_drift_mode;
+
+ return ResultSuccess;
+}
+
+Result SixAxis::IsSixAxisSensorAtRest(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool& is_at_rest) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ const auto& controller = GetControllerFromHandle(sixaxis_handle);
+ is_at_rest = controller.sixaxis_at_rest;
+ return ResultSuccess;
+}
+
+Result SixAxis::LoadSixAxisSensorCalibrationParameter(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorCalibrationParameter& calibration) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+    // TODO: Request this data from the controller. On error, return 0xd8ca
+ const auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ calibration = sixaxis.calibration;
+ return ResultSuccess;
+}
+
+Result SixAxis::GetSixAxisSensorIcInformation(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorIcInformation& ic_information) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+    // TODO: Request this data from the controller. On error, return 0xd8ca
+ const auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ ic_information = sixaxis.ic_information;
+ return ResultSuccess;
+}
+
+Result SixAxis::EnableSixAxisSensorUnalteredPassthrough(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool is_enabled) {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ sixaxis.unaltered_passtrough = is_enabled;
+ return ResultSuccess;
+}
+
+Result SixAxis::IsSixAxisSensorUnalteredPassthroughEnabled(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_enabled) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ const auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ is_enabled = sixaxis.unaltered_passtrough;
+ return ResultSuccess;
+}
+
+Result SixAxis::SetSixAxisEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool sixaxis_status) {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ auto& controller = GetControllerFromHandle(sixaxis_handle);
+ controller.sixaxis_sensor_enabled = sixaxis_status;
+ return ResultSuccess;
+}
+
+Result SixAxis::IsSixAxisSensorFusionEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool& is_fusion_enabled) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ const auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ is_fusion_enabled = sixaxis.is_fusion_enabled;
+
+ return ResultSuccess;
+}
+Result SixAxis::SetSixAxisFusionEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool is_fusion_enabled) {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ sixaxis.is_fusion_enabled = is_fusion_enabled;
+
+ return ResultSuccess;
+}
+
+Result SixAxis::SetSixAxisFusionParameters(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorFusionParameters sixaxis_fusion_parameters) {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ const auto param1 = sixaxis_fusion_parameters.parameter1;
+ if (param1 < 0.0f || param1 > 1.0f) {
+ return InvalidSixAxisFusionRange;
+ }
+
+ auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ sixaxis.fusion = sixaxis_fusion_parameters;
+
+ return ResultSuccess;
+}
+
+Result SixAxis::GetSixAxisFusionParameters(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorFusionParameters& parameters) const {
+ const auto is_valid = IsSixaxisHandleValid(sixaxis_handle);
+ if (is_valid.IsError()) {
+ LOG_ERROR(Service_HID, "Invalid handle, error_code={}", is_valid.raw);
+ return is_valid;
+ }
+
+ const auto& sixaxis = GetSixaxisState(sixaxis_handle);
+ parameters = sixaxis.fusion;
+
+ return ResultSuccess;
+}
+
+SixAxis::SixaxisParameters& SixAxis::GetSixaxisState(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle) {
+ auto& controller = GetControllerFromHandle(sixaxis_handle);
+ switch (sixaxis_handle.npad_type) {
+ case Core::HID::NpadStyleIndex::ProController:
+ case Core::HID::NpadStyleIndex::Pokeball:
+ return controller.sixaxis_fullkey;
+ case Core::HID::NpadStyleIndex::Handheld:
+ return controller.sixaxis_handheld;
+ case Core::HID::NpadStyleIndex::JoyconDual:
+ if (sixaxis_handle.device_index == Core::HID::DeviceIndex::Left) {
+ return controller.sixaxis_dual_left;
+ }
+ return controller.sixaxis_dual_right;
+ case Core::HID::NpadStyleIndex::JoyconLeft:
+ return controller.sixaxis_left;
+ case Core::HID::NpadStyleIndex::JoyconRight:
+ return controller.sixaxis_right;
+ default:
+ return controller.sixaxis_unknown;
+ }
+}
+
+const SixAxis::SixaxisParameters& SixAxis::GetSixaxisState(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle) const {
+ const auto& controller = GetControllerFromHandle(sixaxis_handle);
+ switch (sixaxis_handle.npad_type) {
+ case Core::HID::NpadStyleIndex::ProController:
+ case Core::HID::NpadStyleIndex::Pokeball:
+ return controller.sixaxis_fullkey;
+ case Core::HID::NpadStyleIndex::Handheld:
+ return controller.sixaxis_handheld;
+ case Core::HID::NpadStyleIndex::JoyconDual:
+ if (sixaxis_handle.device_index == Core::HID::DeviceIndex::Left) {
+ return controller.sixaxis_dual_left;
+ }
+ return controller.sixaxis_dual_right;
+ case Core::HID::NpadStyleIndex::JoyconLeft:
+ return controller.sixaxis_left;
+ case Core::HID::NpadStyleIndex::JoyconRight:
+ return controller.sixaxis_right;
+ default:
+ return controller.sixaxis_unknown;
+ }
+}
+
+SixAxis::NpadControllerData& SixAxis::GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle) {
+ const auto npad_id = static_cast<Core::HID::NpadIdType>(device_handle.npad_id);
+ return GetControllerFromNpadIdType(npad_id);
+}
+
+const SixAxis::NpadControllerData& SixAxis::GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle) const {
+ const auto npad_id = static_cast<Core::HID::NpadIdType>(device_handle.npad_id);
+ return GetControllerFromNpadIdType(npad_id);
+}
+
+SixAxis::NpadControllerData& SixAxis::GetControllerFromNpadIdType(Core::HID::NpadIdType npad_id) {
+ if (!IsNpadIdValid(npad_id)) {
+ LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
+ npad_id = Core::HID::NpadIdType::Player1;
+ }
+ const auto npad_index = NpadIdTypeToIndex(npad_id);
+ return controller_data[npad_index];
+}
+
+const SixAxis::NpadControllerData& SixAxis::GetControllerFromNpadIdType(
+ Core::HID::NpadIdType npad_id) const {
+ if (!IsNpadIdValid(npad_id)) {
+ LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
+ npad_id = Core::HID::NpadIdType::Player1;
+ }
+ const auto npad_index = NpadIdTypeToIndex(npad_id);
+ return controller_data[npad_index];
+}
+
+} // namespace Service::HID
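Two details of SixAxis::OnUpdate above are easy to miss: each new state's sampling_number is derived from the LIFO's current entry plus one, and delta_time is a raw nanosecond count (5ms for most styles, 15ms for Pokeball). The sketch below illustrates that bookkeeping with a simplified ring buffer, not yuzu's Lifo type.

    #include <array>
    #include <chrono>
    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in; yuzu's Lifo also tracks counts and timestamps.
    struct SensorState {
        std::int64_t delta_time{};
        std::int64_t sampling_number{};
    };

    template <typename State, std::size_t N>
    class RingLifo {
    public:
        const State& ReadCurrentEntry() const {
            return entries[tail];
        }
        void WriteNextEntry(const State& state) {
            tail = (tail + 1) % N;
            entries[tail] = state;
        }

    private:
        std::array<State, N> entries{};
        std::size_t tail{};
    };

    int main() {
        using namespace std::literals::chrono_literals;
        RingLifo<SensorState, 0x21> lifo;

        SensorState next{};
        // 5ms expressed as a nanosecond count, as in the regular update path.
        next.delta_time = std::chrono::nanoseconds(5ms).count();
        // Sampling numbers continue from whatever the LIFO last published.
        next.sampling_number = lifo.ReadCurrentEntry().sampling_number + 1;
        lifo.WriteNextEntry(next);
        return 0;
    }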
diff --git a/src/core/hle/service/hid/controllers/six_axis.h b/src/core/hle/service/hid/controllers/six_axis.h
new file mode 100644
index 000000000..4c4f5dc7b
--- /dev/null
+++ b/src/core/hle/service/hid/controllers/six_axis.h
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hid/hid_types.h"
+#include "core/hle/service/hid/controllers/controller_base.h"
+#include "core/hle/service/hid/ring_lifo.h"
+
+namespace Core::HID {
+class EmulatedController;
+} // namespace Core::HID
+
+namespace Service::HID {
+class NPad;
+
+class SixAxis final : public ControllerBase {
+public:
+ explicit SixAxis(Core::HID::HIDCore& hid_core_, std::shared_ptr<NPad> npad_);
+ ~SixAxis() override;
+
+ // Called when the controller is initialized
+ void OnInit() override;
+
+ // When the controller is released
+ void OnRelease() override;
+
+ // When the controller is requesting an update for the shared memory
+ void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
+
+ Result SetGyroscopeZeroDriftMode(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::GyroscopeZeroDriftMode drift_mode);
+ Result GetGyroscopeZeroDriftMode(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::GyroscopeZeroDriftMode& drift_mode) const;
+ Result IsSixAxisSensorAtRest(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool& is_at_rest) const;
+ Result EnableSixAxisSensorUnalteredPassthrough(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool is_enabled);
+ Result IsSixAxisSensorUnalteredPassthroughEnabled(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle, bool& is_enabled) const;
+ Result LoadSixAxisSensorCalibrationParameter(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorCalibrationParameter& calibration) const;
+ Result GetSixAxisSensorIcInformation(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorIcInformation& ic_information) const;
+ Result SetSixAxisEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool sixaxis_status);
+ Result IsSixAxisSensorFusionEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool& is_fusion_enabled) const;
+ Result SetSixAxisFusionEnabled(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ bool is_fusion_enabled);
+ Result SetSixAxisFusionParameters(
+ const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorFusionParameters sixaxis_fusion_parameters);
+ Result GetSixAxisFusionParameters(const Core::HID::SixAxisSensorHandle& sixaxis_handle,
+ Core::HID::SixAxisSensorFusionParameters& parameters) const;
+
+private:
+ static constexpr std::size_t NPAD_COUNT = 10;
+
+ struct SixaxisParameters {
+ bool is_fusion_enabled{true};
+ bool unaltered_passtrough{false};
+ Core::HID::SixAxisSensorFusionParameters fusion{};
+ Core::HID::SixAxisSensorCalibrationParameter calibration{};
+ Core::HID::SixAxisSensorIcInformation ic_information{};
+ Core::HID::GyroscopeZeroDriftMode gyroscope_zero_drift_mode{
+ Core::HID::GyroscopeZeroDriftMode::Standard};
+ };
+
+ struct NpadControllerData {
+ Core::HID::EmulatedController* device = nullptr;
+
+ // Motion parameters
+ bool sixaxis_at_rest{true};
+ bool sixaxis_sensor_enabled{true};
+ SixaxisParameters sixaxis_fullkey{};
+ SixaxisParameters sixaxis_handheld{};
+ SixaxisParameters sixaxis_dual_left{};
+ SixaxisParameters sixaxis_dual_right{};
+ SixaxisParameters sixaxis_left{};
+ SixaxisParameters sixaxis_right{};
+ SixaxisParameters sixaxis_unknown{};
+
+ // Current pad state
+ Core::HID::SixAxisSensorState sixaxis_fullkey_state{};
+ Core::HID::SixAxisSensorState sixaxis_handheld_state{};
+ Core::HID::SixAxisSensorState sixaxis_dual_left_state{};
+ Core::HID::SixAxisSensorState sixaxis_dual_right_state{};
+ Core::HID::SixAxisSensorState sixaxis_left_lifo_state{};
+ Core::HID::SixAxisSensorState sixaxis_right_lifo_state{};
+ int callback_key{};
+ };
+
+ SixaxisParameters& GetSixaxisState(const Core::HID::SixAxisSensorHandle& device_handle);
+ const SixaxisParameters& GetSixaxisState(
+ const Core::HID::SixAxisSensorHandle& device_handle) const;
+
+ NpadControllerData& GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle);
+ const NpadControllerData& GetControllerFromHandle(
+ const Core::HID::SixAxisSensorHandle& device_handle) const;
+ NpadControllerData& GetControllerFromNpadIdType(Core::HID::NpadIdType npad_id);
+ const NpadControllerData& GetControllerFromNpadIdType(Core::HID::NpadIdType npad_id) const;
+
+ std::shared_ptr<NPad> npad;
+ std::array<NpadControllerData, NPAD_COUNT> controller_data{};
+};
+} // namespace Service::HID
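The header above keeps one SixaxisParameters slot per style, plus separate slots for each half of a dual pair, and GetSixaxisState picks the slot from the handle's npad_type and device_index so that, for example, the two Joy-Cons of a pair keep independent drift and fusion settings. A hypothetical, compilable sketch of that selection; the enum values and field names here are illustrative only.

    #include <cstdint>

    // Illustrative enums; the real Core::HID types carry more values.
    enum class StyleIndex : std::uint8_t { Fullkey, Handheld, JoyconDual, JoyconLeft, JoyconRight };
    enum class DeviceIndex : std::uint8_t { Left, Right };

    struct SensorSettings {
        bool fusion_enabled{true};
        std::uint8_t drift_mode{1}; // 0 = loose, 1 = standard, 2 = tight (assumed encoding)
    };

    // One slot per style, plus separate slots for each half of a dual pair.
    struct PerControllerSettings {
        SensorSettings fullkey{}, handheld{}, dual_left{}, dual_right{}, left{}, right{}, unknown{};
    };

    SensorSettings& Select(PerControllerSettings& s, StyleIndex style, DeviceIndex device) {
        switch (style) {
        case StyleIndex::Fullkey:
            return s.fullkey;
        case StyleIndex::Handheld:
            return s.handheld;
        case StyleIndex::JoyconDual:
            return device == DeviceIndex::Left ? s.dual_left : s.dual_right;
        case StyleIndex::JoyconLeft:
            return s.left;
        case StyleIndex::JoyconRight:
            return s.right;
        }
        return s.unknown;
    }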
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp
index 3ef91df4b..fcd973414 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.cpp
+++ b/src/core/hle/service/hid/controllers/touchscreen.cpp
@@ -15,9 +15,9 @@
namespace Service::HID {
constexpr std::size_t SHARED_MEMORY_OFFSET = 0x400;
-Controller_Touchscreen::Controller_Touchscreen(Core::HID::HIDCore& hid_core_,
- u8* raw_shared_memory_)
- : ControllerBase{hid_core_} {
+TouchScreen::TouchScreen(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
+ : ControllerBase{hid_core_}, touchscreen_width(Layout::ScreenUndocked::Width),
+ touchscreen_height(Layout::ScreenUndocked::Height) {
static_assert(SHARED_MEMORY_OFFSET + sizeof(TouchSharedMemory) < shared_memory_size,
"TouchSharedMemory is bigger than the shared memory");
shared_memory = std::construct_at(
@@ -25,13 +25,13 @@ Controller_Touchscreen::Controller_Touchscreen(Core::HID::HIDCore& hid_core_,
console = hid_core.GetEmulatedConsole();
}
-Controller_Touchscreen::~Controller_Touchscreen() = default;
+TouchScreen::~TouchScreen() = default;
-void Controller_Touchscreen::OnInit() {}
+void TouchScreen::OnInit() {}
-void Controller_Touchscreen::OnRelease() {}
+void TouchScreen::OnRelease() {}
-void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void TouchScreen::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
shared_memory->touch_screen_lifo.timestamp = core_timing.GetGlobalTimeNs().count();
if (!IsControllerActivated()) {
@@ -96,8 +96,8 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
if (id < active_fingers_count) {
const auto& [active_x, active_y] = active_fingers[id].position;
touch_entry.position = {
- .x = static_cast<u16>(active_x * Layout::ScreenUndocked::Width),
- .y = static_cast<u16>(active_y * Layout::ScreenUndocked::Height),
+ .x = static_cast<u16>(active_x * static_cast<float>(touchscreen_width)),
+ .y = static_cast<u16>(active_y * static_cast<float>(touchscreen_height)),
};
touch_entry.diameter_x = Settings::values.touchscreen.diameter_x;
touch_entry.diameter_y = Settings::values.touchscreen.diameter_y;
@@ -121,4 +121,9 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
shared_memory->touch_screen_lifo.WriteNextEntry(next_state);
}
+void TouchScreen::SetTouchscreenDimensions(u32 width, u32 height) {
+ touchscreen_width = width;
+ touchscreen_height = height;
+}
+
} // namespace Service::HID
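The touchscreen change above replaces the hard-coded undocked layout constants with dimensions the frontend can supply through SetTouchscreenDimensions; finger positions arrive normalized to [0, 1] and are scaled to pixels at report time. A small sketch of that mapping follows; the TouchMapper class is hypothetical, and the 1280x720 defaults assume the undocked layout previously used.

    #include <cstdint>

    struct TouchPoint {
        std::uint16_t x;
        std::uint16_t y;
    };

    // Hypothetical helper mirroring the scaling performed in OnUpdate() above.
    class TouchMapper {
    public:
        void SetTouchscreenDimensions(std::uint32_t width, std::uint32_t height) {
            touchscreen_width = width;
            touchscreen_height = height;
        }

        // active_x / active_y are normalized to [0, 1] by the input backend.
        TouchPoint ToPixels(float active_x, float active_y) const {
            return {
                static_cast<std::uint16_t>(active_x * static_cast<float>(touchscreen_width)),
                static_cast<std::uint16_t>(active_y * static_cast<float>(touchscreen_height)),
            };
        }

    private:
        // Defaults assume the undocked layout the old code hard-coded (1280x720).
        std::uint32_t touchscreen_width = 1280;
        std::uint32_t touchscreen_height = 720;
    };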
diff --git a/src/core/hle/service/hid/controllers/touchscreen.h b/src/core/hle/service/hid/controllers/touchscreen.h
index dd00921fd..79f026a81 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.h
+++ b/src/core/hle/service/hid/controllers/touchscreen.h
@@ -14,10 +14,10 @@ class EmulatedConsole;
} // namespace Core::HID
namespace Service::HID {
-class Controller_Touchscreen final : public ControllerBase {
+class TouchScreen final : public ControllerBase {
public:
- explicit Controller_Touchscreen(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
- ~Controller_Touchscreen() override;
+ explicit TouchScreen(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~TouchScreen() override;
// Called when the controller is initialized
void OnInit() override;
@@ -28,6 +28,8 @@ public:
// When the controller is requesting an update for the shared memory
void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
+ void SetTouchscreenDimensions(u32 width, u32 height);
+
private:
static constexpr std::size_t MAX_FINGERS = 16;
@@ -53,5 +55,7 @@ private:
Core::HID::EmulatedConsole* console = nullptr;
std::array<Core::HID::TouchFinger, MAX_FINGERS> fingers{};
+ u32 touchscreen_width;
+ u32 touchscreen_height;
};
} // namespace Service::HID
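TouchScreen above and XPad just below share the same shared-memory wiring: a static_assert bounds check followed by std::construct_at at a fixed offset into the raw HID shared memory page, giving the controller a typed view without any allocation. A compilable sketch of the pattern; the struct contents and the total page size here are assumptions for illustration.

    #include <array>
    #include <cstddef>
    #include <memory>

    // Illustrative layout; the real XpadSharedMemory holds the basic_xpad LIFO.
    struct XpadSharedMemory {
        std::array<std::byte, 0x400> raw{};
    };

    constexpr std::size_t shared_memory_size = 0x40000;  // assumed total size of the HID page
    constexpr std::size_t SHARED_MEMORY_OFFSET = 0x3C00; // offset used in the XPad diff below
    static_assert(SHARED_MEMORY_OFFSET + sizeof(XpadSharedMemory) < shared_memory_size,
                  "XpadSharedMemory is bigger than the shared memory");

    XpadSharedMemory* MapXpadRegion(std::byte* raw_shared_memory) {
        // Placement-construct a typed view over this controller's slice of the page;
        // no allocation takes place and the underlying bytes stay shared with the guest.
        return std::construct_at(
            reinterpret_cast<XpadSharedMemory*>(raw_shared_memory + SHARED_MEMORY_OFFSET));
    }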
diff --git a/src/core/hle/service/hid/controllers/xpad.cpp b/src/core/hle/service/hid/controllers/xpad.cpp
index 62119e2c5..0aaed1fa7 100644
--- a/src/core/hle/service/hid/controllers/xpad.cpp
+++ b/src/core/hle/service/hid/controllers/xpad.cpp
@@ -10,20 +10,19 @@
namespace Service::HID {
constexpr std::size_t SHARED_MEMORY_OFFSET = 0x3C00;
-Controller_XPad::Controller_XPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_)
- : ControllerBase{hid_core_} {
+XPad::XPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_) : ControllerBase{hid_core_} {
static_assert(SHARED_MEMORY_OFFSET + sizeof(XpadSharedMemory) < shared_memory_size,
"XpadSharedMemory is bigger than the shared memory");
shared_memory = std::construct_at(
reinterpret_cast<XpadSharedMemory*>(raw_shared_memory_ + SHARED_MEMORY_OFFSET));
}
-Controller_XPad::~Controller_XPad() = default;
+XPad::~XPad() = default;
-void Controller_XPad::OnInit() {}
+void XPad::OnInit() {}
-void Controller_XPad::OnRelease() {}
+void XPad::OnRelease() {}
-void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
+void XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
if (!IsControllerActivated()) {
shared_memory->basic_xpad_lifo.buffer_count = 0;
shared_memory->basic_xpad_lifo.buffer_tail = 0;
diff --git a/src/core/hle/service/hid/controllers/xpad.h b/src/core/hle/service/hid/controllers/xpad.h
index d01dee5fc..9e63a317a 100644
--- a/src/core/hle/service/hid/controllers/xpad.h
+++ b/src/core/hle/service/hid/controllers/xpad.h
@@ -10,10 +10,10 @@
#include "core/hle/service/hid/ring_lifo.h"
namespace Service::HID {
-class Controller_XPad final : public ControllerBase {
+class XPad final : public ControllerBase {
public:
- explicit Controller_XPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
- ~Controller_XPad() override;
+ explicit XPad(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_);
+ ~XPad() override;
// Called when the controller is initialized
void OnInit() override;
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 4d70006c1..1b7381d8d 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -1,2860 +1,39 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <array>
-#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "common/settings.h"
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/hid/hid_core.h"
-#include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/k_shared_memory.h"
-#include "core/hle/kernel/k_transfer_memory.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/service/hid/errors.h"
#include "core/hle/service/hid/hid.h"
+#include "core/hle/service/hid/hid_debug_server.h"
+#include "core/hle/service/hid/hid_firmware_settings.h"
+#include "core/hle/service/hid/hid_server.h"
+#include "core/hle/service/hid/hid_system_server.h"
#include "core/hle/service/hid/hidbus.h"
#include "core/hle/service/hid/irs.h"
+#include "core/hle/service/hid/resource_manager.h"
#include "core/hle/service/hid/xcd.h"
-#include "core/hle/service/ipc_helpers.h"
#include "core/hle/service/server_manager.h"
-#include "core/memory.h"
-
-#include "core/hle/service/hid/controllers/console_sixaxis.h"
-#include "core/hle/service/hid/controllers/controller_base.h"
-#include "core/hle/service/hid/controllers/debug_pad.h"
-#include "core/hle/service/hid/controllers/gesture.h"
-#include "core/hle/service/hid/controllers/keyboard.h"
-#include "core/hle/service/hid/controllers/mouse.h"
-#include "core/hle/service/hid/controllers/npad.h"
-#include "core/hle/service/hid/controllers/palma.h"
-#include "core/hle/service/hid/controllers/stubbed.h"
-#include "core/hle/service/hid/controllers/touchscreen.h"
-#include "core/hle/service/hid/controllers/xpad.h"
namespace Service::HID {
-// Updating period for each HID device.
-// Period time is obtained by measuring the number of samples in a second on HW using a homebrew
-// Correct npad_update_ns is 4ms this is overclocked to lower input lag
-constexpr auto npad_update_ns = std::chrono::nanoseconds{1 * 1000 * 1000}; // (1ms, 1000Hz)
-constexpr auto default_update_ns = std::chrono::nanoseconds{4 * 1000 * 1000}; // (4ms, 1000Hz)
-constexpr auto mouse_keyboard_update_ns = std::chrono::nanoseconds{8 * 1000 * 1000}; // (8ms, 125Hz)
-constexpr auto motion_update_ns = std::chrono::nanoseconds{5 * 1000 * 1000}; // (5ms, 200Hz)
-
-IAppletResource::IAppletResource(Core::System& system_,
- KernelHelpers::ServiceContext& service_context_)
- : ServiceFramework{system_, "IAppletResource"}, service_context{service_context_} {
- static const FunctionInfo functions[] = {
- {0, &IAppletResource::GetSharedMemoryHandle, "GetSharedMemoryHandle"},
- };
- RegisterHandlers(functions);
- u8* shared_memory = system.Kernel().GetHidSharedMem().GetPointer();
- MakeController<Controller_DebugPad>(HidController::DebugPad, shared_memory);
- MakeController<Controller_Touchscreen>(HidController::Touchscreen, shared_memory);
- MakeController<Controller_Mouse>(HidController::Mouse, shared_memory);
- MakeController<Controller_Keyboard>(HidController::Keyboard, shared_memory);
- MakeController<Controller_XPad>(HidController::XPad, shared_memory);
- MakeController<Controller_Stubbed>(HidController::HomeButton, shared_memory);
- MakeController<Controller_Stubbed>(HidController::SleepButton, shared_memory);
- MakeController<Controller_Stubbed>(HidController::CaptureButton, shared_memory);
- MakeController<Controller_Stubbed>(HidController::InputDetector, shared_memory);
- MakeController<Controller_Stubbed>(HidController::UniquePad, shared_memory);
- MakeControllerWithServiceContext<Controller_NPad>(HidController::NPad, shared_memory);
- MakeController<Controller_Gesture>(HidController::Gesture, shared_memory);
- MakeController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor, shared_memory);
- MakeController<Controller_Stubbed>(HidController::DebugMouse, shared_memory);
- MakeControllerWithServiceContext<Controller_Palma>(HidController::Palma, shared_memory);
-
- // Homebrew doesn't try to activate some controllers, so we activate them by default
- GetController<Controller_NPad>(HidController::NPad).ActivateController();
- GetController<Controller_Touchscreen>(HidController::Touchscreen).ActivateController();
-
- GetController<Controller_Stubbed>(HidController::HomeButton).SetCommonHeaderOffset(0x4C00);
- GetController<Controller_Stubbed>(HidController::SleepButton).SetCommonHeaderOffset(0x4E00);
- GetController<Controller_Stubbed>(HidController::CaptureButton).SetCommonHeaderOffset(0x5000);
- GetController<Controller_Stubbed>(HidController::InputDetector).SetCommonHeaderOffset(0x5200);
- GetController<Controller_Stubbed>(HidController::UniquePad).SetCommonHeaderOffset(0x5A00);
- GetController<Controller_Stubbed>(HidController::DebugMouse).SetCommonHeaderOffset(0x3DC00);
-
- // Register update callbacks
- npad_update_event = Core::Timing::CreateEvent(
- "HID::UpdatePadCallback",
- [this](std::uintptr_t user_data, s64 time,
- std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
- const auto guard = LockService();
- UpdateNpad(user_data, ns_late);
- return std::nullopt;
- });
- default_update_event = Core::Timing::CreateEvent(
- "HID::UpdateDefaultCallback",
- [this](std::uintptr_t user_data, s64 time,
- std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
- const auto guard = LockService();
- UpdateControllers(user_data, ns_late);
- return std::nullopt;
- });
- mouse_keyboard_update_event = Core::Timing::CreateEvent(
- "HID::UpdateMouseKeyboardCallback",
- [this](std::uintptr_t user_data, s64 time,
- std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
- const auto guard = LockService();
- UpdateMouseKeyboard(user_data, ns_late);
- return std::nullopt;
- });
- motion_update_event = Core::Timing::CreateEvent(
- "HID::UpdateMotionCallback",
- [this](std::uintptr_t user_data, s64 time,
- std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
- const auto guard = LockService();
- UpdateMotion(user_data, ns_late);
- return std::nullopt;
- });
-
- system.CoreTiming().ScheduleLoopingEvent(npad_update_ns, npad_update_ns, npad_update_event);
- system.CoreTiming().ScheduleLoopingEvent(default_update_ns, default_update_ns,
- default_update_event);
- system.CoreTiming().ScheduleLoopingEvent(mouse_keyboard_update_ns, mouse_keyboard_update_ns,
- mouse_keyboard_update_event);
- system.CoreTiming().ScheduleLoopingEvent(motion_update_ns, motion_update_ns,
- motion_update_event);
-
- system.HIDCore().ReloadInputDevices();
-}
-
-void IAppletResource::ActivateController(HidController controller) {
- controllers[static_cast<size_t>(controller)]->ActivateController();
-}
-
-void IAppletResource::DeactivateController(HidController controller) {
- controllers[static_cast<size_t>(controller)]->DeactivateController();
-}
-
-IAppletResource::~IAppletResource() {
- system.CoreTiming().UnscheduleEvent(npad_update_event, 0);
- system.CoreTiming().UnscheduleEvent(default_update_event, 0);
- system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event, 0);
- system.CoreTiming().UnscheduleEvent(motion_update_event, 0);
-}
-
-void IAppletResource::GetSharedMemoryHandle(HLERequestContext& ctx) {
- LOG_DEBUG(Service_HID, "called");
-
- IPC::ResponseBuilder rb{ctx, 2, 1};
- rb.Push(ResultSuccess);
- rb.PushCopyObjects(&system.Kernel().GetHidSharedMem());
-}
-
-void IAppletResource::UpdateControllers(std::uintptr_t user_data,
- std::chrono::nanoseconds ns_late) {
- auto& core_timing = system.CoreTiming();
-
- for (const auto& controller : controllers) {
- // Keyboard has it's own update event
- if (controller == controllers[static_cast<size_t>(HidController::Keyboard)]) {
- continue;
- }
- // Mouse has it's own update event
- if (controller == controllers[static_cast<size_t>(HidController::Mouse)]) {
- continue;
- }
- // Npad has it's own update event
- if (controller == controllers[static_cast<size_t>(HidController::NPad)]) {
- continue;
- }
- controller->OnUpdate(core_timing);
- }
-}
-
-void IAppletResource::UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
- auto& core_timing = system.CoreTiming();
-
- controllers[static_cast<size_t>(HidController::NPad)]->OnUpdate(core_timing);
-}
-
-void IAppletResource::UpdateMouseKeyboard(std::uintptr_t user_data,
- std::chrono::nanoseconds ns_late) {
- auto& core_timing = system.CoreTiming();
-
- controllers[static_cast<size_t>(HidController::Mouse)]->OnUpdate(core_timing);
- controllers[static_cast<size_t>(HidController::Keyboard)]->OnUpdate(core_timing);
-}
-
-void IAppletResource::UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
- auto& core_timing = system.CoreTiming();
-
- controllers[static_cast<size_t>(HidController::NPad)]->OnMotionUpdate(core_timing);
-}
-
-class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
-public:
- explicit IActiveVibrationDeviceList(Core::System& system_,
- std::shared_ptr<IAppletResource> applet_resource_)
- : ServiceFramework{system_, "IActiveVibrationDeviceList"},
- applet_resource(applet_resource_) {
- // clang-format off
- static const FunctionInfo functions[] = {
- {0, &IActiveVibrationDeviceList::InitializeVibrationDevice, "InitializeVibrationDevice"},
- };
- // clang-format on
-
- RegisterHandlers(functions);
- }
-
-private:
- void InitializeVibrationDevice(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()};
-
- if (applet_resource != nullptr) {
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .InitializeVibrationDevice(vibration_device_handle);
- }
-
- LOG_DEBUG(Service_HID, "called, npad_type={}, npad_id={}, device_index={}",
- vibration_device_handle.npad_type, vibration_device_handle.npad_id,
- vibration_device_handle.device_index);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
- }
-
- std::shared_ptr<IAppletResource> applet_resource;
-};
-
-std::shared_ptr<IAppletResource> Hid::GetAppletResource() {
- if (applet_resource == nullptr) {
- applet_resource = std::make_shared<IAppletResource>(system, service_context);
- }
-
- return applet_resource;
-}
-
-Hid::Hid(Core::System& system_, std::shared_ptr<IAppletResource> applet_resource_)
- : ServiceFramework{system_, "hid"}, applet_resource{applet_resource_}, service_context{
- system_,
- service_name} {
- // clang-format off
- static const FunctionInfo functions[] = {
- {0, &Hid::CreateAppletResource, "CreateAppletResource"},
- {1, &Hid::ActivateDebugPad, "ActivateDebugPad"},
- {11, &Hid::ActivateTouchScreen, "ActivateTouchScreen"},
- {21, &Hid::ActivateMouse, "ActivateMouse"},
- {26, nullptr, "ActivateDebugMouse"},
- {31, &Hid::ActivateKeyboard, "ActivateKeyboard"},
- {32, &Hid::SendKeyboardLockKeyEvent, "SendKeyboardLockKeyEvent"},
- {40, nullptr, "AcquireXpadIdEventHandle"},
- {41, nullptr, "ReleaseXpadIdEventHandle"},
- {51, &Hid::ActivateXpad, "ActivateXpad"},
- {55, &Hid::GetXpadIDs, "GetXpadIds"},
- {56, nullptr, "ActivateJoyXpad"},
- {58, nullptr, "GetJoyXpadLifoHandle"},
- {59, nullptr, "GetJoyXpadIds"},
- {60, &Hid::ActivateSixAxisSensor, "ActivateSixAxisSensor"},
- {61, &Hid::DeactivateSixAxisSensor, "DeactivateSixAxisSensor"},
- {62, nullptr, "GetSixAxisSensorLifoHandle"},
- {63, nullptr, "ActivateJoySixAxisSensor"},
- {64, nullptr, "DeactivateJoySixAxisSensor"},
- {65, nullptr, "GetJoySixAxisSensorLifoHandle"},
- {66, &Hid::StartSixAxisSensor, "StartSixAxisSensor"},
- {67, &Hid::StopSixAxisSensor, "StopSixAxisSensor"},
- {68, &Hid::IsSixAxisSensorFusionEnabled, "IsSixAxisSensorFusionEnabled"},
- {69, &Hid::EnableSixAxisSensorFusion, "EnableSixAxisSensorFusion"},
- {70, &Hid::SetSixAxisSensorFusionParameters, "SetSixAxisSensorFusionParameters"},
- {71, &Hid::GetSixAxisSensorFusionParameters, "GetSixAxisSensorFusionParameters"},
- {72, &Hid::ResetSixAxisSensorFusionParameters, "ResetSixAxisSensorFusionParameters"},
- {73, nullptr, "SetAccelerometerParameters"},
- {74, nullptr, "GetAccelerometerParameters"},
- {75, nullptr, "ResetAccelerometerParameters"},
- {76, nullptr, "SetAccelerometerPlayMode"},
- {77, nullptr, "GetAccelerometerPlayMode"},
- {78, nullptr, "ResetAccelerometerPlayMode"},
- {79, &Hid::SetGyroscopeZeroDriftMode, "SetGyroscopeZeroDriftMode"},
- {80, &Hid::GetGyroscopeZeroDriftMode, "GetGyroscopeZeroDriftMode"},
- {81, &Hid::ResetGyroscopeZeroDriftMode, "ResetGyroscopeZeroDriftMode"},
- {82, &Hid::IsSixAxisSensorAtRest, "IsSixAxisSensorAtRest"},
- {83, &Hid::IsFirmwareUpdateAvailableForSixAxisSensor, "IsFirmwareUpdateAvailableForSixAxisSensor"},
- {84, &Hid::EnableSixAxisSensorUnalteredPassthrough, "EnableSixAxisSensorUnalteredPassthrough"},
- {85, &Hid::IsSixAxisSensorUnalteredPassthroughEnabled, "IsSixAxisSensorUnalteredPassthroughEnabled"},
- {86, nullptr, "StoreSixAxisSensorCalibrationParameter"},
- {87, &Hid::LoadSixAxisSensorCalibrationParameter, "LoadSixAxisSensorCalibrationParameter"},
- {88, &Hid::GetSixAxisSensorIcInformation, "GetSixAxisSensorIcInformation"},
- {89, &Hid::ResetIsSixAxisSensorDeviceNewlyAssigned, "ResetIsSixAxisSensorDeviceNewlyAssigned"},
- {91, &Hid::ActivateGesture, "ActivateGesture"},
- {100, &Hid::SetSupportedNpadStyleSet, "SetSupportedNpadStyleSet"},
- {101, &Hid::GetSupportedNpadStyleSet, "GetSupportedNpadStyleSet"},
- {102, &Hid::SetSupportedNpadIdType, "SetSupportedNpadIdType"},
- {103, &Hid::ActivateNpad, "ActivateNpad"},
- {104, &Hid::DeactivateNpad, "DeactivateNpad"},
- {106, &Hid::AcquireNpadStyleSetUpdateEventHandle, "AcquireNpadStyleSetUpdateEventHandle"},
- {107, &Hid::DisconnectNpad, "DisconnectNpad"},
- {108, &Hid::GetPlayerLedPattern, "GetPlayerLedPattern"},
- {109, &Hid::ActivateNpadWithRevision, "ActivateNpadWithRevision"},
- {120, &Hid::SetNpadJoyHoldType, "SetNpadJoyHoldType"},
- {121, &Hid::GetNpadJoyHoldType, "GetNpadJoyHoldType"},
- {122, &Hid::SetNpadJoyAssignmentModeSingleByDefault, "SetNpadJoyAssignmentModeSingleByDefault"},
- {123, &Hid::SetNpadJoyAssignmentModeSingle, "SetNpadJoyAssignmentModeSingle"},
- {124, &Hid::SetNpadJoyAssignmentModeDual, "SetNpadJoyAssignmentModeDual"},
- {125, &Hid::MergeSingleJoyAsDualJoy, "MergeSingleJoyAsDualJoy"},
- {126, &Hid::StartLrAssignmentMode, "StartLrAssignmentMode"},
- {127, &Hid::StopLrAssignmentMode, "StopLrAssignmentMode"},
- {128, &Hid::SetNpadHandheldActivationMode, "SetNpadHandheldActivationMode"},
- {129, &Hid::GetNpadHandheldActivationMode, "GetNpadHandheldActivationMode"},
- {130, &Hid::SwapNpadAssignment, "SwapNpadAssignment"},
- {131, &Hid::IsUnintendedHomeButtonInputProtectionEnabled, "IsUnintendedHomeButtonInputProtectionEnabled"},
- {132, &Hid::EnableUnintendedHomeButtonInputProtection, "EnableUnintendedHomeButtonInputProtection"},
- {133, &Hid::SetNpadJoyAssignmentModeSingleWithDestination, "SetNpadJoyAssignmentModeSingleWithDestination"},
- {134, &Hid::SetNpadAnalogStickUseCenterClamp, "SetNpadAnalogStickUseCenterClamp"},
- {135, &Hid::SetNpadCaptureButtonAssignment, "SetNpadCaptureButtonAssignment"},
- {136, &Hid::ClearNpadCaptureButtonAssignment, "ClearNpadCaptureButtonAssignment"},
- {200, &Hid::GetVibrationDeviceInfo, "GetVibrationDeviceInfo"},
- {201, &Hid::SendVibrationValue, "SendVibrationValue"},
- {202, &Hid::GetActualVibrationValue, "GetActualVibrationValue"},
- {203, &Hid::CreateActiveVibrationDeviceList, "CreateActiveVibrationDeviceList"},
- {204, &Hid::PermitVibration, "PermitVibration"},
- {205, &Hid::IsVibrationPermitted, "IsVibrationPermitted"},
- {206, &Hid::SendVibrationValues, "SendVibrationValues"},
- {207, &Hid::SendVibrationGcErmCommand, "SendVibrationGcErmCommand"},
- {208, &Hid::GetActualVibrationGcErmCommand, "GetActualVibrationGcErmCommand"},
- {209, &Hid::BeginPermitVibrationSession, "BeginPermitVibrationSession"},
- {210, &Hid::EndPermitVibrationSession, "EndPermitVibrationSession"},
- {211, &Hid::IsVibrationDeviceMounted, "IsVibrationDeviceMounted"},
- {212, nullptr, "SendVibrationValueInBool"},
- {300, &Hid::ActivateConsoleSixAxisSensor, "ActivateConsoleSixAxisSensor"},
- {301, &Hid::StartConsoleSixAxisSensor, "StartConsoleSixAxisSensor"},
- {302, &Hid::StopConsoleSixAxisSensor, "StopConsoleSixAxisSensor"},
- {303, &Hid::ActivateSevenSixAxisSensor, "ActivateSevenSixAxisSensor"},
- {304, &Hid::StartSevenSixAxisSensor, "StartSevenSixAxisSensor"},
- {305, &Hid::StopSevenSixAxisSensor, "StopSevenSixAxisSensor"},
- {306, &Hid::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"},
- {307, &Hid::FinalizeSevenSixAxisSensor, "FinalizeSevenSixAxisSensor"},
- {308, nullptr, "SetSevenSixAxisSensorFusionStrength"},
- {309, nullptr, "GetSevenSixAxisSensorFusionStrength"},
- {310, &Hid::ResetSevenSixAxisSensorTimestamp, "ResetSevenSixAxisSensorTimestamp"},
- {400, &Hid::IsUsbFullKeyControllerEnabled, "IsUsbFullKeyControllerEnabled"},
- {401, nullptr, "EnableUsbFullKeyController"},
- {402, nullptr, "IsUsbFullKeyControllerConnected"},
- {403, nullptr, "HasBattery"},
- {404, nullptr, "HasLeftRightBattery"},
- {405, nullptr, "GetNpadInterfaceType"},
- {406, nullptr, "GetNpadLeftRightInterfaceType"},
- {407, nullptr, "GetNpadOfHighestBatteryLevel"},
- {408, nullptr, "GetNpadOfHighestBatteryLevelForJoyRight"},
- {500, &Hid::GetPalmaConnectionHandle, "GetPalmaConnectionHandle"},
- {501, &Hid::InitializePalma, "InitializePalma"},
- {502, &Hid::AcquirePalmaOperationCompleteEvent, "AcquirePalmaOperationCompleteEvent"},
- {503, &Hid::GetPalmaOperationInfo, "GetPalmaOperationInfo"},
- {504, &Hid::PlayPalmaActivity, "PlayPalmaActivity"},
- {505, &Hid::SetPalmaFrModeType, "SetPalmaFrModeType"},
- {506, &Hid::ReadPalmaStep, "ReadPalmaStep"},
- {507, &Hid::EnablePalmaStep, "EnablePalmaStep"},
- {508, &Hid::ResetPalmaStep, "ResetPalmaStep"},
- {509, &Hid::ReadPalmaApplicationSection, "ReadPalmaApplicationSection"},
- {510, &Hid::WritePalmaApplicationSection, "WritePalmaApplicationSection"},
- {511, &Hid::ReadPalmaUniqueCode, "ReadPalmaUniqueCode"},
- {512, &Hid::SetPalmaUniqueCodeInvalid, "SetPalmaUniqueCodeInvalid"},
- {513, &Hid::WritePalmaActivityEntry, "WritePalmaActivityEntry"},
- {514, &Hid::WritePalmaRgbLedPatternEntry, "WritePalmaRgbLedPatternEntry"},
- {515, &Hid::WritePalmaWaveEntry, "WritePalmaWaveEntry"},
- {516, &Hid::SetPalmaDataBaseIdentificationVersion, "SetPalmaDataBaseIdentificationVersion"},
- {517, &Hid::GetPalmaDataBaseIdentificationVersion, "GetPalmaDataBaseIdentificationVersion"},
- {518, &Hid::SuspendPalmaFeature, "SuspendPalmaFeature"},
- {519, &Hid::GetPalmaOperationResult, "GetPalmaOperationResult"},
- {520, &Hid::ReadPalmaPlayLog, "ReadPalmaPlayLog"},
- {521, &Hid::ResetPalmaPlayLog, "ResetPalmaPlayLog"},
- {522, &Hid::SetIsPalmaAllConnectable, "SetIsPalmaAllConnectable"},
- {523, &Hid::SetIsPalmaPairedConnectable, "SetIsPalmaPairedConnectable"},
- {524, &Hid::PairPalma, "PairPalma"},
- {525, &Hid::SetPalmaBoostMode, "SetPalmaBoostMode"},
- {526, &Hid::CancelWritePalmaWaveEntry, "CancelWritePalmaWaveEntry"},
- {527, &Hid::EnablePalmaBoostMode, "EnablePalmaBoostMode"},
- {528, &Hid::GetPalmaBluetoothAddress, "GetPalmaBluetoothAddress"},
- {529, &Hid::SetDisallowedPalmaConnection, "SetDisallowedPalmaConnection"},
- {1000, &Hid::SetNpadCommunicationMode, "SetNpadCommunicationMode"},
- {1001, &Hid::GetNpadCommunicationMode, "GetNpadCommunicationMode"},
- {1002, &Hid::SetTouchScreenConfiguration, "SetTouchScreenConfiguration"},
- {1003, &Hid::IsFirmwareUpdateNeededForNotification, "IsFirmwareUpdateNeededForNotification"},
- {2000, nullptr, "ActivateDigitizer"},
- };
- // clang-format on
-
- RegisterHandlers(functions);
-}
-
-Hid::~Hid() = default;
-
-void Hid::CreateAppletResource(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- if (applet_resource == nullptr) {
- applet_resource = std::make_shared<IAppletResource>(system, service_context);
- }
-
- IPC::ResponseBuilder rb{ctx, 2, 0, 1};
- rb.Push(ResultSuccess);
- rb.PushIpcInterface<IAppletResource>(applet_resource);
-}
-
-void Hid::ActivateDebugPad(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::DebugPad);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ActivateTouchScreen(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::Touchscreen);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ActivateMouse(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::Mouse);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ActivateKeyboard(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::Keyboard);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SendKeyboardLockKeyEvent(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto flags{rp.Pop<u32>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called. flags={}", flags);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ActivateXpad(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- u32 basic_xpad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- applet_resource->ActivateController(HidController::XPad);
-
- LOG_DEBUG(Service_HID, "called, basic_xpad_id={}, applet_resource_user_id={}",
- parameters.basic_xpad_id, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetXpadIDs(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_DEBUG(Service_HID, "(STUBBED) called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(0);
-}
-
-void Hid::ActivateSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- u32 basic_xpad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- // This function does nothing on 10.0.0+
-
- LOG_WARNING(Service_HID, "(STUBBED) called, basic_xpad_id={}, applet_resource_user_id={}",
- parameters.basic_xpad_id, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::DeactivateSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- u32 basic_xpad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- // This function does nothing on 10.0.0+
-
- LOG_WARNING(Service_HID, "(STUBBED) called, basic_xpad_id={}, applet_resource_user_id={}",
- parameters.basic_xpad_id, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::StartSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SetSixAxisEnabled(parameters.sixaxis_handle, true);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::StopSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SetSixAxisEnabled(parameters.sixaxis_handle, false);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::IsSixAxisSensorFusionEnabled(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- bool is_enabled{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.IsSixAxisSensorFusionEnabled(parameters.sixaxis_handle, is_enabled);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(result);
- rb.Push(is_enabled);
-}
-
-void Hid::EnableSixAxisSensorFusion(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- bool enable_sixaxis_sensor_fusion;
- INSERT_PADDING_BYTES_NOINIT(3);
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SetSixAxisFusionEnabled(parameters.sixaxis_handle,
- parameters.enable_sixaxis_sensor_fusion);
-
- LOG_DEBUG(Service_HID,
- "called, enable_sixaxis_sensor_fusion={}, npad_type={}, npad_id={}, "
- "device_index={}, applet_resource_user_id={}",
- parameters.enable_sixaxis_sensor_fusion, parameters.sixaxis_handle.npad_type,
- parameters.sixaxis_handle.npad_id, parameters.sixaxis_handle.device_index,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::SetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- Core::HID::SixAxisSensorFusionParameters sixaxis_fusion;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.SetSixAxisFusionParameters(parameters.sixaxis_handle, parameters.sixaxis_fusion);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, parameter1={}, "
- "parameter2={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.sixaxis_fusion.parameter1,
- parameters.sixaxis_fusion.parameter2, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::GetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::SixAxisSensorFusionParameters fusion_parameters{};
- const auto& controller =
- GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.GetSixAxisFusionParameters(parameters.sixaxis_handle, fusion_parameters);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(result);
- rb.PushRaw(fusion_parameters);
-}
-
-void Hid::ResetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- // Since these parameters are unknown, just use what HW outputs
- const Core::HID::SixAxisSensorFusionParameters fusion_parameters{
- .parameter1 = 0.03f,
- .parameter2 = 0.4f,
- };
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result1 =
- controller.SetSixAxisFusionParameters(parameters.sixaxis_handle, fusion_parameters);
- const auto result2 = controller.SetSixAxisFusionEnabled(parameters.sixaxis_handle, true);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- if (result1.IsError()) {
- rb.Push(result1);
- return;
- }
- rb.Push(result2);
-}
-
-void Hid::SetGyroscopeZeroDriftMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto sixaxis_handle{rp.PopRaw<Core::HID::SixAxisSensorHandle>()};
- const auto drift_mode{rp.PopEnum<Core::HID::GyroscopeZeroDriftMode>()};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SetGyroscopeZeroDriftMode(sixaxis_handle, drift_mode);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, drift_mode={}, "
- "applet_resource_user_id={}",
- sixaxis_handle.npad_type, sixaxis_handle.npad_id, sixaxis_handle.device_index,
- drift_mode, applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::GetGyroscopeZeroDriftMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto drift_mode{Core::HID::GyroscopeZeroDriftMode::Standard};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.GetGyroscopeZeroDriftMode(parameters.sixaxis_handle, drift_mode);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(result);
- rb.PushEnum(drift_mode);
-}
-
-void Hid::ResetGyroscopeZeroDriftMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- const auto drift_mode{Core::HID::GyroscopeZeroDriftMode::Standard};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SetGyroscopeZeroDriftMode(parameters.sixaxis_handle, drift_mode);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::IsSixAxisSensorAtRest(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- bool is_at_rest{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.IsSixAxisSensorAtRest(parameters.sixaxis_handle, is_at_rest);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(is_at_rest);
-}
-
-void Hid::IsFirmwareUpdateAvailableForSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- bool is_firmware_available{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.IsFirmwareUpdateAvailableForSixAxisSensor(parameters.sixaxis_handle,
- is_firmware_available);
-
- LOG_WARNING(
- Service_HID,
- "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(is_firmware_available);
-}
-
-void Hid::EnableSixAxisSensorUnalteredPassthrough(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- bool enabled;
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.EnableSixAxisSensorUnalteredPassthrough(
- parameters.sixaxis_handle, parameters.enabled);
-
- LOG_DEBUG(Service_HID,
- "(STUBBED) called, enabled={}, npad_type={}, npad_id={}, device_index={}, "
- "applet_resource_user_id={}",
- parameters.enabled, parameters.sixaxis_handle.npad_type,
- parameters.sixaxis_handle.npad_id, parameters.sixaxis_handle.device_index,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::IsSixAxisSensorUnalteredPassthroughEnabled(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- bool is_unaltered_sixaxis_enabled{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.IsSixAxisSensorUnalteredPassthroughEnabled(
- parameters.sixaxis_handle, is_unaltered_sixaxis_enabled);
-
- LOG_DEBUG(
- Service_HID,
- "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(result);
- rb.Push(is_unaltered_sixaxis_enabled);
-}
-
-void Hid::LoadSixAxisSensorCalibrationParameter(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::SixAxisSensorCalibrationParameter calibration{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.LoadSixAxisSensorCalibrationParameter(parameters.sixaxis_handle, calibration);
-
- LOG_WARNING(
- Service_HID,
- "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- if (result.IsSuccess()) {
- ctx.WriteBuffer(calibration);
- }
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::GetSixAxisSensorIcInformation(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::SixAxisSensorIcInformation ic_information{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.GetSixAxisSensorIcInformation(parameters.sixaxis_handle, ic_information);
-
- LOG_WARNING(
- Service_HID,
- "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- if (result.IsSuccess()) {
- ctx.WriteBuffer(ic_information);
- }
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ResetIsSixAxisSensorDeviceNewlyAssigned(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::SixAxisSensorHandle sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.ResetIsSixAxisSensorDeviceNewlyAssigned(parameters.sixaxis_handle);
-
- LOG_WARNING(
- Service_HID,
- "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
- parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ActivateGesture(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- u32 unknown;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- applet_resource->ActivateController(HidController::Gesture);
-
- LOG_WARNING(Service_HID, "(STUBBED) called, unknown={}, applet_resource_user_id={}",
- parameters.unknown, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetSupportedNpadStyleSet(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadStyleSet supported_styleset;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetSupportedStyleSet({parameters.supported_styleset});
-
- LOG_DEBUG(Service_HID, "called, supported_styleset={}, applet_resource_user_id={}",
- parameters.supported_styleset, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetSupportedNpadStyleSet(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.PushEnum(applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .GetSupportedStyleSet()
- .raw);
-}
-
-void Hid::SetSupportedNpadIdType(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- const auto result = applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetSupportedNpadIdTypes(ctx.ReadBuffer());
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ActivateNpad(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::NPad);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::DeactivateNpad(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->DeactivateController(HidController::NPad);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::AcquireNpadStyleSetUpdateEventHandle(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- u64 unknown;
- };
- static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}, unknown={}",
- parameters.npad_id, parameters.applet_resource_user_id, parameters.unknown);
-
- // Games expect this event to be signaled after calling this function
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SignalStyleSetChangedEvent(parameters.npad_id);
-
- IPC::ResponseBuilder rb{ctx, 2, 1};
- rb.Push(ResultSuccess);
- rb.PushCopyObjects(applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .GetStyleSetChangedEvent(parameters.npad_id));
-}
-
-void Hid::DisconnectNpad(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.DisconnectNpad(parameters.npad_id);
-
- LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetPlayerLedPattern(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto npad_id{rp.PopEnum<Core::HID::NpadIdType>()};
-
- Core::HID::LedPattern pattern{0, 0, 0, 0};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.GetLedPattern(npad_id, pattern);
-
- LOG_DEBUG(Service_HID, "called, npad_id={}", npad_id);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(result);
- rb.Push(pattern.raw);
-}
-
-void Hid::ActivateNpadWithRevision(HLERequestContext& ctx) {
- // Should have no effect with how our npad sets up the data
- IPC::RequestParser rp{ctx};
- struct Parameters {
- s32 revision;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- applet_resource->ActivateController(HidController::NPad);
-
- LOG_DEBUG(Service_HID, "called, revision={}, applet_resource_user_id={}", parameters.revision,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetNpadJoyHoldType(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
- const auto hold_type{rp.PopEnum<Controller_NPad::NpadJoyHoldType>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad).SetHoldType(hold_type);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, hold_type={}",
- applet_resource_user_id, hold_type);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetNpadJoyHoldType(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(ResultSuccess);
- rb.PushEnum(applet_resource->GetController<Controller_NPad>(HidController::NPad).GetHoldType());
-}
-
-void Hid::SetNpadJoyAssignmentModeSingleByDefault(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::NpadIdType new_npad_id{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.SetNpadMode(new_npad_id, parameters.npad_id,
- Controller_NPad::NpadJoyDeviceType::Left,
- Controller_NPad::NpadJoyAssignmentMode::Single);
-
- LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetNpadJoyAssignmentModeSingle(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- Controller_NPad::NpadJoyDeviceType npad_joy_device_type;
- };
- static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::NpadIdType new_npad_id{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.SetNpadMode(new_npad_id, parameters.npad_id, parameters.npad_joy_device_type,
- Controller_NPad::NpadJoyAssignmentMode::Single);
-
- LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
- parameters.npad_id, parameters.applet_resource_user_id,
- parameters.npad_joy_device_type);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetNpadJoyAssignmentModeDual(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::NpadIdType new_npad_id{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- controller.SetNpadMode(new_npad_id, parameters.npad_id, {},
- Controller_NPad::NpadJoyAssignmentMode::Dual);
-
- LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::MergeSingleJoyAsDualJoy(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto npad_id_1{rp.PopEnum<Core::HID::NpadIdType>()};
- const auto npad_id_2{rp.PopEnum<Core::HID::NpadIdType>()};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.MergeSingleJoyAsDualJoy(npad_id_1, npad_id_2);
-
- LOG_DEBUG(Service_HID, "called, npad_id_1={}, npad_id_2={}, applet_resource_user_id={}",
- npad_id_1, npad_id_2, applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::StartLrAssignmentMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad).StartLRAssignmentMode();
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::StopLrAssignmentMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad).StopLRAssignmentMode();
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetNpadHandheldActivationMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
- const auto activation_mode{rp.PopEnum<Controller_NPad::NpadHandheldActivationMode>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetNpadHandheldActivationMode(activation_mode);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, activation_mode={}",
- applet_resource_user_id, activation_mode);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetNpadHandheldActivationMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(ResultSuccess);
- rb.PushEnum(applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .GetNpadHandheldActivationMode());
-}
-
-void Hid::SwapNpadAssignment(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto npad_id_1{rp.PopEnum<Core::HID::NpadIdType>()};
- const auto npad_id_2{rp.PopEnum<Core::HID::NpadIdType>()};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SwapNpadAssignment(npad_id_1, npad_id_2);
-
- LOG_DEBUG(Service_HID, "called, npad_id_1={}, npad_id_2={}, applet_resource_user_id={}",
- npad_id_1, npad_id_2, applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::IsUnintendedHomeButtonInputProtectionEnabled(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- bool is_enabled = false;
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result =
- controller.IsUnintendedHomeButtonInputProtectionEnabled(parameters.npad_id, is_enabled);
-
- LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
- parameters.npad_id, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(result);
- rb.Push(is_enabled);
-}
-
-void Hid::EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- bool unintended_home_button_input_protection;
- INSERT_PADDING_BYTES_NOINIT(3);
- Core::HID::NpadIdType npad_id;
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto result = controller.SetUnintendedHomeButtonInputProtectionEnabled(
- parameters.unintended_home_button_input_protection, parameters.npad_id);
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, unintended_home_button_input_protection={}, npad_id={},"
- "applet_resource_user_id={}",
- parameters.unintended_home_button_input_protection, parameters.npad_id,
- parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::SetNpadJoyAssignmentModeSingleWithDestination(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- Controller_NPad::NpadJoyDeviceType npad_joy_device_type;
- };
- static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- Core::HID::NpadIdType new_npad_id{};
- auto& controller = GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
- const auto is_reassigned =
- controller.SetNpadMode(new_npad_id, parameters.npad_id, parameters.npad_joy_device_type,
- Controller_NPad::NpadJoyAssignmentMode::Single);
-
- LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
- parameters.npad_id, parameters.applet_resource_user_id,
- parameters.npad_joy_device_type);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(ResultSuccess);
- rb.Push(is_reassigned);
- rb.PushEnum(new_npad_id);
-}
-
-void Hid::SetNpadAnalogStickUseCenterClamp(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- bool analog_stick_use_center_clamp;
- INSERT_PADDING_BYTES_NOINIT(7);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- GetAppletResource()
- ->GetController<Controller_NPad>(HidController::NPad)
- .SetAnalogStickUseCenterClamp(parameters.analog_stick_use_center_clamp);
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, analog_stick_use_center_clamp={}, applet_resource_user_id={}",
- parameters.analog_stick_use_center_clamp, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetNpadCaptureButtonAssignment(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadStyleSet npad_styleset;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- Core::HID::NpadButton button;
- };
- static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, npad_styleset={}, applet_resource_user_id={}, button={}",
- parameters.npad_styleset, parameters.applet_resource_user_id, parameters.button);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ClearNpadCaptureButtonAssignment(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
- applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetVibrationDeviceInfo(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()};
- const auto& controller =
- GetAppletResource()->GetController<Controller_NPad>(HidController::NPad);
-
- Core::HID::VibrationDeviceInfo vibration_device_info;
- bool check_device_index = false;
-
- switch (vibration_device_handle.npad_type) {
- case Core::HID::NpadStyleIndex::ProController:
- case Core::HID::NpadStyleIndex::Handheld:
- case Core::HID::NpadStyleIndex::JoyconDual:
- case Core::HID::NpadStyleIndex::JoyconLeft:
- case Core::HID::NpadStyleIndex::JoyconRight:
- vibration_device_info.type = Core::HID::VibrationDeviceType::LinearResonantActuator;
- check_device_index = true;
- break;
- case Core::HID::NpadStyleIndex::GameCube:
- vibration_device_info.type = Core::HID::VibrationDeviceType::GcErm;
- break;
- case Core::HID::NpadStyleIndex::N64:
- vibration_device_info.type = Core::HID::VibrationDeviceType::N64;
- break;
- default:
- vibration_device_info.type = Core::HID::VibrationDeviceType::Unknown;
- break;
- }
-
- vibration_device_info.position = Core::HID::VibrationDevicePosition::None;
- if (check_device_index) {
- switch (vibration_device_handle.device_index) {
- case Core::HID::DeviceIndex::Left:
- vibration_device_info.position = Core::HID::VibrationDevicePosition::Left;
- break;
- case Core::HID::DeviceIndex::Right:
- vibration_device_info.position = Core::HID::VibrationDevicePosition::Right;
- break;
- case Core::HID::DeviceIndex::None:
- default:
- ASSERT_MSG(false, "DeviceIndex should never be None!");
- break;
- }
- }
-
- LOG_DEBUG(Service_HID, "called, vibration_device_type={}, vibration_device_position={}",
- vibration_device_info.type, vibration_device_info.position);
-
- const auto result = controller.IsDeviceHandleValid(vibration_device_handle);
- if (result.IsError()) {
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
- return;
- }
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(ResultSuccess);
- rb.PushRaw(vibration_device_info);
-}
-
-void Hid::SendVibrationValue(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::VibrationDeviceHandle vibration_device_handle;
- Core::HID::VibrationValue vibration_value;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x20, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .VibrateController(parameters.vibration_device_handle, parameters.vibration_value);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.vibration_device_handle.npad_type,
- parameters.vibration_device_handle.npad_id,
- parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetActualVibrationValue(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::VibrationDeviceHandle vibration_device_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.vibration_device_handle.npad_type,
- parameters.vibration_device_handle.npad_id,
- parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 6};
- rb.Push(ResultSuccess);
- rb.PushRaw(applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .GetLastVibration(parameters.vibration_device_handle));
-}
-
-void Hid::CreateActiveVibrationDeviceList(HLERequestContext& ctx) {
- LOG_DEBUG(Service_HID, "called");
-
- IPC::ResponseBuilder rb{ctx, 2, 0, 1};
- rb.Push(ResultSuccess);
- rb.PushIpcInterface<IActiveVibrationDeviceList>(system, applet_resource);
-}
-
-void Hid::PermitVibration(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto can_vibrate{rp.Pop<bool>()};
-
- // nnSDK saves this value as a float. Since it can only be 1.0f or 0.0f we simplify this value
- // by converting it to a bool
- Settings::values.vibration_enabled.SetValue(can_vibrate);
-
- LOG_DEBUG(Service_HID, "called, can_vibrate={}", can_vibrate);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::IsVibrationPermitted(HLERequestContext& ctx) {
- LOG_DEBUG(Service_HID, "called");
-
- // nnSDK checks if a float is greater than zero. We return the bool we stored earlier
- const auto is_enabled = Settings::values.vibration_enabled.GetValue();
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(is_enabled);
-}
-
-void Hid::SendVibrationValues(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- const auto handle_data = ctx.ReadBuffer(0);
- const auto handle_count = ctx.GetReadBufferNumElements<Core::HID::VibrationDeviceHandle>(0);
- const auto vibration_data = ctx.ReadBuffer(1);
- const auto vibration_count = ctx.GetReadBufferNumElements<Core::HID::VibrationValue>(1);
-
- auto vibration_device_handles =
- std::span(reinterpret_cast<const Core::HID::VibrationDeviceHandle*>(handle_data.data()),
- handle_count);
- auto vibration_values = std::span(
- reinterpret_cast<const Core::HID::VibrationValue*>(vibration_data.data()), vibration_count);
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .VibrateControllers(vibration_device_handles, vibration_values);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SendVibrationGcErmCommand(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::VibrationDeviceHandle vibration_device_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- Core::HID::VibrationGcErmCommand gc_erm_command;
- };
- static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- /**
- * Note: This uses yuzu-specific behavior such that the StopHard command produces
- * vibrations where freq_low == 0.0f and freq_high == 0.0f, as defined below,
- * in order to differentiate between Stop and StopHard commands.
- * This is done to reuse the controller vibration functions made for regular controllers.
- */
- const auto vibration_value = [parameters] {
- switch (parameters.gc_erm_command) {
- case Core::HID::VibrationGcErmCommand::Stop:
- return Core::HID::VibrationValue{
- .low_amplitude = 0.0f,
- .low_frequency = 160.0f,
- .high_amplitude = 0.0f,
- .high_frequency = 320.0f,
- };
- case Core::HID::VibrationGcErmCommand::Start:
- return Core::HID::VibrationValue{
- .low_amplitude = 1.0f,
- .low_frequency = 160.0f,
- .high_amplitude = 1.0f,
- .high_frequency = 320.0f,
- };
- case Core::HID::VibrationGcErmCommand::StopHard:
- return Core::HID::VibrationValue{
- .low_amplitude = 0.0f,
- .low_frequency = 0.0f,
- .high_amplitude = 0.0f,
- .high_frequency = 0.0f,
- };
- default:
- return Core::HID::DEFAULT_VIBRATION_VALUE;
- }
- }();
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .VibrateController(parameters.vibration_device_handle, vibration_value);
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}, "
- "gc_erm_command={}",
- parameters.vibration_device_handle.npad_type,
- parameters.vibration_device_handle.npad_id,
- parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id,
- parameters.gc_erm_command);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetActualVibrationGcErmCommand(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::VibrationDeviceHandle vibration_device_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- const auto last_vibration = applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .GetLastVibration(parameters.vibration_device_handle);
-
- const auto gc_erm_command = [last_vibration] {
- if (last_vibration.low_amplitude != 0.0f || last_vibration.high_amplitude != 0.0f) {
- return Core::HID::VibrationGcErmCommand::Start;
- }
-
- /**
- * Note: This uses yuzu-specific behavior such that the StopHard command produces
- * vibrations where freq_low == 0.0f and freq_high == 0.0f, as defined in the HID function
- * SendVibrationGcErmCommand, in order to differentiate between Stop and StopHard commands.
- * This is done to reuse the controller vibration functions made for regular controllers.
- */
- if (last_vibration.low_frequency == 0.0f && last_vibration.high_frequency == 0.0f) {
- return Core::HID::VibrationGcErmCommand::StopHard;
- }
-
- return Core::HID::VibrationGcErmCommand::Stop;
- }();
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.vibration_device_handle.npad_type,
- parameters.vibration_device_handle.npad_id,
- parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(ResultSuccess);
- rb.PushEnum(gc_erm_command);
-}
-
-void Hid::BeginPermitVibrationSession(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetPermitVibrationSession(true);
-
- LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::EndPermitVibrationSession(HLERequestContext& ctx) {
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetPermitVibrationSession(false);
-
- LOG_DEBUG(Service_HID, "called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::IsVibrationDeviceMounted(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::VibrationDeviceHandle vibration_device_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_DEBUG(Service_HID,
- "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
- parameters.vibration_device_handle.npad_type,
- parameters.vibration_device_handle.npad_id,
- parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .IsVibrationDeviceMounted(parameters.vibration_device_handle));
-}
-
-void Hid::ActivateConsoleSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::ConsoleSixAxisSensor);
-
- LOG_WARNING(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::StartConsoleSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::ConsoleSixAxisSensorHandle console_sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
- parameters.console_sixaxis_handle.unknown_1,
- parameters.console_sixaxis_handle.unknown_2, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::StopConsoleSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::ConsoleSixAxisSensorHandle console_sixaxis_handle;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
- parameters.console_sixaxis_handle.unknown_1,
- parameters.console_sixaxis_handle.unknown_2, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ActivateSevenSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->ActivateController(HidController::ConsoleSixAxisSensor);
-
- LOG_WARNING(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::StartSevenSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
- applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::StopSevenSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
- applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::InitializeSevenSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
- const auto t_mem_1_size{rp.Pop<u64>()};
- const auto t_mem_2_size{rp.Pop<u64>()};
- const auto t_mem_1_handle{ctx.GetCopyHandle(0)};
- const auto t_mem_2_handle{ctx.GetCopyHandle(1)};
-
- ASSERT_MSG(t_mem_1_size == 0x1000, "t_mem_1_size is not 0x1000 bytes");
- ASSERT_MSG(t_mem_2_size == 0x7F000, "t_mem_2_size is not 0x7F000 bytes");
-
- auto t_mem_1 = system.ApplicationProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(
- t_mem_1_handle);
-
- if (t_mem_1.IsNull()) {
- LOG_ERROR(Service_HID, "t_mem_1 is a nullptr for handle=0x{:08X}", t_mem_1_handle);
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultUnknown);
- return;
- }
-
- auto t_mem_2 = system.ApplicationProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(
- t_mem_2_handle);
-
- if (t_mem_2.IsNull()) {
- LOG_ERROR(Service_HID, "t_mem_2 is a nullptr for handle=0x{:08X}", t_mem_2_handle);
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultUnknown);
- return;
- }
-
- ASSERT_MSG(t_mem_1->GetSize() == 0x1000, "t_mem_1 has incorrect size");
- ASSERT_MSG(t_mem_2->GetSize() == 0x7F000, "t_mem_2 has incorrect size");
-
- // Activate console six axis controller
- applet_resource->GetController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor)
- .ActivateController();
-
- applet_resource->GetController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor)
- .SetTransferMemoryAddress(t_mem_1->GetSourceAddress());
-
- LOG_WARNING(Service_HID,
- "called, t_mem_1_handle=0x{:08X}, t_mem_2_handle=0x{:08X}, "
- "applet_resource_user_id={}",
- t_mem_1_handle, t_mem_2_handle, applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::FinalizeSevenSixAxisSensor(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
- applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ResetSevenSixAxisSensorTimestamp(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- applet_resource->GetController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor)
- .ResetTimestamp();
-
- LOG_WARNING(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::IsUsbFullKeyControllerEnabled(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
-
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(false);
-}
-
-void Hid::GetPalmaConnectionHandle(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- Core::HID::NpadIdType npad_id;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
- parameters.npad_id, parameters.applet_resource_user_id);
-
- Controller_Palma::PalmaConnectionHandle handle;
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.GetPalmaConnectionHandle(parameters.npad_id, handle);
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(result);
- rb.PushRaw(handle);
-}
-
-void Hid::InitializePalma(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.InitializePalma(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::AcquirePalmaOperationCompleteEvent(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
-
- IPC::ResponseBuilder rb{ctx, 2, 1};
- rb.Push(ResultSuccess);
- rb.PushCopyObjects(controller.AcquirePalmaOperationCompleteEvent(connection_handle));
-}
-
-void Hid::GetPalmaOperationInfo(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- Controller_Palma::PalmaOperationType operation_type;
- Controller_Palma::PalmaOperationData data;
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.GetPalmaOperationInfo(connection_handle, operation_type, data);
-
- if (result.IsError()) {
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
- return;
- }
-
- ctx.WriteBuffer(data);
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(result);
- rb.Push(static_cast<u64>(operation_type));
-}
-
-void Hid::PlayPalmaActivity(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
- const auto palma_activity{rp.Pop<u64>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, palma_activity={}",
- connection_handle.npad_id, palma_activity);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.PlayPalmaActivity(connection_handle, palma_activity);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::SetPalmaFrModeType(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
- const auto fr_mode{rp.PopEnum<Controller_Palma::PalmaFrModeType>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, fr_mode={}",
- connection_handle.npad_id, fr_mode);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.SetPalmaFrModeType(connection_handle, fr_mode);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ReadPalmaStep(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.ReadPalmaStep(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::EnablePalmaStep(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- bool is_enabled;
- INSERT_PADDING_WORDS_NOINIT(1);
- Controller_Palma::PalmaConnectionHandle connection_handle;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, is_enabled={}",
- parameters.connection_handle.npad_id, parameters.is_enabled);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result =
- controller.EnablePalmaStep(parameters.connection_handle, parameters.is_enabled);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ResetPalmaStep(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
- const auto result = controller.ResetPalmaStep(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ReadPalmaApplicationSection(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::WritePalmaApplicationSection(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ReadPalmaUniqueCode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .ReadPalmaUniqueCode(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetPalmaUniqueCodeInvalid(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .SetPalmaUniqueCodeInvalid(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::WritePalmaActivityEntry(HLERequestContext& ctx) {
- LOG_CRITICAL(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::WritePalmaRgbLedPatternEntry(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
- const auto unknown{rp.Pop<u64>()};
-
- [[maybe_unused]] const auto buffer = ctx.ReadBuffer();
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, unknown={}",
- connection_handle.npad_id, unknown);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .WritePalmaRgbLedPatternEntry(connection_handle, unknown);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::WritePalmaWaveEntry(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
- const auto wave_set{rp.PopEnum<Controller_Palma::PalmaWaveSet>()};
- const auto unknown{rp.Pop<u64>()};
- const auto t_mem_size{rp.Pop<u64>()};
- const auto t_mem_handle{ctx.GetCopyHandle(0)};
- const auto size{rp.Pop<u64>()};
-
- ASSERT_MSG(t_mem_size == 0x3000, "t_mem_size is not 0x3000 bytes");
-
- auto t_mem = system.ApplicationProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(
- t_mem_handle);
-
- if (t_mem.IsNull()) {
- LOG_ERROR(Service_HID, "t_mem is a nullptr for handle=0x{:08X}", t_mem_handle);
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultUnknown);
- return;
- }
-
- ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size");
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, "
- "t_mem_handle=0x{:08X}, t_mem_size={}, size={}",
- connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .WritePalmaWaveEntry(connection_handle, wave_set, t_mem->GetSourceAddress(), t_mem_size);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- s32 database_id_version;
- INSERT_PADDING_WORDS_NOINIT(1);
- Controller_Palma::PalmaConnectionHandle connection_handle;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, database_id_version={}",
- parameters.connection_handle.npad_id, parameters.database_id_version);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .SetPalmaDataBaseIdentificationVersion(parameters.connection_handle,
- parameters.database_id_version);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .GetPalmaDataBaseIdentificationVersion(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SuspendPalmaFeature(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetPalmaOperationResult(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- const auto result = applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .GetPalmaOperationResult(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(result);
-}
-
-void Hid::ReadPalmaPlayLog(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::ResetPalmaPlayLog(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetIsPalmaAllConnectable(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- bool is_palma_all_connectable;
- INSERT_PADDING_BYTES_NOINIT(7);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID,
- "(STUBBED) called, is_palma_all_connectable={},applet_resource_user_id={}",
- parameters.is_palma_all_connectable, parameters.applet_resource_user_id);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .SetIsPalmaAllConnectable(parameters.is_palma_all_connectable);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetIsPalmaPairedConnectable(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::PairPalma(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .PairPalma(connection_handle);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetPalmaBoostMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto palma_boost_mode{rp.Pop<bool>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, palma_boost_mode={}", palma_boost_mode);
-
- applet_resource->GetController<Controller_Palma>(HidController::Palma)
- .SetPalmaBoostMode(palma_boost_mode);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::CancelWritePalmaWaveEntry(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::EnablePalmaBoostMode(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetPalmaBluetoothAddress(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetDisallowedPalmaConnection(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::SetNpadCommunicationMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto applet_resource_user_id{rp.Pop<u64>()};
- const auto communication_mode{rp.PopEnum<Controller_NPad::NpadCommunicationMode>()};
-
- applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .SetNpadCommunicationMode(communication_mode);
-
- LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, communication_mode={}",
- applet_resource_user_id, communication_mode);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::GetNpadCommunicationMode(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
-
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 4};
- rb.Push(ResultSuccess);
- rb.PushEnum(applet_resource->GetController<Controller_NPad>(HidController::NPad)
- .GetNpadCommunicationMode());
-}
-
-void Hid::SetTouchScreenConfiguration(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto touchscreen_mode{rp.PopRaw<Core::HID::TouchScreenConfigurationForNx>()};
- const auto applet_resource_user_id{rp.Pop<u64>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, touchscreen_mode={}, applet_resource_user_id={}",
- touchscreen_mode.mode, applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
-}
-
-void Hid::IsFirmwareUpdateNeededForNotification(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- struct Parameters {
- s32 unknown;
- INSERT_PADDING_WORDS_NOINIT(1);
- u64 applet_resource_user_id;
- };
- static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
-
- const auto parameters{rp.PopRaw<Parameters>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, unknown={}, applet_resource_user_id={}",
- parameters.unknown, parameters.applet_resource_user_id);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(false);
-}
-
-class HidDbg final : public ServiceFramework<HidDbg> {
-public:
- explicit HidDbg(Core::System& system_) : ServiceFramework{system_, "hid:dbg"} {
- // clang-format off
- static const FunctionInfo functions[] = {
- {0, nullptr, "DeactivateDebugPad"},
- {1, nullptr, "SetDebugPadAutoPilotState"},
- {2, nullptr, "UnsetDebugPadAutoPilotState"},
- {10, nullptr, "DeactivateTouchScreen"},
- {11, nullptr, "SetTouchScreenAutoPilotState"},
- {12, nullptr, "UnsetTouchScreenAutoPilotState"},
- {13, nullptr, "GetTouchScreenConfiguration"},
- {14, nullptr, "ProcessTouchScreenAutoTune"},
- {15, nullptr, "ForceStopTouchScreenManagement"},
- {16, nullptr, "ForceRestartTouchScreenManagement"},
- {17, nullptr, "IsTouchScreenManaged"},
- {20, nullptr, "DeactivateMouse"},
- {21, nullptr, "SetMouseAutoPilotState"},
- {22, nullptr, "UnsetMouseAutoPilotState"},
- {25, nullptr, "SetDebugMouseAutoPilotState"},
- {26, nullptr, "UnsetDebugMouseAutoPilotState"},
- {30, nullptr, "DeactivateKeyboard"},
- {31, nullptr, "SetKeyboardAutoPilotState"},
- {32, nullptr, "UnsetKeyboardAutoPilotState"},
- {50, nullptr, "DeactivateXpad"},
- {51, nullptr, "SetXpadAutoPilotState"},
- {52, nullptr, "UnsetXpadAutoPilotState"},
- {53, nullptr, "DeactivateJoyXpad"},
- {60, nullptr, "ClearNpadSystemCommonPolicy"},
- {61, nullptr, "DeactivateNpad"},
- {62, nullptr, "ForceDisconnectNpad"},
- {91, nullptr, "DeactivateGesture"},
- {110, nullptr, "DeactivateHomeButton"},
- {111, nullptr, "SetHomeButtonAutoPilotState"},
- {112, nullptr, "UnsetHomeButtonAutoPilotState"},
- {120, nullptr, "DeactivateSleepButton"},
- {121, nullptr, "SetSleepButtonAutoPilotState"},
- {122, nullptr, "UnsetSleepButtonAutoPilotState"},
- {123, nullptr, "DeactivateInputDetector"},
- {130, nullptr, "DeactivateCaptureButton"},
- {131, nullptr, "SetCaptureButtonAutoPilotState"},
- {132, nullptr, "UnsetCaptureButtonAutoPilotState"},
- {133, nullptr, "SetShiftAccelerometerCalibrationValue"},
- {134, nullptr, "GetShiftAccelerometerCalibrationValue"},
- {135, nullptr, "SetShiftGyroscopeCalibrationValue"},
- {136, nullptr, "GetShiftGyroscopeCalibrationValue"},
- {140, nullptr, "DeactivateConsoleSixAxisSensor"},
- {141, nullptr, "GetConsoleSixAxisSensorSamplingFrequency"},
- {142, nullptr, "DeactivateSevenSixAxisSensor"},
- {143, nullptr, "GetConsoleSixAxisSensorCountStates"},
- {144, nullptr, "GetAccelerometerFsr"},
- {145, nullptr, "SetAccelerometerFsr"},
- {146, nullptr, "GetAccelerometerOdr"},
- {147, nullptr, "SetAccelerometerOdr"},
- {148, nullptr, "GetGyroscopeFsr"},
- {149, nullptr, "SetGyroscopeFsr"},
- {150, nullptr, "GetGyroscopeOdr"},
- {151, nullptr, "SetGyroscopeOdr"},
- {152, nullptr, "GetWhoAmI"},
- {201, nullptr, "ActivateFirmwareUpdate"},
- {202, nullptr, "DeactivateFirmwareUpdate"},
- {203, nullptr, "StartFirmwareUpdate"},
- {204, nullptr, "GetFirmwareUpdateStage"},
- {205, nullptr, "GetFirmwareVersion"},
- {206, nullptr, "GetDestinationFirmwareVersion"},
- {207, nullptr, "DiscardFirmwareInfoCacheForRevert"},
- {208, nullptr, "StartFirmwareUpdateForRevert"},
- {209, nullptr, "GetAvailableFirmwareVersionForRevert"},
- {210, nullptr, "IsFirmwareUpdatingDevice"},
- {211, nullptr, "StartFirmwareUpdateIndividual"},
- {215, nullptr, "SetUsbFirmwareForceUpdateEnabled"},
- {216, nullptr, "SetAllKuinaDevicesToFirmwareUpdateMode"},
- {221, nullptr, "UpdateControllerColor"},
- {222, nullptr, "ConnectUsbPadsAsync"},
- {223, nullptr, "DisconnectUsbPadsAsync"},
- {224, nullptr, "UpdateDesignInfo"},
- {225, nullptr, "GetUniquePadDriverState"},
- {226, nullptr, "GetSixAxisSensorDriverStates"},
- {227, nullptr, "GetRxPacketHistory"},
- {228, nullptr, "AcquireOperationEventHandle"},
- {229, nullptr, "ReadSerialFlash"},
- {230, nullptr, "WriteSerialFlash"},
- {231, nullptr, "GetOperationResult"},
- {232, nullptr, "EnableShipmentMode"},
- {233, nullptr, "ClearPairingInfo"},
- {234, nullptr, "GetUniquePadDeviceTypeSetInternal"},
- {235, nullptr, "EnableAnalogStickPower"},
- {236, nullptr, "RequestKuinaUartClockCal"},
- {237, nullptr, "GetKuinaUartClockCal"},
- {238, nullptr, "SetKuinaUartClockTrim"},
- {239, nullptr, "KuinaLoopbackTest"},
- {240, nullptr, "RequestBatteryVoltage"},
- {241, nullptr, "GetBatteryVoltage"},
- {242, nullptr, "GetUniquePadPowerInfo"},
- {243, nullptr, "RebootUniquePad"},
- {244, nullptr, "RequestKuinaFirmwareVersion"},
- {245, nullptr, "GetKuinaFirmwareVersion"},
- {246, nullptr, "GetVidPid"},
- {247, nullptr, "GetAnalogStickCalibrationValue"},
- {248, nullptr, "GetUniquePadIdsFull"},
- {249, nullptr, "ConnectUniquePad"},
- {250, nullptr, "IsVirtual"},
- {251, nullptr, "GetAnalogStickModuleParam"},
- {301, nullptr, "GetAbstractedPadHandles"},
- {302, nullptr, "GetAbstractedPadState"},
- {303, nullptr, "GetAbstractedPadsState"},
- {321, nullptr, "SetAutoPilotVirtualPadState"},
- {322, nullptr, "UnsetAutoPilotVirtualPadState"},
- {323, nullptr, "UnsetAllAutoPilotVirtualPadState"},
- {324, nullptr, "AttachHdlsWorkBuffer"},
- {325, nullptr, "ReleaseHdlsWorkBuffer"},
- {326, nullptr, "DumpHdlsNpadAssignmentState"},
- {327, nullptr, "DumpHdlsStates"},
- {328, nullptr, "ApplyHdlsNpadAssignmentState"},
- {329, nullptr, "ApplyHdlsStateList"},
- {330, nullptr, "AttachHdlsVirtualDevice"},
- {331, nullptr, "DetachHdlsVirtualDevice"},
- {332, nullptr, "SetHdlsState"},
- {350, nullptr, "AddRegisteredDevice"},
- {400, nullptr, "DisableExternalMcuOnNxDevice"},
- {401, nullptr, "DisableRailDeviceFiltering"},
- {402, nullptr, "EnableWiredPairing"},
- {403, nullptr, "EnableShipmentModeAutoClear"},
- {404, nullptr, "SetRailEnabled"},
- {500, nullptr, "SetFactoryInt"},
- {501, nullptr, "IsFactoryBootEnabled"},
- {550, nullptr, "SetAnalogStickModelDataTemporarily"},
- {551, nullptr, "GetAnalogStickModelData"},
- {552, nullptr, "ResetAnalogStickModelData"},
- {600, nullptr, "ConvertPadState"},
- {650, nullptr, "AddButtonPlayData"},
- {651, nullptr, "StartButtonPlayData"},
- {652, nullptr, "StopButtonPlayData"},
- {2000, nullptr, "DeactivateDigitizer"},
- {2001, nullptr, "SetDigitizerAutoPilotState"},
- {2002, nullptr, "UnsetDigitizerAutoPilotState"},
- {2002, nullptr, "ReloadFirmwareDebugSettings"},
- };
- // clang-format on
-
- RegisterHandlers(functions);
- }
-};
-
-class HidSys final : public ServiceFramework<HidSys> {
-public:
- explicit HidSys(Core::System& system_, std::shared_ptr<IAppletResource> applet_resource_)
- : ServiceFramework{system_, "hid:sys"}, service_context{system_, "hid:sys"},
- applet_resource{applet_resource_} {
- // clang-format off
- static const FunctionInfo functions[] = {
- {31, nullptr, "SendKeyboardLockKeyEvent"},
- {101, nullptr, "AcquireHomeButtonEventHandle"},
- {111, nullptr, "ActivateHomeButton"},
- {121, nullptr, "AcquireSleepButtonEventHandle"},
- {131, nullptr, "ActivateSleepButton"},
- {141, nullptr, "AcquireCaptureButtonEventHandle"},
- {151, nullptr, "ActivateCaptureButton"},
- {161, nullptr, "GetPlatformConfig"},
- {210, nullptr, "AcquireNfcDeviceUpdateEventHandle"},
- {211, nullptr, "GetNpadsWithNfc"},
- {212, nullptr, "AcquireNfcActivateEventHandle"},
- {213, nullptr, "ActivateNfc"},
- {214, nullptr, "GetXcdHandleForNpadWithNfc"},
- {215, nullptr, "IsNfcActivated"},
- {230, nullptr, "AcquireIrSensorEventHandle"},
- {231, nullptr, "ActivateIrSensor"},
- {232, nullptr, "GetIrSensorState"},
- {233, nullptr, "GetXcdHandleForNpadWithIrSensor"},
- {301, nullptr, "ActivateNpadSystem"},
- {303, &HidSys::ApplyNpadSystemCommonPolicy, "ApplyNpadSystemCommonPolicy"},
- {304, nullptr, "EnableAssigningSingleOnSlSrPress"},
- {305, nullptr, "DisableAssigningSingleOnSlSrPress"},
- {306, &HidSys::GetLastActiveNpad, "GetLastActiveNpad"},
- {307, nullptr, "GetNpadSystemExtStyle"},
- {308, nullptr, "ApplyNpadSystemCommonPolicyFull"},
- {309, nullptr, "GetNpadFullKeyGripColor"},
- {310, nullptr, "GetMaskedSupportedNpadStyleSet"},
- {311, nullptr, "SetNpadPlayerLedBlinkingDevice"},
- {312, nullptr, "SetSupportedNpadStyleSetAll"},
- {313, nullptr, "GetNpadCaptureButtonAssignment"},
- {314, nullptr, "GetAppletFooterUiType"},
- {315, nullptr, "GetAppletDetailedUiType"},
- {316, nullptr, "GetNpadInterfaceType"},
- {317, nullptr, "GetNpadLeftRightInterfaceType"},
- {318, nullptr, "HasBattery"},
- {319, nullptr, "HasLeftRightBattery"},
- {321, &HidSys::GetUniquePadsFromNpad, "GetUniquePadsFromNpad"},
- {322, nullptr, "GetIrSensorState"},
- {323, nullptr, "GetXcdHandleForNpadWithIrSensor"},
- {324, nullptr, "GetUniquePadButtonSet"},
- {325, nullptr, "GetUniquePadColor"},
- {326, nullptr, "GetUniquePadAppletDetailedUiType"},
- {327, nullptr, "GetAbstractedPadIdDataFromNpad"},
- {328, nullptr, "AttachAbstractedPadToNpad"},
- {329, nullptr, "DetachAbstractedPadAll"},
- {330, nullptr, "CheckAbstractedPadConnection"},
- {500, nullptr, "SetAppletResourceUserId"},
- {501, nullptr, "RegisterAppletResourceUserId"},
- {502, nullptr, "UnregisterAppletResourceUserId"},
- {503, nullptr, "EnableAppletToGetInput"},
- {504, nullptr, "SetAruidValidForVibration"},
- {505, nullptr, "EnableAppletToGetSixAxisSensor"},
- {506, nullptr, "EnableAppletToGetPadInput"},
- {507, nullptr, "EnableAppletToGetTouchScreen"},
- {510, nullptr, "SetVibrationMasterVolume"},
- {511, nullptr, "GetVibrationMasterVolume"},
- {512, nullptr, "BeginPermitVibrationSession"},
- {513, nullptr, "EndPermitVibrationSession"},
- {514, nullptr, "Unknown514"},
- {520, nullptr, "EnableHandheldHids"},
- {521, nullptr, "DisableHandheldHids"},
- {522, nullptr, "SetJoyConRailEnabled"},
- {523, nullptr, "IsJoyConRailEnabled"},
- {524, nullptr, "IsHandheldHidsEnabled"},
- {525, nullptr, "IsJoyConAttachedOnAllRail"},
- {540, nullptr, "AcquirePlayReportControllerUsageUpdateEvent"},
- {541, nullptr, "GetPlayReportControllerUsages"},
- {542, nullptr, "AcquirePlayReportRegisteredDeviceUpdateEvent"},
- {543, nullptr, "GetRegisteredDevicesOld"},
- {544, nullptr, "AcquireConnectionTriggerTimeoutEvent"},
- {545, nullptr, "SendConnectionTrigger"},
- {546, nullptr, "AcquireDeviceRegisteredEventForControllerSupport"},
- {547, nullptr, "GetAllowedBluetoothLinksCount"},
- {548, nullptr, "GetRegisteredDevices"},
- {549, nullptr, "GetConnectableRegisteredDevices"},
- {700, nullptr, "ActivateUniquePad"},
- {702, nullptr, "AcquireUniquePadConnectionEventHandle"},
- {703, nullptr, "GetUniquePadIds"},
- {751, &HidSys::AcquireJoyDetachOnBluetoothOffEventHandle, "AcquireJoyDetachOnBluetoothOffEventHandle"},
- {800, nullptr, "ListSixAxisSensorHandles"},
- {801, nullptr, "IsSixAxisSensorUserCalibrationSupported"},
- {802, nullptr, "ResetSixAxisSensorCalibrationValues"},
- {803, nullptr, "StartSixAxisSensorUserCalibration"},
- {804, nullptr, "CancelSixAxisSensorUserCalibration"},
- {805, nullptr, "GetUniquePadBluetoothAddress"},
- {806, nullptr, "DisconnectUniquePad"},
- {807, nullptr, "GetUniquePadType"},
- {808, nullptr, "GetUniquePadInterface"},
- {809, nullptr, "GetUniquePadSerialNumber"},
- {810, nullptr, "GetUniquePadControllerNumber"},
- {811, nullptr, "GetSixAxisSensorUserCalibrationStage"},
- {812, nullptr, "GetConsoleUniqueSixAxisSensorHandle"},
- {821, nullptr, "StartAnalogStickManualCalibration"},
- {822, nullptr, "RetryCurrentAnalogStickManualCalibrationStage"},
- {823, nullptr, "CancelAnalogStickManualCalibration"},
- {824, nullptr, "ResetAnalogStickManualCalibration"},
- {825, nullptr, "GetAnalogStickState"},
- {826, nullptr, "GetAnalogStickManualCalibrationStage"},
- {827, nullptr, "IsAnalogStickButtonPressed"},
- {828, nullptr, "IsAnalogStickInReleasePosition"},
- {829, nullptr, "IsAnalogStickInCircumference"},
- {830, nullptr, "SetNotificationLedPattern"},
- {831, nullptr, "SetNotificationLedPatternWithTimeout"},
- {832, nullptr, "PrepareHidsForNotificationWake"},
- {850, &HidSys::IsUsbFullKeyControllerEnabled, "IsUsbFullKeyControllerEnabled"},
- {851, nullptr, "EnableUsbFullKeyController"},
- {852, nullptr, "IsUsbConnected"},
- {870, nullptr, "IsHandheldButtonPressedOnConsoleMode"},
- {900, nullptr, "ActivateInputDetector"},
- {901, nullptr, "NotifyInputDetector"},
- {1000, nullptr, "InitializeFirmwareUpdate"},
- {1001, nullptr, "GetFirmwareVersion"},
- {1002, nullptr, "GetAvailableFirmwareVersion"},
- {1003, nullptr, "IsFirmwareUpdateAvailable"},
- {1004, nullptr, "CheckFirmwareUpdateRequired"},
- {1005, nullptr, "StartFirmwareUpdate"},
- {1006, nullptr, "AbortFirmwareUpdate"},
- {1007, nullptr, "GetFirmwareUpdateState"},
- {1008, nullptr, "ActivateAudioControl"},
- {1009, nullptr, "AcquireAudioControlEventHandle"},
- {1010, nullptr, "GetAudioControlStates"},
- {1011, nullptr, "DeactivateAudioControl"},
- {1050, nullptr, "IsSixAxisSensorAccurateUserCalibrationSupported"},
- {1051, nullptr, "StartSixAxisSensorAccurateUserCalibration"},
- {1052, nullptr, "CancelSixAxisSensorAccurateUserCalibration"},
- {1053, nullptr, "GetSixAxisSensorAccurateUserCalibrationState"},
- {1100, nullptr, "GetHidbusSystemServiceObject"},
- {1120, nullptr, "SetFirmwareHotfixUpdateSkipEnabled"},
- {1130, nullptr, "InitializeUsbFirmwareUpdate"},
- {1131, nullptr, "FinalizeUsbFirmwareUpdate"},
- {1132, nullptr, "CheckUsbFirmwareUpdateRequired"},
- {1133, nullptr, "StartUsbFirmwareUpdate"},
- {1134, nullptr, "GetUsbFirmwareUpdateState"},
- {1150, nullptr, "SetTouchScreenMagnification"},
- {1151, nullptr, "GetTouchScreenFirmwareVersion"},
- {1152, nullptr, "SetTouchScreenDefaultConfiguration"},
- {1153, &HidSys::GetTouchScreenDefaultConfiguration, "GetTouchScreenDefaultConfiguration"},
- {1154, nullptr, "IsFirmwareAvailableForNotification"},
- {1155, nullptr, "SetForceHandheldStyleVibration"},
- {1156, nullptr, "SendConnectionTriggerWithoutTimeoutEvent"},
- {1157, nullptr, "CancelConnectionTrigger"},
- {1200, nullptr, "IsButtonConfigSupported"},
- {1201, nullptr, "IsButtonConfigEmbeddedSupported"},
- {1202, nullptr, "DeleteButtonConfig"},
- {1203, nullptr, "DeleteButtonConfigEmbedded"},
- {1204, nullptr, "SetButtonConfigEnabled"},
- {1205, nullptr, "SetButtonConfigEmbeddedEnabled"},
- {1206, nullptr, "IsButtonConfigEnabled"},
- {1207, nullptr, "IsButtonConfigEmbeddedEnabled"},
- {1208, nullptr, "SetButtonConfigEmbedded"},
- {1209, nullptr, "SetButtonConfigFull"},
- {1210, nullptr, "SetButtonConfigLeft"},
- {1211, nullptr, "SetButtonConfigRight"},
- {1212, nullptr, "GetButtonConfigEmbedded"},
- {1213, nullptr, "GetButtonConfigFull"},
- {1214, nullptr, "GetButtonConfigLeft"},
- {1215, nullptr, "GetButtonConfigRight"},
- {1250, nullptr, "IsCustomButtonConfigSupported"},
- {1251, nullptr, "IsDefaultButtonConfigEmbedded"},
- {1252, nullptr, "IsDefaultButtonConfigFull"},
- {1253, nullptr, "IsDefaultButtonConfigLeft"},
- {1254, nullptr, "IsDefaultButtonConfigRight"},
- {1255, nullptr, "IsButtonConfigStorageEmbeddedEmpty"},
- {1256, nullptr, "IsButtonConfigStorageFullEmpty"},
- {1257, nullptr, "IsButtonConfigStorageLeftEmpty"},
- {1258, nullptr, "IsButtonConfigStorageRightEmpty"},
- {1259, nullptr, "GetButtonConfigStorageEmbeddedDeprecated"},
- {1260, nullptr, "GetButtonConfigStorageFullDeprecated"},
- {1261, nullptr, "GetButtonConfigStorageLeftDeprecated"},
- {1262, nullptr, "GetButtonConfigStorageRightDeprecated"},
- {1263, nullptr, "SetButtonConfigStorageEmbeddedDeprecated"},
- {1264, nullptr, "SetButtonConfigStorageFullDeprecated"},
- {1265, nullptr, "SetButtonConfigStorageLeftDeprecated"},
- {1266, nullptr, "SetButtonConfigStorageRightDeprecated"},
- {1267, nullptr, "DeleteButtonConfigStorageEmbedded"},
- {1268, nullptr, "DeleteButtonConfigStorageFull"},
- {1269, nullptr, "DeleteButtonConfigStorageLeft"},
- {1270, nullptr, "DeleteButtonConfigStorageRight"},
- {1271, nullptr, "IsUsingCustomButtonConfig"},
- {1272, nullptr, "IsAnyCustomButtonConfigEnabled"},
- {1273, nullptr, "SetAllCustomButtonConfigEnabled"},
- {1274, nullptr, "SetDefaultButtonConfig"},
- {1275, nullptr, "SetAllDefaultButtonConfig"},
- {1276, nullptr, "SetHidButtonConfigEmbedded"},
- {1277, nullptr, "SetHidButtonConfigFull"},
- {1278, nullptr, "SetHidButtonConfigLeft"},
- {1279, nullptr, "SetHidButtonConfigRight"},
- {1280, nullptr, "GetHidButtonConfigEmbedded"},
- {1281, nullptr, "GetHidButtonConfigFull"},
- {1282, nullptr, "GetHidButtonConfigLeft"},
- {1283, nullptr, "GetHidButtonConfigRight"},
- {1284, nullptr, "GetButtonConfigStorageEmbedded"},
- {1285, nullptr, "GetButtonConfigStorageFull"},
- {1286, nullptr, "GetButtonConfigStorageLeft"},
- {1287, nullptr, "GetButtonConfigStorageRight"},
- {1288, nullptr, "SetButtonConfigStorageEmbedded"},
- {1289, nullptr, "SetButtonConfigStorageFull"},
- {1290, nullptr, "DeleteButtonConfigStorageRight"},
- {1291, nullptr, "DeleteButtonConfigStorageRight"},
- };
- // clang-format on
-
- RegisterHandlers(functions);
-
- joy_detach_event = service_context.CreateEvent("HidSys::JoyDetachEvent");
- }
-
-private:
- void ApplyNpadSystemCommonPolicy(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "called");
-
- GetAppletResource()
- ->GetController<Controller_NPad>(HidController::NPad)
- .ApplyNpadSystemCommonPolicy();
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
- }
-
- void GetLastActiveNpad(HLERequestContext& ctx) {
- LOG_DEBUG(Service_HID, "(STUBBED) called");
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.PushEnum(system.HIDCore().GetLastActiveController());
- }
-
- void GetUniquePadsFromNpad(HLERequestContext& ctx) {
- IPC::RequestParser rp{ctx};
- const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
-
- LOG_WARNING(Service_HID, "(STUBBED) called, npad_id_type={}", npad_id_type);
-
- const std::vector<Core::HID::UniquePadId> unique_pads{};
-
- ctx.WriteBuffer(unique_pads);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(static_cast<u32>(unique_pads.size()));
- }
-
- void AcquireJoyDetachOnBluetoothOffEventHandle(HLERequestContext& ctx) {
- LOG_INFO(Service_AM, "called");
-
- IPC::ResponseBuilder rb{ctx, 2, 1};
- rb.Push(ResultSuccess);
- rb.PushCopyObjects(joy_detach_event->GetReadableEvent());
- }
-
- void IsUsbFullKeyControllerEnabled(HLERequestContext& ctx) {
- const bool is_enabled = false;
-
- LOG_WARNING(Service_HID, "(STUBBED) called, is_enabled={}", is_enabled);
-
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.Push(is_enabled);
- }
-
- void GetTouchScreenDefaultConfiguration(HLERequestContext& ctx) {
- LOG_WARNING(Service_HID, "(STUBBED) called");
-
- Core::HID::TouchScreenConfigurationForNx touchscreen_config{
- .mode = Core::HID::TouchScreenModeForNx::Finger,
- };
-
- if (touchscreen_config.mode != Core::HID::TouchScreenModeForNx::Heat2 &&
- touchscreen_config.mode != Core::HID::TouchScreenModeForNx::Finger) {
- touchscreen_config.mode = Core::HID::TouchScreenModeForNx::UseSystemSetting;
- }
-
- IPC::ResponseBuilder rb{ctx, 6};
- rb.Push(ResultSuccess);
- rb.PushRaw(touchscreen_config);
- }
-
- std::shared_ptr<IAppletResource> GetAppletResource() {
- if (applet_resource == nullptr) {
- applet_resource = std::make_shared<IAppletResource>(system, service_context);
- }
-
- return applet_resource;
- }
-
- Kernel::KEvent* joy_detach_event;
- KernelHelpers::ServiceContext service_context;
- std::shared_ptr<IAppletResource> applet_resource;
-};
-
void LoopProcess(Core::System& system) {
auto server_manager = std::make_unique<ServerManager>(system);
- std::shared_ptr<IAppletResource> applet_resource;
+ std::shared_ptr<ResourceManager> resource_manager = std::make_shared<ResourceManager>(system);
+ std::shared_ptr<HidFirmwareSettings> firmware_settings =
+ std::make_shared<HidFirmwareSettings>();
+
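+ // The three HID interfaces registered below share the same ResourceManager instance.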
+ server_manager->RegisterNamedService(
+ "hid", std::make_shared<IHidServer>(system, resouce_manager, firmware_settings));
+ server_manager->RegisterNamedService(
+ "hid:dbg", std::make_shared<IHidDebugServer>(system, resouce_manager));
+ server_manager->RegisterNamedService(
+ "hid:sys", std::make_shared<IHidSystemServer>(system, resouce_manager));
- server_manager->RegisterNamedService("hid", std::make_shared<Hid>(system, applet_resource));
server_manager->RegisterNamedService("hidbus", std::make_shared<HidBus>(system));
- server_manager->RegisterNamedService("hid:dbg", std::make_shared<HidDbg>(system));
- server_manager->RegisterNamedService("hid:sys",
- std::make_shared<HidSys>(system, applet_resource));
- server_manager->RegisterNamedService("irs", std::make_shared<Service::IRS::IRS>(system));
- server_manager->RegisterNamedService("irs:sys",
- std::make_shared<Service::IRS::IRS_SYS>(system));
+ server_manager->RegisterNamedService("irs", std::make_shared<IRS::IRS>(system));
+ server_manager->RegisterNamedService("irs:sys", std::make_shared<IRS::IRS_SYS>(system));
server_manager->RegisterNamedService("xcd:sys", std::make_shared<XCD_SYS>(system));
+
system.RunServer(std::move(server_manager));
}
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index 0ca43de93..ec5463f4e 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -3,220 +3,12 @@
#pragma once
-#include <chrono>
-
-#include "core/hle/service/hid/controllers/controller_base.h"
-#include "core/hle/service/kernel_helpers.h"
-#include "core/hle/service/service.h"
-
-namespace Core::Timing {
-struct EventType;
-}
-
-namespace Service::SM {
-class ServiceManager;
+namespace Core {
+class System;
}
namespace Service::HID {
-enum class HidController : std::size_t {
- DebugPad,
- Touchscreen,
- Mouse,
- Keyboard,
- XPad,
- HomeButton,
- SleepButton,
- CaptureButton,
- InputDetector,
- UniquePad,
- NPad,
- Gesture,
- ConsoleSixAxisSensor,
- DebugMouse,
- Palma,
-
- MaxControllers,
-};
-
-class IAppletResource final : public ServiceFramework<IAppletResource> {
-public:
- explicit IAppletResource(Core::System& system_,
- KernelHelpers::ServiceContext& service_context_);
- ~IAppletResource() override;
-
- void ActivateController(HidController controller);
- void DeactivateController(HidController controller);
-
- template <typename T>
- T& GetController(HidController controller) {
- return static_cast<T&>(*controllers[static_cast<size_t>(controller)]);
- }
-
- template <typename T>
- const T& GetController(HidController controller) const {
- return static_cast<T&>(*controllers[static_cast<size_t>(controller)]);
- }
-
-private:
- template <typename T>
- void MakeController(HidController controller, u8* shared_memory) {
- if constexpr (std::is_constructible_v<T, Core::System&, u8*>) {
- controllers[static_cast<std::size_t>(controller)] =
- std::make_unique<T>(system, shared_memory);
- } else {
- controllers[static_cast<std::size_t>(controller)] =
- std::make_unique<T>(system.HIDCore(), shared_memory);
- }
- }
-
- template <typename T>
- void MakeControllerWithServiceContext(HidController controller, u8* shared_memory) {
- controllers[static_cast<std::size_t>(controller)] =
- std::make_unique<T>(system.HIDCore(), shared_memory, service_context);
- }
-
- void GetSharedMemoryHandle(HLERequestContext& ctx);
- void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
- void UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
- void UpdateMouseKeyboard(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
- void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
-
- KernelHelpers::ServiceContext& service_context;
-
- std::shared_ptr<Core::Timing::EventType> npad_update_event;
- std::shared_ptr<Core::Timing::EventType> default_update_event;
- std::shared_ptr<Core::Timing::EventType> mouse_keyboard_update_event;
- std::shared_ptr<Core::Timing::EventType> motion_update_event;
-
- std::array<std::unique_ptr<ControllerBase>, static_cast<size_t>(HidController::MaxControllers)>
- controllers{};
-};
-
-class Hid final : public ServiceFramework<Hid> {
-public:
- explicit Hid(Core::System& system_, std::shared_ptr<IAppletResource> applet_resource_);
- ~Hid() override;
-
- std::shared_ptr<IAppletResource> GetAppletResource();
-
-private:
- void CreateAppletResource(HLERequestContext& ctx);
- void ActivateDebugPad(HLERequestContext& ctx);
- void ActivateTouchScreen(HLERequestContext& ctx);
- void ActivateMouse(HLERequestContext& ctx);
- void ActivateKeyboard(HLERequestContext& ctx);
- void SendKeyboardLockKeyEvent(HLERequestContext& ctx);
- void ActivateXpad(HLERequestContext& ctx);
- void GetXpadIDs(HLERequestContext& ctx);
- void ActivateSixAxisSensor(HLERequestContext& ctx);
- void DeactivateSixAxisSensor(HLERequestContext& ctx);
- void StartSixAxisSensor(HLERequestContext& ctx);
- void StopSixAxisSensor(HLERequestContext& ctx);
- void IsSixAxisSensorFusionEnabled(HLERequestContext& ctx);
- void EnableSixAxisSensorFusion(HLERequestContext& ctx);
- void SetSixAxisSensorFusionParameters(HLERequestContext& ctx);
- void GetSixAxisSensorFusionParameters(HLERequestContext& ctx);
- void ResetSixAxisSensorFusionParameters(HLERequestContext& ctx);
- void SetGyroscopeZeroDriftMode(HLERequestContext& ctx);
- void GetGyroscopeZeroDriftMode(HLERequestContext& ctx);
- void ResetGyroscopeZeroDriftMode(HLERequestContext& ctx);
- void IsSixAxisSensorAtRest(HLERequestContext& ctx);
- void IsFirmwareUpdateAvailableForSixAxisSensor(HLERequestContext& ctx);
- void EnableSixAxisSensorUnalteredPassthrough(HLERequestContext& ctx);
- void IsSixAxisSensorUnalteredPassthroughEnabled(HLERequestContext& ctx);
- void LoadSixAxisSensorCalibrationParameter(HLERequestContext& ctx);
- void GetSixAxisSensorIcInformation(HLERequestContext& ctx);
- void ResetIsSixAxisSensorDeviceNewlyAssigned(HLERequestContext& ctx);
- void ActivateGesture(HLERequestContext& ctx);
- void SetSupportedNpadStyleSet(HLERequestContext& ctx);
- void GetSupportedNpadStyleSet(HLERequestContext& ctx);
- void SetSupportedNpadIdType(HLERequestContext& ctx);
- void ActivateNpad(HLERequestContext& ctx);
- void DeactivateNpad(HLERequestContext& ctx);
- void AcquireNpadStyleSetUpdateEventHandle(HLERequestContext& ctx);
- void DisconnectNpad(HLERequestContext& ctx);
- void GetPlayerLedPattern(HLERequestContext& ctx);
- void ActivateNpadWithRevision(HLERequestContext& ctx);
- void SetNpadJoyHoldType(HLERequestContext& ctx);
- void GetNpadJoyHoldType(HLERequestContext& ctx);
- void SetNpadJoyAssignmentModeSingleByDefault(HLERequestContext& ctx);
- void SetNpadJoyAssignmentModeSingle(HLERequestContext& ctx);
- void SetNpadJoyAssignmentModeDual(HLERequestContext& ctx);
- void MergeSingleJoyAsDualJoy(HLERequestContext& ctx);
- void StartLrAssignmentMode(HLERequestContext& ctx);
- void StopLrAssignmentMode(HLERequestContext& ctx);
- void SetNpadHandheldActivationMode(HLERequestContext& ctx);
- void GetNpadHandheldActivationMode(HLERequestContext& ctx);
- void SwapNpadAssignment(HLERequestContext& ctx);
- void IsUnintendedHomeButtonInputProtectionEnabled(HLERequestContext& ctx);
- void EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx);
- void SetNpadJoyAssignmentModeSingleWithDestination(HLERequestContext& ctx);
- void SetNpadAnalogStickUseCenterClamp(HLERequestContext& ctx);
- void SetNpadCaptureButtonAssignment(HLERequestContext& ctx);
- void ClearNpadCaptureButtonAssignment(HLERequestContext& ctx);
- void GetVibrationDeviceInfo(HLERequestContext& ctx);
- void SendVibrationValue(HLERequestContext& ctx);
- void GetActualVibrationValue(HLERequestContext& ctx);
- void CreateActiveVibrationDeviceList(HLERequestContext& ctx);
- void PermitVibration(HLERequestContext& ctx);
- void IsVibrationPermitted(HLERequestContext& ctx);
- void SendVibrationValues(HLERequestContext& ctx);
- void SendVibrationGcErmCommand(HLERequestContext& ctx);
- void GetActualVibrationGcErmCommand(HLERequestContext& ctx);
- void BeginPermitVibrationSession(HLERequestContext& ctx);
- void EndPermitVibrationSession(HLERequestContext& ctx);
- void IsVibrationDeviceMounted(HLERequestContext& ctx);
- void ActivateConsoleSixAxisSensor(HLERequestContext& ctx);
- void StartConsoleSixAxisSensor(HLERequestContext& ctx);
- void StopConsoleSixAxisSensor(HLERequestContext& ctx);
- void ActivateSevenSixAxisSensor(HLERequestContext& ctx);
- void StartSevenSixAxisSensor(HLERequestContext& ctx);
- void StopSevenSixAxisSensor(HLERequestContext& ctx);
- void InitializeSevenSixAxisSensor(HLERequestContext& ctx);
- void FinalizeSevenSixAxisSensor(HLERequestContext& ctx);
- void ResetSevenSixAxisSensorTimestamp(HLERequestContext& ctx);
- void IsUsbFullKeyControllerEnabled(HLERequestContext& ctx);
- void GetPalmaConnectionHandle(HLERequestContext& ctx);
- void InitializePalma(HLERequestContext& ctx);
- void AcquirePalmaOperationCompleteEvent(HLERequestContext& ctx);
- void GetPalmaOperationInfo(HLERequestContext& ctx);
- void PlayPalmaActivity(HLERequestContext& ctx);
- void SetPalmaFrModeType(HLERequestContext& ctx);
- void ReadPalmaStep(HLERequestContext& ctx);
- void EnablePalmaStep(HLERequestContext& ctx);
- void ResetPalmaStep(HLERequestContext& ctx);
- void ReadPalmaApplicationSection(HLERequestContext& ctx);
- void WritePalmaApplicationSection(HLERequestContext& ctx);
- void ReadPalmaUniqueCode(HLERequestContext& ctx);
- void SetPalmaUniqueCodeInvalid(HLERequestContext& ctx);
- void WritePalmaActivityEntry(HLERequestContext& ctx);
- void WritePalmaRgbLedPatternEntry(HLERequestContext& ctx);
- void WritePalmaWaveEntry(HLERequestContext& ctx);
- void SetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx);
- void GetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx);
- void SuspendPalmaFeature(HLERequestContext& ctx);
- void GetPalmaOperationResult(HLERequestContext& ctx);
- void ReadPalmaPlayLog(HLERequestContext& ctx);
- void ResetPalmaPlayLog(HLERequestContext& ctx);
- void SetIsPalmaAllConnectable(HLERequestContext& ctx);
- void SetIsPalmaPairedConnectable(HLERequestContext& ctx);
- void PairPalma(HLERequestContext& ctx);
- void SetPalmaBoostMode(HLERequestContext& ctx);
- void CancelWritePalmaWaveEntry(HLERequestContext& ctx);
- void EnablePalmaBoostMode(HLERequestContext& ctx);
- void GetPalmaBluetoothAddress(HLERequestContext& ctx);
- void SetDisallowedPalmaConnection(HLERequestContext& ctx);
- void SetNpadCommunicationMode(HLERequestContext& ctx);
- void GetNpadCommunicationMode(HLERequestContext& ctx);
- void SetTouchScreenConfiguration(HLERequestContext& ctx);
- void IsFirmwareUpdateNeededForNotification(HLERequestContext& ctx);
-
- std::shared_ptr<IAppletResource> applet_resource;
-
- KernelHelpers::ServiceContext service_context;
-};
-
void LoopProcess(Core::System& system);
} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_debug_server.cpp b/src/core/hle/service/hid/hid_debug_server.cpp
new file mode 100644
index 000000000..6294f3dfb
--- /dev/null
+++ b/src/core/hle/service/hid/hid_debug_server.cpp
@@ -0,0 +1,159 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/hle/service/hid/hid_debug_server.h"
+#include "core/hle/service/hid/resource_manager.h"
+#include "core/hle/service/ipc_helpers.h"
+
+namespace Service::HID {
+
+IHidDebugServer::IHidDebugServer(Core::System& system_, std::shared_ptr<ResourceManager> resource)
+ : ServiceFramework{system_, "hid:dbg"}, resource_manager{resource} {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, nullptr, "DeactivateDebugPad"},
+ {1, nullptr, "SetDebugPadAutoPilotState"},
+ {2, nullptr, "UnsetDebugPadAutoPilotState"},
+ {10, nullptr, "DeactivateTouchScreen"},
+ {11, nullptr, "SetTouchScreenAutoPilotState"},
+ {12, nullptr, "UnsetTouchScreenAutoPilotState"},
+ {13, nullptr, "GetTouchScreenConfiguration"},
+ {14, nullptr, "ProcessTouchScreenAutoTune"},
+ {15, nullptr, "ForceStopTouchScreenManagement"},
+ {16, nullptr, "ForceRestartTouchScreenManagement"},
+ {17, nullptr, "IsTouchScreenManaged"},
+ {20, nullptr, "DeactivateMouse"},
+ {21, nullptr, "SetMouseAutoPilotState"},
+ {22, nullptr, "UnsetMouseAutoPilotState"},
+ {25, nullptr, "SetDebugMouseAutoPilotState"},
+ {26, nullptr, "UnsetDebugMouseAutoPilotState"},
+ {30, nullptr, "DeactivateKeyboard"},
+ {31, nullptr, "SetKeyboardAutoPilotState"},
+ {32, nullptr, "UnsetKeyboardAutoPilotState"},
+ {50, nullptr, "DeactivateXpad"},
+ {51, nullptr, "SetXpadAutoPilotState"},
+ {52, nullptr, "UnsetXpadAutoPilotState"},
+ {53, nullptr, "DeactivateJoyXpad"},
+ {60, nullptr, "ClearNpadSystemCommonPolicy"},
+ {61, nullptr, "DeactivateNpad"},
+ {62, nullptr, "ForceDisconnectNpad"},
+ {91, nullptr, "DeactivateGesture"},
+ {110, nullptr, "DeactivateHomeButton"},
+ {111, nullptr, "SetHomeButtonAutoPilotState"},
+ {112, nullptr, "UnsetHomeButtonAutoPilotState"},
+ {120, nullptr, "DeactivateSleepButton"},
+ {121, nullptr, "SetSleepButtonAutoPilotState"},
+ {122, nullptr, "UnsetSleepButtonAutoPilotState"},
+ {123, nullptr, "DeactivateInputDetector"},
+ {130, nullptr, "DeactivateCaptureButton"},
+ {131, nullptr, "SetCaptureButtonAutoPilotState"},
+ {132, nullptr, "UnsetCaptureButtonAutoPilotState"},
+ {133, nullptr, "SetShiftAccelerometerCalibrationValue"},
+ {134, nullptr, "GetShiftAccelerometerCalibrationValue"},
+ {135, nullptr, "SetShiftGyroscopeCalibrationValue"},
+ {136, nullptr, "GetShiftGyroscopeCalibrationValue"},
+ {140, nullptr, "DeactivateConsoleSixAxisSensor"},
+ {141, nullptr, "GetConsoleSixAxisSensorSamplingFrequency"},
+ {142, nullptr, "DeactivateSevenSixAxisSensor"},
+ {143, nullptr, "GetConsoleSixAxisSensorCountStates"},
+ {144, nullptr, "GetAccelerometerFsr"},
+ {145, nullptr, "SetAccelerometerFsr"},
+ {146, nullptr, "GetAccelerometerOdr"},
+ {147, nullptr, "SetAccelerometerOdr"},
+ {148, nullptr, "GetGyroscopeFsr"},
+ {149, nullptr, "SetGyroscopeFsr"},
+ {150, nullptr, "GetGyroscopeOdr"},
+ {151, nullptr, "SetGyroscopeOdr"},
+ {152, nullptr, "GetWhoAmI"},
+ {201, nullptr, "ActivateFirmwareUpdate"},
+ {202, nullptr, "DeactivateFirmwareUpdate"},
+ {203, nullptr, "StartFirmwareUpdate"},
+ {204, nullptr, "GetFirmwareUpdateStage"},
+ {205, nullptr, "GetFirmwareVersion"},
+ {206, nullptr, "GetDestinationFirmwareVersion"},
+ {207, nullptr, "DiscardFirmwareInfoCacheForRevert"},
+ {208, nullptr, "StartFirmwareUpdateForRevert"},
+ {209, nullptr, "GetAvailableFirmwareVersionForRevert"},
+ {210, nullptr, "IsFirmwareUpdatingDevice"},
+ {211, nullptr, "StartFirmwareUpdateIndividual"},
+ {215, nullptr, "SetUsbFirmwareForceUpdateEnabled"},
+ {216, nullptr, "SetAllKuinaDevicesToFirmwareUpdateMode"},
+ {221, nullptr, "UpdateControllerColor"},
+ {222, nullptr, "ConnectUsbPadsAsync"},
+ {223, nullptr, "DisconnectUsbPadsAsync"},
+ {224, nullptr, "UpdateDesignInfo"},
+ {225, nullptr, "GetUniquePadDriverState"},
+ {226, nullptr, "GetSixAxisSensorDriverStates"},
+ {227, nullptr, "GetRxPacketHistory"},
+ {228, nullptr, "AcquireOperationEventHandle"},
+ {229, nullptr, "ReadSerialFlash"},
+ {230, nullptr, "WriteSerialFlash"},
+ {231, nullptr, "GetOperationResult"},
+ {232, nullptr, "EnableShipmentMode"},
+ {233, nullptr, "ClearPairingInfo"},
+ {234, nullptr, "GetUniquePadDeviceTypeSetInternal"},
+ {235, nullptr, "EnableAnalogStickPower"},
+ {236, nullptr, "RequestKuinaUartClockCal"},
+ {237, nullptr, "GetKuinaUartClockCal"},
+ {238, nullptr, "SetKuinaUartClockTrim"},
+ {239, nullptr, "KuinaLoopbackTest"},
+ {240, nullptr, "RequestBatteryVoltage"},
+ {241, nullptr, "GetBatteryVoltage"},
+ {242, nullptr, "GetUniquePadPowerInfo"},
+ {243, nullptr, "RebootUniquePad"},
+ {244, nullptr, "RequestKuinaFirmwareVersion"},
+ {245, nullptr, "GetKuinaFirmwareVersion"},
+ {246, nullptr, "GetVidPid"},
+ {247, nullptr, "GetAnalogStickCalibrationValue"},
+ {248, nullptr, "GetUniquePadIdsFull"},
+ {249, nullptr, "ConnectUniquePad"},
+ {250, nullptr, "IsVirtual"},
+ {251, nullptr, "GetAnalogStickModuleParam"},
+ {301, nullptr, "GetAbstractedPadHandles"},
+ {302, nullptr, "GetAbstractedPadState"},
+ {303, nullptr, "GetAbstractedPadsState"},
+ {321, nullptr, "SetAutoPilotVirtualPadState"},
+ {322, nullptr, "UnsetAutoPilotVirtualPadState"},
+ {323, nullptr, "UnsetAllAutoPilotVirtualPadState"},
+ {324, nullptr, "AttachHdlsWorkBuffer"},
+ {325, nullptr, "ReleaseHdlsWorkBuffer"},
+ {326, nullptr, "DumpHdlsNpadAssignmentState"},
+ {327, nullptr, "DumpHdlsStates"},
+ {328, nullptr, "ApplyHdlsNpadAssignmentState"},
+ {329, nullptr, "ApplyHdlsStateList"},
+ {330, nullptr, "AttachHdlsVirtualDevice"},
+ {331, nullptr, "DetachHdlsVirtualDevice"},
+ {332, nullptr, "SetHdlsState"},
+ {350, nullptr, "AddRegisteredDevice"},
+ {400, nullptr, "DisableExternalMcuOnNxDevice"},
+ {401, nullptr, "DisableRailDeviceFiltering"},
+ {402, nullptr, "EnableWiredPairing"},
+ {403, nullptr, "EnableShipmentModeAutoClear"},
+ {404, nullptr, "SetRailEnabled"},
+ {500, nullptr, "SetFactoryInt"},
+ {501, nullptr, "IsFactoryBootEnabled"},
+ {550, nullptr, "SetAnalogStickModelDataTemporarily"},
+ {551, nullptr, "GetAnalogStickModelData"},
+ {552, nullptr, "ResetAnalogStickModelData"},
+ {600, nullptr, "ConvertPadState"},
+ {650, nullptr, "AddButtonPlayData"},
+ {651, nullptr, "StartButtonPlayData"},
+ {652, nullptr, "StopButtonPlayData"},
+ {2000, nullptr, "DeactivateDigitizer"},
+ {2001, nullptr, "SetDigitizerAutoPilotState"},
+ {2002, nullptr, "UnsetDigitizerAutoPilotState"},
+ {2002, nullptr, "ReloadFirmwareDebugSettings"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+}
+
+IHidDebugServer::~IHidDebugServer() = default;
+
+std::shared_ptr<ResourceManager> IHidDebugServer::GetResourceManager() {
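+ // Ensure the shared resource manager is initialized before handing it out.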
+ resource_manager->Initialize();
+ return resource_manager;
+}
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_debug_server.h b/src/core/hle/service/hid/hid_debug_server.h
new file mode 100644
index 000000000..406db2211
--- /dev/null
+++ b/src/core/hle/service/hid/hid_debug_server.h
@@ -0,0 +1,26 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "core/hle/service/service.h"
+
+namespace Core {
+class System;
+}
+
+namespace Service::HID {
+class ResourceManager;
+
+class IHidDebugServer final : public ServiceFramework<IHidDebugServer> {
+public:
+ explicit IHidDebugServer(Core::System& system_, std::shared_ptr<ResourceManager> resource);
+ ~IHidDebugServer() override;
+
+private:
+ std::shared_ptr<ResourceManager> GetResourceManager();
+
+ std::shared_ptr<ResourceManager> resource_manager;
+};
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_firmware_settings.cpp b/src/core/hle/service/hid/hid_firmware_settings.cpp
new file mode 100644
index 000000000..59bd6825c
--- /dev/null
+++ b/src/core/hle/service/hid/hid_firmware_settings.cpp
@@ -0,0 +1,99 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/hle/service/hid/hid_firmware_settings.h"
+
+namespace Service::HID {
+
+HidFirmwareSettings::HidFirmwareSettings() {
+ LoadSettings(true);
+}
+
+void HidFirmwareSettings::Reload() {
+ LoadSettings(true);
+}
+
+void HidFirmwareSettings::LoadSettings(bool reload_config) {
+ if (is_initialized && !reload_config) {
+ return;
+ }
+
+ // TODO: Use nn::settings::fwdbg::GetSettingsItemValue to load config values
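+ // Until then, the values below are hard-coded stand-ins for the firmware debug settings.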
+
+ is_debug_pad_enabled = true;
+ is_device_managed = true;
+ is_touch_i2c_managed = is_device_managed;
+ is_future_devices_emulated = false;
+ is_mcu_hardware_error_emulated = false;
+ is_rail_enabled = true;
+ is_firmware_update_failure_emulated = false;
+ is_firmware_update_failure = {};
+ is_ble_disabled = false;
+ is_dscale_disabled = false;
+ is_handheld_forced = true;
+ features_per_id_disabled = {};
+ is_touch_firmware_auto_update_disabled = false;
+ is_initialized = true;
+}
+
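+ // Each accessor below calls LoadSettings(false) so values are populated on first use.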
+bool HidFirmwareSettings::IsDebugPadEnabled() {
+ LoadSettings(false);
+ return is_debug_pad_enabled;
+}
+
+bool HidFirmwareSettings::IsDeviceManaged() {
+ LoadSettings(false);
+ return is_device_managed;
+}
+
+bool HidFirmwareSettings::IsEmulateFutureDevice() {
+ LoadSettings(false);
+ return is_future_devices_emulated;
+}
+
+bool HidFirmwareSettings::IsTouchI2cManaged() {
+ LoadSettings(false);
+ return is_touch_i2c_managed;
+}
+
+bool HidFirmwareSettings::IsHandheldForced() {
+ LoadSettings(false);
+ return is_handheld_forced;
+}
+
+bool HidFirmwareSettings::IsRailEnabled() {
+ LoadSettings(false);
+ return is_rail_enabled;
+}
+
+bool HidFirmwareSettings::IsHardwareErrorEmulated() {
+ LoadSettings(false);
+ return is_mcu_hardware_error_emulated;
+}
+
+bool HidFirmwareSettings::IsBleDisabled() {
+ LoadSettings(false);
+ return is_ble_disabled;
+}
+
+bool HidFirmwareSettings::IsDscaleDisabled() {
+ LoadSettings(false);
+ return is_dscale_disabled;
+}
+
+bool HidFirmwareSettings::IsTouchAutoUpdateDisabled() {
+ LoadSettings(false);
+ return is_touch_firmware_auto_update_disabled;
+}
+
+HidFirmwareSettings::FirmwareSetting HidFirmwareSettings::GetFirmwareUpdateFailure() {
+ LoadSettings(false);
+ return is_firmware_update_failure;
+}
+
+HidFirmwareSettings::FeaturesPerId HidFirmwareSettings::FeaturesDisabledPerId() {
+ LoadSettings(false);
+ return features_per_id_disabled;
+}
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_firmware_settings.h b/src/core/hle/service/hid/hid_firmware_settings.h
new file mode 100644
index 000000000..6c10c440b
--- /dev/null
+++ b/src/core/hle/service/hid/hid_firmware_settings.h
@@ -0,0 +1,54 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Service::HID {
+
+/// Loads firmware config from nn::settings::fwdbg
+class HidFirmwareSettings {
+public:
+ using FirmwareSetting = std::array<u8, 4>;
+ using FeaturesPerId = std::array<bool, 0xA8>;
+
+ HidFirmwareSettings();
+
+ void Reload();
+ void LoadSettings(bool reload_config);
+
+ bool IsDebugPadEnabled();
+ bool IsDeviceManaged();
+ bool IsEmulateFutureDevice();
+ bool IsTouchI2cManaged();
+ bool IsHandheldForced();
+ bool IsRailEnabled();
+ bool IsHardwareErrorEmulated();
+ bool IsBleDisabled();
+ bool IsDscaleDisabled();
+ bool IsTouchAutoUpdateDisabled();
+
+ FirmwareSetting GetFirmwareUpdateFailure();
+ FeaturesPerId FeaturesDisabledPerId();
+
+private:
+ bool is_initialized{};
+
+ // Debug settings
+ bool is_debug_pad_enabled{};
+ bool is_device_managed{};
+ bool is_touch_i2c_managed{};
+ bool is_future_devices_emulated{};
+ bool is_mcu_hardware_error_emulated{};
+ bool is_rail_enabled{};
+ bool is_firmware_update_failure_emulated{};
+ bool is_ble_disabled{};
+ bool is_dscale_disabled{};
+ bool is_handheld_forced{};
+ bool is_touch_firmware_auto_update_disabled{};
+ FirmwareSetting is_firmware_update_failure{};
+ FeaturesPerId features_per_id_disabled{};
+};
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_server.cpp b/src/core/hle/service/hid/hid_server.cpp
new file mode 100644
index 000000000..a7d1578d9
--- /dev/null
+++ b/src/core/hle/service/hid/hid_server.cpp
@@ -0,0 +1,2387 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <array>
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "common/settings.h"
+#include "core/hid/hid_core.h"
+#include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/kernel/k_transfer_memory.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/service/hid/errors.h"
+#include "core/hle/service/hid/hid_firmware_settings.h"
+#include "core/hle/service/hid/hid_server.h"
+#include "core/hle/service/hid/hid_util.h"
+#include "core/hle/service/hid/resource_manager.h"
+#include "core/hle/service/ipc_helpers.h"
+#include "core/memory.h"
+
+#include "core/hle/service/hid/controllers/console_six_axis.h"
+#include "core/hle/service/hid/controllers/controller_base.h"
+#include "core/hle/service/hid/controllers/debug_pad.h"
+#include "core/hle/service/hid/controllers/gesture.h"
+#include "core/hle/service/hid/controllers/keyboard.h"
+#include "core/hle/service/hid/controllers/mouse.h"
+#include "core/hle/service/hid/controllers/npad.h"
+#include "core/hle/service/hid/controllers/palma.h"
+#include "core/hle/service/hid/controllers/seven_six_axis.h"
+#include "core/hle/service/hid/controllers/six_axis.h"
+#include "core/hle/service/hid/controllers/touchscreen.h"
+
+namespace Service::HID {
+
+class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
+public:
+ explicit IActiveVibrationDeviceList(Core::System& system_,
+ std::shared_ptr<ResourceManager> resource)
+ : ServiceFramework{system_, "IActiveVibrationDeviceList"}, resource_manager(resource) {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, &IActiveVibrationDeviceList::InitializeVibrationDevice, "InitializeVibrationDevice"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+ }
+
+private:
+ void InitializeVibrationDevice(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()};
+
+ if (resource_manager != nullptr) {
+ resource_manager->GetNpad()->InitializeVibrationDevice(vibration_device_handle);
+ }
+
+ LOG_DEBUG(Service_HID, "called, npad_type={}, npad_id={}, device_index={}",
+ vibration_device_handle.npad_type, vibration_device_handle.npad_id,
+ vibration_device_handle.device_index);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
+
+ std::shared_ptr<ResourceManager> resource_manager;
+};
+
+IHidServer::IHidServer(Core::System& system_, std::shared_ptr<ResourceManager> resource,
+ std::shared_ptr<HidFirmwareSettings> settings)
+ : ServiceFramework{system_, "hid"}, resource_manager{resource}, firmware_settings{settings} {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, &IHidServer::CreateAppletResource, "CreateAppletResource"},
+ {1, &IHidServer::ActivateDebugPad, "ActivateDebugPad"},
+ {11, &IHidServer::ActivateTouchScreen, "ActivateTouchScreen"},
+ {21, &IHidServer::ActivateMouse, "ActivateMouse"},
+ {26, nullptr, "ActivateDebugMouse"},
+ {31, &IHidServer::ActivateKeyboard, "ActivateKeyboard"},
+ {32, &IHidServer::SendKeyboardLockKeyEvent, "SendKeyboardLockKeyEvent"},
+ {40, &IHidServer::AcquireXpadIdEventHandle, "AcquireXpadIdEventHandle"},
+ {41, &IHidServer::ReleaseXpadIdEventHandle, "ReleaseXpadIdEventHandle"},
+ {51, &IHidServer::ActivateXpad, "ActivateXpad"},
+ {55, &IHidServer::GetXpadIds, "GetXpadIds"},
+ {56, &IHidServer::ActivateJoyXpad, "ActivateJoyXpad"},
+ {58, &IHidServer::GetJoyXpadLifoHandle, "GetJoyXpadLifoHandle"},
+ {59, &IHidServer::GetJoyXpadIds, "GetJoyXpadIds"},
+ {60, &IHidServer::ActivateSixAxisSensor, "ActivateSixAxisSensor"},
+ {61, &IHidServer::DeactivateSixAxisSensor, "DeactivateSixAxisSensor"},
+ {62, &IHidServer::GetSixAxisSensorLifoHandle, "GetSixAxisSensorLifoHandle"},
+ {63, &IHidServer::ActivateJoySixAxisSensor, "ActivateJoySixAxisSensor"},
+ {64, &IHidServer::DeactivateJoySixAxisSensor, "DeactivateJoySixAxisSensor"},
+ {65, &IHidServer::GetJoySixAxisSensorLifoHandle, "GetJoySixAxisSensorLifoHandle"},
+ {66, &IHidServer::StartSixAxisSensor, "StartSixAxisSensor"},
+ {67, &IHidServer::StopSixAxisSensor, "StopSixAxisSensor"},
+ {68, &IHidServer::IsSixAxisSensorFusionEnabled, "IsSixAxisSensorFusionEnabled"},
+ {69, &IHidServer::EnableSixAxisSensorFusion, "EnableSixAxisSensorFusion"},
+ {70, &IHidServer::SetSixAxisSensorFusionParameters, "SetSixAxisSensorFusionParameters"},
+ {71, &IHidServer::GetSixAxisSensorFusionParameters, "GetSixAxisSensorFusionParameters"},
+ {72, &IHidServer::ResetSixAxisSensorFusionParameters, "ResetSixAxisSensorFusionParameters"},
+ {73, nullptr, "SetAccelerometerParameters"},
+ {74, nullptr, "GetAccelerometerParameters"},
+ {75, nullptr, "ResetAccelerometerParameters"},
+ {76, nullptr, "SetAccelerometerPlayMode"},
+ {77, nullptr, "GetAccelerometerPlayMode"},
+ {78, nullptr, "ResetAccelerometerPlayMode"},
+ {79, &IHidServer::SetGyroscopeZeroDriftMode, "SetGyroscopeZeroDriftMode"},
+ {80, &IHidServer::GetGyroscopeZeroDriftMode, "GetGyroscopeZeroDriftMode"},
+ {81, &IHidServer::ResetGyroscopeZeroDriftMode, "ResetGyroscopeZeroDriftMode"},
+ {82, &IHidServer::IsSixAxisSensorAtRest, "IsSixAxisSensorAtRest"},
+ {83, &IHidServer::IsFirmwareUpdateAvailableForSixAxisSensor, "IsFirmwareUpdateAvailableForSixAxisSensor"},
+ {84, &IHidServer::EnableSixAxisSensorUnalteredPassthrough, "EnableSixAxisSensorUnalteredPassthrough"},
+ {85, &IHidServer::IsSixAxisSensorUnalteredPassthroughEnabled, "IsSixAxisSensorUnalteredPassthroughEnabled"},
+ {86, nullptr, "StoreSixAxisSensorCalibrationParameter"},
+ {87, &IHidServer::LoadSixAxisSensorCalibrationParameter, "LoadSixAxisSensorCalibrationParameter"},
+ {88, &IHidServer::GetSixAxisSensorIcInformation, "GetSixAxisSensorIcInformation"},
+ {89, &IHidServer::ResetIsSixAxisSensorDeviceNewlyAssigned, "ResetIsSixAxisSensorDeviceNewlyAssigned"},
+ {91, &IHidServer::ActivateGesture, "ActivateGesture"},
+ {100, &IHidServer::SetSupportedNpadStyleSet, "SetSupportedNpadStyleSet"},
+ {101, &IHidServer::GetSupportedNpadStyleSet, "GetSupportedNpadStyleSet"},
+ {102, &IHidServer::SetSupportedNpadIdType, "SetSupportedNpadIdType"},
+ {103, &IHidServer::ActivateNpad, "ActivateNpad"},
+ {104, &IHidServer::DeactivateNpad, "DeactivateNpad"},
+ {106, &IHidServer::AcquireNpadStyleSetUpdateEventHandle, "AcquireNpadStyleSetUpdateEventHandle"},
+ {107, &IHidServer::DisconnectNpad, "DisconnectNpad"},
+ {108, &IHidServer::GetPlayerLedPattern, "GetPlayerLedPattern"},
+ {109, &IHidServer::ActivateNpadWithRevision, "ActivateNpadWithRevision"},
+ {120, &IHidServer::SetNpadJoyHoldType, "SetNpadJoyHoldType"},
+ {121, &IHidServer::GetNpadJoyHoldType, "GetNpadJoyHoldType"},
+ {122, &IHidServer::SetNpadJoyAssignmentModeSingleByDefault, "SetNpadJoyAssignmentModeSingleByDefault"},
+ {123, &IHidServer::SetNpadJoyAssignmentModeSingle, "SetNpadJoyAssignmentModeSingle"},
+ {124, &IHidServer::SetNpadJoyAssignmentModeDual, "SetNpadJoyAssignmentModeDual"},
+ {125, &IHidServer::MergeSingleJoyAsDualJoy, "MergeSingleJoyAsDualJoy"},
+ {126, &IHidServer::StartLrAssignmentMode, "StartLrAssignmentMode"},
+ {127, &IHidServer::StopLrAssignmentMode, "StopLrAssignmentMode"},
+ {128, &IHidServer::SetNpadHandheldActivationMode, "SetNpadHandheldActivationMode"},
+ {129, &IHidServer::GetNpadHandheldActivationMode, "GetNpadHandheldActivationMode"},
+ {130, &IHidServer::SwapNpadAssignment, "SwapNpadAssignment"},
+ {131, &IHidServer::IsUnintendedHomeButtonInputProtectionEnabled, "IsUnintendedHomeButtonInputProtectionEnabled"},
+ {132, &IHidServer::EnableUnintendedHomeButtonInputProtection, "EnableUnintendedHomeButtonInputProtection"},
+ {133, &IHidServer::SetNpadJoyAssignmentModeSingleWithDestination, "SetNpadJoyAssignmentModeSingleWithDestination"},
+ {134, &IHidServer::SetNpadAnalogStickUseCenterClamp, "SetNpadAnalogStickUseCenterClamp"},
+ {135, &IHidServer::SetNpadCaptureButtonAssignment, "SetNpadCaptureButtonAssignment"},
+ {136, &IHidServer::ClearNpadCaptureButtonAssignment, "ClearNpadCaptureButtonAssignment"},
+ {200, &IHidServer::GetVibrationDeviceInfo, "GetVibrationDeviceInfo"},
+ {201, &IHidServer::SendVibrationValue, "SendVibrationValue"},
+ {202, &IHidServer::GetActualVibrationValue, "GetActualVibrationValue"},
+ {203, &IHidServer::CreateActiveVibrationDeviceList, "CreateActiveVibrationDeviceList"},
+ {204, &IHidServer::PermitVibration, "PermitVibration"},
+ {205, &IHidServer::IsVibrationPermitted, "IsVibrationPermitted"},
+ {206, &IHidServer::SendVibrationValues, "SendVibrationValues"},
+ {207, &IHidServer::SendVibrationGcErmCommand, "SendVibrationGcErmCommand"},
+ {208, &IHidServer::GetActualVibrationGcErmCommand, "GetActualVibrationGcErmCommand"},
+ {209, &IHidServer::BeginPermitVibrationSession, "BeginPermitVibrationSession"},
+ {210, &IHidServer::EndPermitVibrationSession, "EndPermitVibrationSession"},
+ {211, &IHidServer::IsVibrationDeviceMounted, "IsVibrationDeviceMounted"},
+ {212, nullptr, "SendVibrationValueInBool"},
+ {300, &IHidServer::ActivateConsoleSixAxisSensor, "ActivateConsoleSixAxisSensor"},
+ {301, &IHidServer::StartConsoleSixAxisSensor, "StartConsoleSixAxisSensor"},
+ {302, &IHidServer::StopConsoleSixAxisSensor, "StopConsoleSixAxisSensor"},
+ {303, &IHidServer::ActivateSevenSixAxisSensor, "ActivateSevenSixAxisSensor"},
+ {304, &IHidServer::StartSevenSixAxisSensor, "StartSevenSixAxisSensor"},
+ {305, &IHidServer::StopSevenSixAxisSensor, "StopSevenSixAxisSensor"},
+ {306, &IHidServer::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"},
+ {307, &IHidServer::FinalizeSevenSixAxisSensor, "FinalizeSevenSixAxisSensor"},
+ {308, nullptr, "SetSevenSixAxisSensorFusionStrength"},
+ {309, nullptr, "GetSevenSixAxisSensorFusionStrength"},
+ {310, &IHidServer::ResetSevenSixAxisSensorTimestamp, "ResetSevenSixAxisSensorTimestamp"},
+ {400, &IHidServer::IsUsbFullKeyControllerEnabled, "IsUsbFullKeyControllerEnabled"},
+ {401, nullptr, "EnableUsbFullKeyController"},
+ {402, nullptr, "IsUsbFullKeyControllerConnected"},
+ {403, nullptr, "HasBattery"},
+ {404, nullptr, "HasLeftRightBattery"},
+ {405, nullptr, "GetNpadInterfaceType"},
+ {406, nullptr, "GetNpadLeftRightInterfaceType"},
+ {407, nullptr, "GetNpadOfHighestBatteryLevel"},
+ {408, nullptr, "GetNpadOfHighestBatteryLevelForJoyRight"},
+ {500, &IHidServer::GetPalmaConnectionHandle, "GetPalmaConnectionHandle"},
+ {501, &IHidServer::InitializePalma, "InitializePalma"},
+ {502, &IHidServer::AcquirePalmaOperationCompleteEvent, "AcquirePalmaOperationCompleteEvent"},
+ {503, &IHidServer::GetPalmaOperationInfo, "GetPalmaOperationInfo"},
+ {504, &IHidServer::PlayPalmaActivity, "PlayPalmaActivity"},
+ {505, &IHidServer::SetPalmaFrModeType, "SetPalmaFrModeType"},
+ {506, &IHidServer::ReadPalmaStep, "ReadPalmaStep"},
+ {507, &IHidServer::EnablePalmaStep, "EnablePalmaStep"},
+ {508, &IHidServer::ResetPalmaStep, "ResetPalmaStep"},
+ {509, &IHidServer::ReadPalmaApplicationSection, "ReadPalmaApplicationSection"},
+ {510, &IHidServer::WritePalmaApplicationSection, "WritePalmaApplicationSection"},
+ {511, &IHidServer::ReadPalmaUniqueCode, "ReadPalmaUniqueCode"},
+ {512, &IHidServer::SetPalmaUniqueCodeInvalid, "SetPalmaUniqueCodeInvalid"},
+ {513, &IHidServer::WritePalmaActivityEntry, "WritePalmaActivityEntry"},
+ {514, &IHidServer::WritePalmaRgbLedPatternEntry, "WritePalmaRgbLedPatternEntry"},
+ {515, &IHidServer::WritePalmaWaveEntry, "WritePalmaWaveEntry"},
+ {516, &IHidServer::SetPalmaDataBaseIdentificationVersion, "SetPalmaDataBaseIdentificationVersion"},
+ {517, &IHidServer::GetPalmaDataBaseIdentificationVersion, "GetPalmaDataBaseIdentificationVersion"},
+ {518, &IHidServer::SuspendPalmaFeature, "SuspendPalmaFeature"},
+ {519, &IHidServer::GetPalmaOperationResult, "GetPalmaOperationResult"},
+ {520, &IHidServer::ReadPalmaPlayLog, "ReadPalmaPlayLog"},
+ {521, &IHidServer::ResetPalmaPlayLog, "ResetPalmaPlayLog"},
+ {522, &IHidServer::SetIsPalmaAllConnectable, "SetIsPalmaAllConnectable"},
+ {523, &IHidServer::SetIsPalmaPairedConnectable, "SetIsPalmaPairedConnectable"},
+ {524, &IHidServer::PairPalma, "PairPalma"},
+ {525, &IHidServer::SetPalmaBoostMode, "SetPalmaBoostMode"},
+ {526, &IHidServer::CancelWritePalmaWaveEntry, "CancelWritePalmaWaveEntry"},
+ {527, &IHidServer::EnablePalmaBoostMode, "EnablePalmaBoostMode"},
+ {528, &IHidServer::GetPalmaBluetoothAddress, "GetPalmaBluetoothAddress"},
+ {529, &IHidServer::SetDisallowedPalmaConnection, "SetDisallowedPalmaConnection"},
+ {1000, &IHidServer::SetNpadCommunicationMode, "SetNpadCommunicationMode"},
+ {1001, &IHidServer::GetNpadCommunicationMode, "GetNpadCommunicationMode"},
+ {1002, &IHidServer::SetTouchScreenConfiguration, "SetTouchScreenConfiguration"},
+ {1003, &IHidServer::IsFirmwareUpdateNeededForNotification, "IsFirmwareUpdateNeededForNotification"},
+ {1004, &IHidServer::SetTouchScreenResolution, "SetTouchScreenResolution"},
+ {2000, nullptr, "ActivateDigitizer"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+}
+
+IHidServer::~IHidServer() = default;
+
+void IHidServer::CreateAppletResource(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+ rb.Push(ResultSuccess);
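+    // The returned applet resource session shares this server's resource manager.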
+ rb.PushIpcInterface<IAppletResource>(system, resource_manager);
+}
+
+void IHidServer::ActivateDebugPad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto debug_pad = GetResourceManager()->GetDebugPad();
+
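+    // If the device is not firmware managed, activate the debug pad itself first; the
+    // per-applet-resource-user activation below only runs when that succeeds.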
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = debug_pad->Activate();
+ }
+
+ if (result.IsSuccess()) {
+ result = debug_pad->Activate(applet_resource_user_id);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ActivateTouchScreen(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto touch_screen = GetResourceManager()->GetTouchScreen();
+
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = touch_screen->Activate();
+ }
+
+ if (result.IsSuccess()) {
+ result = touch_screen->Activate(applet_resource_user_id);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ActivateMouse(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto mouse = GetResourceManager()->GetMouse();
+
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = mouse->Activate();
+ }
+
+ if (result.IsSuccess()) {
+ result = mouse->Activate(applet_resource_user_id);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ActivateKeyboard(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto keyboard = GetResourceManager()->GetKeyboard();
+
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = keyboard->Activate();
+ }
+
+ if (result.IsSuccess()) {
+ result = keyboard->Activate(applet_resource_user_id);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::SendKeyboardLockKeyEvent(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto flags{rp.Pop<u32>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called. flags={}", flags);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::AcquireXpadIdEventHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ // Handle returned is null here
+}
+
+void IHidServer::ReleaseXpadIdEventHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::ActivateXpad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ u32 basic_xpad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_DEBUG(Service_HID, "called, basic_xpad_id={}, applet_resource_user_id={}",
+ parameters.basic_xpad_id, parameters.applet_resource_user_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetXpadIds(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_HID, "called");
+
+ // This function has been hardcoded since 10.0.0+
+ const std::array<u32, 4> basic_xpad_id{0, 1, 2, 3};
+ ctx.WriteBuffer(basic_xpad_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push<s64>(basic_xpad_id.size());
+}
+
+void IHidServer::ActivateJoyXpad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetJoyXpadLifoHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ // Handle returned is null here
+}
+
+void IHidServer::GetJoyXpadIds(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_HID, "called");
+
+ // This function has been hardcoded since 10.0.0+
+ const s64 basic_xpad_id_count{};
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push(basic_xpad_id_count);
+}
+
+void IHidServer::ActivateSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::DeactivateSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+    IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetSixAxisSensorLifoHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+    IPC::ResponseBuilder rb{ctx, 2, 1};
+    rb.Push(ResultSuccess);
+    // Handle returned is null here
+}
+
+void IHidServer::ActivateJoySixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::DeactivateJoySixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetJoySixAxisSensorLifoHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto joy_xpad_id{rp.Pop<u32>()};
+
+ LOG_DEBUG(Service_HID, "called, joy_xpad_id={}", joy_xpad_id);
+
+ // This function has been stubbed since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ // Handle returned is null here
+}
+
+void IHidServer::StartSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->SetSixAxisEnabled(parameters.sixaxis_handle, true);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::StopSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->SetSixAxisEnabled(parameters.sixaxis_handle, false);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::IsSixAxisSensorFusionEnabled(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ bool is_enabled{};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result =
+ six_axis->IsSixAxisSensorFusionEnabled(parameters.sixaxis_handle, is_enabled);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(result);
+ rb.Push(is_enabled);
+}
+
+void IHidServer::EnableSixAxisSensorFusion(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ bool enable_sixaxis_sensor_fusion;
+ INSERT_PADDING_BYTES_NOINIT(3);
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->SetSixAxisFusionEnabled(parameters.sixaxis_handle,
+ parameters.enable_sixaxis_sensor_fusion);
+
+ LOG_DEBUG(Service_HID,
+ "called, enable_sixaxis_sensor_fusion={}, npad_type={}, npad_id={}, "
+ "device_index={}, applet_resource_user_id={}",
+ parameters.enable_sixaxis_sensor_fusion, parameters.sixaxis_handle.npad_type,
+ parameters.sixaxis_handle.npad_id, parameters.sixaxis_handle.device_index,
+ parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::SetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ Core::HID::SixAxisSensorFusionParameters sixaxis_fusion;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result =
+ six_axis->SetSixAxisFusionParameters(parameters.sixaxis_handle, parameters.sixaxis_fusion);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, parameter1={}, "
+ "parameter2={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.sixaxis_fusion.parameter1,
+ parameters.sixaxis_fusion.parameter2, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::GetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::SixAxisSensorFusionParameters fusion_parameters{};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result =
+ six_axis->GetSixAxisFusionParameters(parameters.sixaxis_handle, fusion_parameters);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(result);
+ rb.PushRaw(fusion_parameters);
+}
+
+void IHidServer::ResetSixAxisSensorFusionParameters(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+    // Since these parameters are unknown, just use what the hardware outputs
+ const Core::HID::SixAxisSensorFusionParameters fusion_parameters{
+ .parameter1 = 0.03f,
+ .parameter2 = 0.4f,
+ };
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result1 =
+ six_axis->SetSixAxisFusionParameters(parameters.sixaxis_handle, fusion_parameters);
+ const auto result2 = six_axis->SetSixAxisFusionEnabled(parameters.sixaxis_handle, true);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
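+    // Report the parameter write failure first, otherwise the result of re-enabling fusion.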
+ IPC::ResponseBuilder rb{ctx, 2};
+ if (result1.IsError()) {
+ rb.Push(result1);
+ return;
+ }
+ rb.Push(result2);
+}
+
+void IHidServer::SetGyroscopeZeroDriftMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto sixaxis_handle{rp.PopRaw<Core::HID::SixAxisSensorHandle>()};
+ const auto drift_mode{rp.PopEnum<Core::HID::GyroscopeZeroDriftMode>()};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->SetGyroscopeZeroDriftMode(sixaxis_handle, drift_mode);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, drift_mode={}, "
+ "applet_resource_user_id={}",
+ sixaxis_handle.npad_type, sixaxis_handle.npad_id, sixaxis_handle.device_index,
+ drift_mode, applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::GetGyroscopeZeroDriftMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto drift_mode{Core::HID::GyroscopeZeroDriftMode::Standard};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->GetGyroscopeZeroDriftMode(parameters.sixaxis_handle, drift_mode);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(result);
+ rb.PushEnum(drift_mode);
+}
+
+void IHidServer::ResetGyroscopeZeroDriftMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ const auto drift_mode{Core::HID::GyroscopeZeroDriftMode::Standard};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->SetGyroscopeZeroDriftMode(parameters.sixaxis_handle, drift_mode);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::IsSixAxisSensorAtRest(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ bool is_at_rest{};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ six_axis->IsSixAxisSensorAtRest(parameters.sixaxis_handle, is_at_rest);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(is_at_rest);
+}
+
+void IHidServer::IsFirmwareUpdateAvailableForSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ bool is_firmware_available{};
+ auto controller = GetResourceManager()->GetNpad();
+ controller->IsFirmwareUpdateAvailableForSixAxisSensor(parameters.sixaxis_handle,
+ is_firmware_available);
+
+ LOG_WARNING(
+ Service_HID,
+ "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(is_firmware_available);
+}
+
+void IHidServer::EnableSixAxisSensorUnalteredPassthrough(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ bool enabled;
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->EnableSixAxisSensorUnalteredPassthrough(parameters.sixaxis_handle,
+ parameters.enabled);
+
+ LOG_DEBUG(Service_HID,
+ "(STUBBED) called, enabled={}, npad_type={}, npad_id={}, device_index={}, "
+ "applet_resource_user_id={}",
+ parameters.enabled, parameters.sixaxis_handle.npad_type,
+ parameters.sixaxis_handle.npad_id, parameters.sixaxis_handle.device_index,
+ parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::IsSixAxisSensorUnalteredPassthroughEnabled(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+    bool is_unaltered_sixaxis_enabled{};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result = six_axis->IsSixAxisSensorUnalteredPassthroughEnabled(
+        parameters.sixaxis_handle, is_unaltered_sixaxis_enabled);
+
+ LOG_DEBUG(
+ Service_HID,
+ "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(result);
+    rb.Push(is_unaltered_sixaxis_enabled);
+}
+
+void IHidServer::LoadSixAxisSensorCalibrationParameter(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::SixAxisSensorCalibrationParameter calibration{};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result =
+ six_axis->LoadSixAxisSensorCalibrationParameter(parameters.sixaxis_handle, calibration);
+
+ LOG_WARNING(
+ Service_HID,
+ "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ if (result.IsSuccess()) {
+ ctx.WriteBuffer(calibration);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::GetSixAxisSensorIcInformation(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::SixAxisSensorIcInformation ic_information{};
+ auto six_axis = GetResourceManager()->GetSixAxis();
+ const auto result =
+ six_axis->GetSixAxisSensorIcInformation(parameters.sixaxis_handle, ic_information);
+
+ LOG_WARNING(
+ Service_HID,
+ "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ if (result.IsSuccess()) {
+ ctx.WriteBuffer(ic_information);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ResetIsSixAxisSensorDeviceNewlyAssigned(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::SixAxisSensorHandle sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto controller = GetResourceManager()->GetNpad();
+ const auto result =
+ controller->ResetIsSixAxisSensorDeviceNewlyAssigned(parameters.sixaxis_handle);
+
+ LOG_WARNING(
+ Service_HID,
+ "(STUBBED) called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.sixaxis_handle.npad_type, parameters.sixaxis_handle.npad_id,
+ parameters.sixaxis_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ActivateGesture(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ u32 basic_gesture_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_INFO(Service_HID, "called, basic_gesture_id={}, applet_resource_user_id={}",
+ parameters.basic_gesture_id, parameters.applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto gesture = GetResourceManager()->GetGesture();
+
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = gesture->Activate();
+ }
+
+ if (result.IsSuccess()) {
+ // TODO: Use gesture id here
+ result = gesture->Activate(parameters.applet_resource_user_id);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::SetSupportedNpadStyleSet(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadStyleSet supported_styleset;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ GetResourceManager()->GetNpad()->SetSupportedStyleSet({parameters.supported_styleset});
+
+ LOG_DEBUG(Service_HID, "called, supported_styleset={}, applet_resource_user_id={}",
+ parameters.supported_styleset, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetSupportedNpadStyleSet(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(GetResourceManager()->GetNpad()->GetSupportedStyleSet().raw);
+}
+
+void IHidServer::SetSupportedNpadIdType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
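+    // The list of supported npad id types is passed in the request's input buffer.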
+ const auto result = GetResourceManager()->GetNpad()->SetSupportedNpadIdTypes(ctx.ReadBuffer());
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ActivateNpad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ auto npad = GetResourceManager()->GetNpad();
+
+ // TODO: npad->SetRevision(applet_resource_user_id, NpadRevision::Revision0);
+ const Result result = npad->Activate(applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::DeactivateNpad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ // This function does nothing since 10.0.0+
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::AcquireNpadStyleSetUpdateEventHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ u64 unknown;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}, unknown={}",
+ parameters.npad_id, parameters.applet_resource_user_id, parameters.unknown);
+
+ // Games expect this event to be signaled after calling this function
+ GetResourceManager()->GetNpad()->SignalStyleSetChangedEvent(parameters.npad_id);
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ rb.PushCopyObjects(
+ GetResourceManager()->GetNpad()->GetStyleSetChangedEvent(parameters.npad_id));
+}
+
+void IHidServer::DisconnectNpad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto controller = GetResourceManager()->GetNpad();
+ controller->DisconnectNpad(parameters.npad_id);
+
+ LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
+ parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetPlayerLedPattern(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ Core::HID::LedPattern pattern{0, 0, 0, 0};
+ auto controller = GetResourceManager()->GetNpad();
+ const auto result = controller->GetLedPattern(npad_id, pattern);
+
+ LOG_DEBUG(Service_HID, "called, npad_id={}", npad_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(result);
+ rb.Push(pattern.raw);
+}
+
+void IHidServer::ActivateNpadWithRevision(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ NPad::NpadRevision revision;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_DEBUG(Service_HID, "called, revision={}, applet_resource_user_id={}", parameters.revision,
+ parameters.applet_resource_user_id);
+
+ auto npad = GetResourceManager()->GetNpad();
+
+ // TODO: npad->SetRevision(applet_resource_user_id, revision);
+ const auto result = npad->Activate(parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::SetNpadJoyHoldType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+ const auto hold_type{rp.PopEnum<NPad::NpadJoyHoldType>()};
+
+ GetResourceManager()->GetNpad()->SetHoldType(hold_type);
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, hold_type={}",
+ applet_resource_user_id, hold_type);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetNpadJoyHoldType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(GetResourceManager()->GetNpad()->GetHoldType());
+}
+
+void IHidServer::SetNpadJoyAssignmentModeSingleByDefault(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
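+    // The "by default" variant always assigns the left Joy-Con device type in single mode.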
+ Core::HID::NpadIdType new_npad_id{};
+ auto controller = GetResourceManager()->GetNpad();
+ controller->SetNpadMode(new_npad_id, parameters.npad_id, NPad::NpadJoyDeviceType::Left,
+ NPad::NpadJoyAssignmentMode::Single);
+
+ LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
+ parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetNpadJoyAssignmentModeSingle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ NPad::NpadJoyDeviceType npad_joy_device_type;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::NpadIdType new_npad_id{};
+ auto controller = GetResourceManager()->GetNpad();
+ controller->SetNpadMode(new_npad_id, parameters.npad_id, parameters.npad_joy_device_type,
+ NPad::NpadJoyAssignmentMode::Single);
+
+ LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
+ parameters.npad_id, parameters.applet_resource_user_id,
+ parameters.npad_joy_device_type);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetNpadJoyAssignmentModeDual(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::NpadIdType new_npad_id{};
+ auto controller = GetResourceManager()->GetNpad();
+ controller->SetNpadMode(new_npad_id, parameters.npad_id, {}, NPad::NpadJoyAssignmentMode::Dual);
+
+ LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
+ parameters.applet_resource_user_id); // Spams a lot when controller applet is open
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::MergeSingleJoyAsDualJoy(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_1{rp.PopEnum<Core::HID::NpadIdType>()};
+ const auto npad_id_2{rp.PopEnum<Core::HID::NpadIdType>()};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ auto controller = GetResourceManager()->GetNpad();
+ const auto result = controller->MergeSingleJoyAsDualJoy(npad_id_1, npad_id_2);
+
+ LOG_DEBUG(Service_HID, "called, npad_id_1={}, npad_id_2={}, applet_resource_user_id={}",
+ npad_id_1, npad_id_2, applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::StartLrAssignmentMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ GetResourceManager()->GetNpad()->StartLRAssignmentMode();
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::StopLrAssignmentMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ GetResourceManager()->GetNpad()->StopLRAssignmentMode();
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetNpadHandheldActivationMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+ const auto activation_mode{rp.PopEnum<NPad::NpadHandheldActivationMode>()};
+
+ GetResourceManager()->GetNpad()->SetNpadHandheldActivationMode(activation_mode);
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, activation_mode={}",
+ applet_resource_user_id, activation_mode);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetNpadHandheldActivationMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(GetResourceManager()->GetNpad()->GetNpadHandheldActivationMode());
+}
+
+void IHidServer::SwapNpadAssignment(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_1{rp.PopEnum<Core::HID::NpadIdType>()};
+ const auto npad_id_2{rp.PopEnum<Core::HID::NpadIdType>()};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ auto controller = GetResourceManager()->GetNpad();
+ const auto result = controller->SwapNpadAssignment(npad_id_1, npad_id_2);
+
+ LOG_DEBUG(Service_HID, "called, npad_id_1={}, npad_id_2={}, applet_resource_user_id={}",
+ npad_id_1, npad_id_2, applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::IsUnintendedHomeButtonInputProtectionEnabled(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ bool is_enabled = false;
+ auto controller = GetResourceManager()->GetNpad();
+ const auto result =
+ controller->IsUnintendedHomeButtonInputProtectionEnabled(parameters.npad_id, is_enabled);
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
+ parameters.npad_id, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(result);
+ rb.Push(is_enabled);
+}
+
+void IHidServer::EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ bool is_enabled;
+ INSERT_PADDING_BYTES_NOINIT(3);
+ Core::HID::NpadIdType npad_id;
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ auto controller = GetResourceManager()->GetNpad();
+ const auto result = controller->SetUnintendedHomeButtonInputProtectionEnabled(
+ parameters.is_enabled, parameters.npad_id);
+
+ LOG_DEBUG(Service_HID,
+ "(STUBBED) called, is_enabled={}, npad_id={}, applet_resource_user_id={}",
+ parameters.is_enabled, parameters.npad_id, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::SetNpadJoyAssignmentModeSingleWithDestination(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ NPad::NpadJoyDeviceType npad_joy_device_type;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ Core::HID::NpadIdType new_npad_id{};
+ auto controller = GetResourceManager()->GetNpad();
+ const auto is_reassigned =
+ controller->SetNpadMode(new_npad_id, parameters.npad_id, parameters.npad_joy_device_type,
+ NPad::NpadJoyAssignmentMode::Single);
+
+ LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
+ parameters.npad_id, parameters.applet_resource_user_id,
+ parameters.npad_joy_device_type);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push(is_reassigned);
+ rb.PushEnum(new_npad_id);
+}
+
+void IHidServer::SetNpadAnalogStickUseCenterClamp(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ bool analog_stick_use_center_clamp;
+ INSERT_PADDING_BYTES_NOINIT(7);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ GetResourceManager()->GetNpad()->SetAnalogStickUseCenterClamp(
+ parameters.analog_stick_use_center_clamp);
+
+ LOG_WARNING(Service_HID,
+ "(STUBBED) called, analog_stick_use_center_clamp={}, applet_resource_user_id={}",
+ parameters.analog_stick_use_center_clamp, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetNpadCaptureButtonAssignment(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadStyleSet npad_styleset;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ Core::HID::NpadButton button;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID,
+ "(STUBBED) called, npad_styleset={}, applet_resource_user_id={}, button={}",
+ parameters.npad_styleset, parameters.applet_resource_user_id, parameters.button);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::ClearNpadCaptureButtonAssignment(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
+ applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetVibrationDeviceInfo(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()};
+ const auto controller = GetResourceManager()->GetNpad();
+
+ Core::HID::VibrationDeviceInfo vibration_device_info;
+ bool check_device_index = false;
+
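+    // Map the controller style to its vibration hardware. Only linear resonant actuator
+    // devices report a meaningful left/right position.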
+ switch (vibration_device_handle.npad_type) {
+ case Core::HID::NpadStyleIndex::ProController:
+ case Core::HID::NpadStyleIndex::Handheld:
+ case Core::HID::NpadStyleIndex::JoyconDual:
+ case Core::HID::NpadStyleIndex::JoyconLeft:
+ case Core::HID::NpadStyleIndex::JoyconRight:
+ vibration_device_info.type = Core::HID::VibrationDeviceType::LinearResonantActuator;
+ check_device_index = true;
+ break;
+ case Core::HID::NpadStyleIndex::GameCube:
+ vibration_device_info.type = Core::HID::VibrationDeviceType::GcErm;
+ break;
+ case Core::HID::NpadStyleIndex::N64:
+ vibration_device_info.type = Core::HID::VibrationDeviceType::N64;
+ break;
+ default:
+ vibration_device_info.type = Core::HID::VibrationDeviceType::Unknown;
+ break;
+ }
+
+ vibration_device_info.position = Core::HID::VibrationDevicePosition::None;
+ if (check_device_index) {
+ switch (vibration_device_handle.device_index) {
+ case Core::HID::DeviceIndex::Left:
+ vibration_device_info.position = Core::HID::VibrationDevicePosition::Left;
+ break;
+ case Core::HID::DeviceIndex::Right:
+ vibration_device_info.position = Core::HID::VibrationDevicePosition::Right;
+ break;
+ case Core::HID::DeviceIndex::None:
+ default:
+ ASSERT_MSG(false, "DeviceIndex should never be None!");
+ break;
+ }
+ }
+
+ LOG_DEBUG(Service_HID, "called, vibration_device_type={}, vibration_device_position={}",
+ vibration_device_info.type, vibration_device_info.position);
+
+ const auto result = IsVibrationHandleValid(vibration_device_handle);
+ if (result.IsError()) {
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+ return;
+ }
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(vibration_device_info);
+}
+
+void IHidServer::SendVibrationValue(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::VibrationDeviceHandle vibration_device_handle;
+ Core::HID::VibrationValue vibration_value;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x20, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ GetResourceManager()->GetNpad()->VibrateController(parameters.vibration_device_handle,
+ parameters.vibration_value);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.vibration_device_handle.npad_type,
+ parameters.vibration_device_handle.npad_id,
+ parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetActualVibrationValue(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::VibrationDeviceHandle vibration_device_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.vibration_device_handle.npad_type,
+ parameters.vibration_device_handle.npad_id,
+ parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 6};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(
+ GetResourceManager()->GetNpad()->GetLastVibration(parameters.vibration_device_handle));
+}
+
+void IHidServer::CreateActiveVibrationDeviceList(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_HID, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+ rb.Push(ResultSuccess);
+ rb.PushIpcInterface<IActiveVibrationDeviceList>(system, GetResourceManager());
+}
+
+void IHidServer::PermitVibration(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto can_vibrate{rp.Pop<bool>()};
+
+    // nnSDK saves this value as a float. Since it can only be 1.0f or 0.0f, we simplify this
+    // value by converting it to a bool.
+ Settings::values.vibration_enabled.SetValue(can_vibrate);
+
+ LOG_DEBUG(Service_HID, "called, can_vibrate={}", can_vibrate);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::IsVibrationPermitted(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_HID, "called");
+
+    // nnSDK checks whether a float is greater than zero. We return the bool we stored earlier.
+ const auto is_enabled = Settings::values.vibration_enabled.GetValue();
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(is_enabled);
+}
+
+void IHidServer::SendVibrationValues(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
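+    // The request carries two parallel input buffers: the vibration device handles and the
+    // vibration values to apply to each of them.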
+ const auto handle_data = ctx.ReadBuffer(0);
+ const auto handle_count = ctx.GetReadBufferNumElements<Core::HID::VibrationDeviceHandle>(0);
+ const auto vibration_data = ctx.ReadBuffer(1);
+ const auto vibration_count = ctx.GetReadBufferNumElements<Core::HID::VibrationValue>(1);
+
+ auto vibration_device_handles =
+ std::span(reinterpret_cast<const Core::HID::VibrationDeviceHandle*>(handle_data.data()),
+ handle_count);
+ auto vibration_values = std::span(
+ reinterpret_cast<const Core::HID::VibrationValue*>(vibration_data.data()), vibration_count);
+
+ GetResourceManager()->GetNpad()->VibrateControllers(vibration_device_handles, vibration_values);
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SendVibrationGcErmCommand(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::VibrationDeviceHandle vibration_device_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ Core::HID::VibrationGcErmCommand gc_erm_command;
+ };
+ static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ /**
+ * Note: This uses yuzu-specific behavior such that the StopHard command produces
+ * vibrations where freq_low == 0.0f and freq_high == 0.0f, as defined below,
+ * in order to differentiate between Stop and StopHard commands.
+ * This is done to reuse the controller vibration functions made for regular controllers.
+ */
+ const auto vibration_value = [parameters] {
+ switch (parameters.gc_erm_command) {
+ case Core::HID::VibrationGcErmCommand::Stop:
+ return Core::HID::VibrationValue{
+ .low_amplitude = 0.0f,
+ .low_frequency = 160.0f,
+ .high_amplitude = 0.0f,
+ .high_frequency = 320.0f,
+ };
+ case Core::HID::VibrationGcErmCommand::Start:
+ return Core::HID::VibrationValue{
+ .low_amplitude = 1.0f,
+ .low_frequency = 160.0f,
+ .high_amplitude = 1.0f,
+ .high_frequency = 320.0f,
+ };
+ case Core::HID::VibrationGcErmCommand::StopHard:
+ return Core::HID::VibrationValue{
+ .low_amplitude = 0.0f,
+ .low_frequency = 0.0f,
+ .high_amplitude = 0.0f,
+ .high_frequency = 0.0f,
+ };
+ default:
+ return Core::HID::DEFAULT_VIBRATION_VALUE;
+ }
+ }();
+
+ GetResourceManager()->GetNpad()->VibrateController(parameters.vibration_device_handle,
+ vibration_value);
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}, "
+ "gc_erm_command={}",
+ parameters.vibration_device_handle.npad_type,
+ parameters.vibration_device_handle.npad_id,
+ parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id,
+ parameters.gc_erm_command);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetActualVibrationGcErmCommand(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::VibrationDeviceHandle vibration_device_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+    };
+    static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ const auto last_vibration =
+ GetResourceManager()->GetNpad()->GetLastVibration(parameters.vibration_device_handle);
+
+ const auto gc_erm_command = [last_vibration] {
+ if (last_vibration.low_amplitude != 0.0f || last_vibration.high_amplitude != 0.0f) {
+ return Core::HID::VibrationGcErmCommand::Start;
+ }
+
+ /**
+ * Note: This uses yuzu-specific behavior such that the StopHard command produces
+ * vibrations where freq_low == 0.0f and freq_high == 0.0f, as defined in the HID function
+ * SendVibrationGcErmCommand, in order to differentiate between Stop and StopHard commands.
+ * This is done to reuse the controller vibration functions made for regular controllers.
+ */
+ if (last_vibration.low_frequency == 0.0f && last_vibration.high_frequency == 0.0f) {
+ return Core::HID::VibrationGcErmCommand::StopHard;
+ }
+
+ return Core::HID::VibrationGcErmCommand::Stop;
+ }();
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.vibration_device_handle.npad_type,
+ parameters.vibration_device_handle.npad_id,
+ parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(gc_erm_command);
+}
+
+void IHidServer::BeginPermitVibrationSession(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ GetResourceManager()->GetNpad()->SetPermitVibrationSession(true);
+
+ LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::EndPermitVibrationSession(HLERequestContext& ctx) {
+ GetResourceManager()->GetNpad()->SetPermitVibrationSession(false);
+
+ LOG_DEBUG(Service_HID, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::IsVibrationDeviceMounted(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::VibrationDeviceHandle vibration_device_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_DEBUG(Service_HID,
+ "called, npad_type={}, npad_id={}, device_index={}, applet_resource_user_id={}",
+ parameters.vibration_device_handle.npad_type,
+ parameters.vibration_device_handle.npad_id,
+ parameters.vibration_device_handle.device_index, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(GetResourceManager()->GetNpad()->IsVibrationDeviceMounted(
+ parameters.vibration_device_handle));
+}
+
+void IHidServer::ActivateConsoleSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_INFO(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto console_sixaxis = GetResourceManager()->GetConsoleSixAxis();
+
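+    // Editor's note: two-stage activation. When the device is firmware managed, activation of
+    // the sensor itself is presumably handled elsewhere, so only the per-aruid step runs here.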
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = console_sixaxis->Activate();
+ }
+
+ if (result.IsSuccess()) {
+ result = console_sixaxis->Activate(applet_resource_user_id);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::StartConsoleSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::ConsoleSixAxisSensorHandle console_sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID,
+ "(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
+ parameters.console_sixaxis_handle.unknown_1,
+ parameters.console_sixaxis_handle.unknown_2, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::StopConsoleSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::ConsoleSixAxisSensorHandle console_sixaxis_handle;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID,
+ "(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
+ parameters.console_sixaxis_handle.unknown_1,
+ parameters.console_sixaxis_handle.unknown_2, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::ActivateSevenSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_INFO(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ Result result = ResultSuccess;
+ auto seven_sixaxis = GetResourceManager()->GetSevenSixAxis();
+
+ if (!firmware_settings->IsDeviceManaged()) {
+ result = seven_sixaxis->Activate();
+ }
+
+    if (result.IsSuccess()) {
+        result = seven_sixaxis->Activate(applet_resource_user_id);
+    }
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(result);
+}
+
+void IHidServer::StartSevenSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
+ applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::StopSevenSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
+ applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::InitializeSevenSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+ const auto t_mem_1_size{rp.Pop<u64>()};
+ const auto t_mem_2_size{rp.Pop<u64>()};
+ const auto t_mem_1_handle{ctx.GetCopyHandle(0)};
+ const auto t_mem_2_handle{ctx.GetCopyHandle(1)};
+
+ ASSERT_MSG(t_mem_1_size == 0x1000, "t_mem_1_size is not 0x1000 bytes");
+ ASSERT_MSG(t_mem_2_size == 0x7F000, "t_mem_2_size is not 0x7F000 bytes");
+
+ auto t_mem_1 = system.ApplicationProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(
+ t_mem_1_handle);
+
+ if (t_mem_1.IsNull()) {
+ LOG_ERROR(Service_HID, "t_mem_1 is a nullptr for handle=0x{:08X}", t_mem_1_handle);
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultUnknown);
+ return;
+ }
+
+ auto t_mem_2 = system.ApplicationProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(
+ t_mem_2_handle);
+
+ if (t_mem_2.IsNull()) {
+ LOG_ERROR(Service_HID, "t_mem_2 is a nullptr for handle=0x{:08X}", t_mem_2_handle);
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultUnknown);
+ return;
+ }
+
+ ASSERT_MSG(t_mem_1->GetSize() == 0x1000, "t_mem_1 has incorrect size");
+ ASSERT_MSG(t_mem_2->GetSize() == 0x7F000, "t_mem_2 has incorrect size");
+
+    // Activate the console and seven six axis sensors
+ GetResourceManager()->GetConsoleSixAxis()->Activate();
+ GetResourceManager()->GetSevenSixAxis()->Activate();
+
+ GetResourceManager()->GetSevenSixAxis()->SetTransferMemoryAddress(t_mem_1->GetSourceAddress());
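+    // Editor's note: only t_mem_1 (0x1000 bytes) is consumed here; t_mem_2 (0x7F000 bytes) is
+    // validated above but its address is not forwarded anywhere yet.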
+
+ LOG_WARNING(Service_HID,
+ "called, t_mem_1_handle=0x{:08X}, t_mem_2_handle=0x{:08X}, "
+ "applet_resource_user_id={}",
+ t_mem_1_handle, t_mem_2_handle, applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::FinalizeSevenSixAxisSensor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
+ applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::ResetSevenSixAxisSensorTimestamp(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ GetResourceManager()->GetSevenSixAxis()->ResetTimestamp();
+
+ LOG_WARNING(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::IsUsbFullKeyControllerEnabled(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(false);
+}
+
+void IHidServer::GetPalmaConnectionHandle(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ Core::HID::NpadIdType npad_id;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
+ parameters.npad_id, parameters.applet_resource_user_id);
+
+ Palma::PalmaConnectionHandle handle;
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->GetPalmaConnectionHandle(parameters.npad_id, handle);
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(result);
+ rb.PushRaw(handle);
+}
+
+void IHidServer::InitializePalma(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->InitializePalma(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::AcquirePalmaOperationCompleteEvent(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ auto controller = GetResourceManager()->GetPalma();
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ rb.PushCopyObjects(controller->AcquirePalmaOperationCompleteEvent(connection_handle));
+}
+
+void IHidServer::GetPalmaOperationInfo(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ Palma::PalmaOperationType operation_type;
+ Palma::PalmaOperationData data;
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->GetPalmaOperationInfo(connection_handle, operation_type, data);
+
+    if (result.IsError()) {
+        IPC::ResponseBuilder rb{ctx, 2};
+        rb.Push(result);
+        return;
+    }
+
+ ctx.WriteBuffer(data);
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(result);
+ rb.Push(static_cast<u64>(operation_type));
+}
+
+void IHidServer::PlayPalmaActivity(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+ const auto palma_activity{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, palma_activity={}",
+ connection_handle.npad_id, palma_activity);
+
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->PlayPalmaActivity(connection_handle, palma_activity);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::SetPalmaFrModeType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+ const auto fr_mode{rp.PopEnum<Palma::PalmaFrModeType>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, fr_mode={}",
+ connection_handle.npad_id, fr_mode);
+
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->SetPalmaFrModeType(connection_handle, fr_mode);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ReadPalmaStep(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->ReadPalmaStep(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::EnablePalmaStep(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ bool is_enabled;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ Palma::PalmaConnectionHandle connection_handle;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, is_enabled={}",
+ parameters.connection_handle.npad_id, parameters.is_enabled);
+
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result =
+ controller->EnablePalmaStep(parameters.connection_handle, parameters.is_enabled);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ResetPalmaStep(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ auto controller = GetResourceManager()->GetPalma();
+ const auto result = controller->ResetPalmaStep(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ReadPalmaApplicationSection(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::WritePalmaApplicationSection(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::ReadPalmaUniqueCode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ GetResourceManager()->GetPalma()->ReadPalmaUniqueCode(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetPalmaUniqueCodeInvalid(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ GetResourceManager()->GetPalma()->SetPalmaUniqueCodeInvalid(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::WritePalmaActivityEntry(HLERequestContext& ctx) {
+    LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::WritePalmaRgbLedPatternEntry(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+ const auto unknown{rp.Pop<u64>()};
+
+ [[maybe_unused]] const auto buffer = ctx.ReadBuffer();
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, unknown={}",
+ connection_handle.npad_id, unknown);
+
+ GetResourceManager()->GetPalma()->WritePalmaRgbLedPatternEntry(connection_handle, unknown);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::WritePalmaWaveEntry(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+ const auto wave_set{rp.PopEnum<Palma::PalmaWaveSet>()};
+ const auto unknown{rp.Pop<u64>()};
+ const auto t_mem_size{rp.Pop<u64>()};
+ const auto t_mem_handle{ctx.GetCopyHandle(0)};
+ const auto size{rp.Pop<u64>()};
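+    // Editor's note: size is only logged below; the length passed on to the controller is
+    // t_mem_size.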
+
+ ASSERT_MSG(t_mem_size == 0x3000, "t_mem_size is not 0x3000 bytes");
+
+ auto t_mem = system.ApplicationProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(
+ t_mem_handle);
+
+ if (t_mem.IsNull()) {
+ LOG_ERROR(Service_HID, "t_mem is a nullptr for handle=0x{:08X}", t_mem_handle);
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultUnknown);
+ return;
+ }
+
+ ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size");
+
+ LOG_WARNING(Service_HID,
+ "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, "
+ "t_mem_handle=0x{:08X}, t_mem_size={}, size={}",
+ connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size);
+
+ GetResourceManager()->GetPalma()->WritePalmaWaveEntry(connection_handle, wave_set,
+ t_mem->GetSourceAddress(), t_mem_size);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ s32 database_id_version;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ Palma::PalmaConnectionHandle connection_handle;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, database_id_version={}",
+ parameters.connection_handle.npad_id, parameters.database_id_version);
+
+ GetResourceManager()->GetPalma()->SetPalmaDataBaseIdentificationVersion(
+ parameters.connection_handle, parameters.database_id_version);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ GetResourceManager()->GetPalma()->GetPalmaDataBaseIdentificationVersion(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SuspendPalmaFeature(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetPalmaOperationResult(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ const auto result =
+ GetResourceManager()->GetPalma()->GetPalmaOperationResult(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
+}
+
+void IHidServer::ReadPalmaPlayLog(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::ResetPalmaPlayLog(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetIsPalmaAllConnectable(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ bool is_palma_all_connectable;
+ INSERT_PADDING_BYTES_NOINIT(7);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID,
+ "(STUBBED) called, is_palma_all_connectable={},applet_resource_user_id={}",
+ parameters.is_palma_all_connectable, parameters.applet_resource_user_id);
+
+ GetResourceManager()->GetPalma()->SetIsPalmaAllConnectable(parameters.is_palma_all_connectable);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetIsPalmaPairedConnectable(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::PairPalma(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto connection_handle{rp.PopRaw<Palma::PalmaConnectionHandle>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
+
+ GetResourceManager()->GetPalma()->PairPalma(connection_handle);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetPalmaBoostMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto palma_boost_mode{rp.Pop<bool>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, palma_boost_mode={}", palma_boost_mode);
+
+ GetResourceManager()->GetPalma()->SetPalmaBoostMode(palma_boost_mode);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::CancelWritePalmaWaveEntry(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::EnablePalmaBoostMode(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetPalmaBluetoothAddress(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetDisallowedPalmaConnection(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::SetNpadCommunicationMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+ const auto communication_mode{rp.PopEnum<NPad::NpadCommunicationMode>()};
+
+ GetResourceManager()->GetNpad()->SetNpadCommunicationMode(communication_mode);
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, communication_mode={}",
+ applet_resource_user_id, communication_mode);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::GetNpadCommunicationMode(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(GetResourceManager()->GetNpad()->GetNpadCommunicationMode());
+}
+
+void IHidServer::SetTouchScreenConfiguration(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto touchscreen_mode{rp.PopRaw<Core::HID::TouchScreenConfigurationForNx>()};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, touchscreen_mode={}, applet_resource_user_id={}",
+ touchscreen_mode.mode, applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidServer::IsFirmwareUpdateNeededForNotification(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ struct Parameters {
+ s32 unknown;
+ INSERT_PADDING_WORDS_NOINIT(1);
+ u64 applet_resource_user_id;
+ };
+ static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
+
+ const auto parameters{rp.PopRaw<Parameters>()};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, unknown={}, applet_resource_user_id={}",
+ parameters.unknown, parameters.applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(false);
+}
+
+void IHidServer::SetTouchScreenResolution(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto width{rp.Pop<u32>()};
+ const auto height{rp.Pop<u32>()};
+ const auto applet_resource_user_id{rp.Pop<u64>()};
+
+ GetResourceManager()->GetTouchScreen()->SetTouchscreenDimensions(width, height);
+
+ LOG_INFO(Service_HID, "called, width={}, height={}, applet_resource_user_id={}", width, height,
+ applet_resource_user_id);
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+std::shared_ptr<ResourceManager> IHidServer::GetResourceManager() {
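+    // Initialize() is assumed to be idempotent, since it is invoked on every accessor call.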
+ resource_manager->Initialize();
+ return resource_manager;
+}
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_server.h b/src/core/hle/service/hid/hid_server.h
new file mode 100644
index 000000000..cc7c4ebdd
--- /dev/null
+++ b/src/core/hle/service/hid/hid_server.h
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "core/hle/service/service.h"
+
+namespace Core {
+class System;
+}
+
+namespace Service::HID {
+class ResourceManager;
+class HidFirmwareSettings;
+
+class IHidServer final : public ServiceFramework<IHidServer> {
+public:
+ explicit IHidServer(Core::System& system_, std::shared_ptr<ResourceManager> resource,
+ std::shared_ptr<HidFirmwareSettings> settings);
+ ~IHidServer() override;
+
+ std::shared_ptr<ResourceManager> GetResourceManager();
+
+private:
+ void CreateAppletResource(HLERequestContext& ctx);
+ void ActivateDebugPad(HLERequestContext& ctx);
+ void ActivateTouchScreen(HLERequestContext& ctx);
+ void ActivateMouse(HLERequestContext& ctx);
+ void ActivateKeyboard(HLERequestContext& ctx);
+ void SendKeyboardLockKeyEvent(HLERequestContext& ctx);
+ void AcquireXpadIdEventHandle(HLERequestContext& ctx);
+ void ReleaseXpadIdEventHandle(HLERequestContext& ctx);
+ void ActivateXpad(HLERequestContext& ctx);
+ void GetXpadIds(HLERequestContext& ctx);
+ void ActivateJoyXpad(HLERequestContext& ctx);
+ void GetJoyXpadLifoHandle(HLERequestContext& ctx);
+ void GetJoyXpadIds(HLERequestContext& ctx);
+ void ActivateSixAxisSensor(HLERequestContext& ctx);
+ void DeactivateSixAxisSensor(HLERequestContext& ctx);
+ void GetSixAxisSensorLifoHandle(HLERequestContext& ctx);
+ void ActivateJoySixAxisSensor(HLERequestContext& ctx);
+ void DeactivateJoySixAxisSensor(HLERequestContext& ctx);
+ void GetJoySixAxisSensorLifoHandle(HLERequestContext& ctx);
+ void StartSixAxisSensor(HLERequestContext& ctx);
+ void StopSixAxisSensor(HLERequestContext& ctx);
+ void IsSixAxisSensorFusionEnabled(HLERequestContext& ctx);
+ void EnableSixAxisSensorFusion(HLERequestContext& ctx);
+ void SetSixAxisSensorFusionParameters(HLERequestContext& ctx);
+ void GetSixAxisSensorFusionParameters(HLERequestContext& ctx);
+ void ResetSixAxisSensorFusionParameters(HLERequestContext& ctx);
+ void SetGyroscopeZeroDriftMode(HLERequestContext& ctx);
+ void GetGyroscopeZeroDriftMode(HLERequestContext& ctx);
+ void ResetGyroscopeZeroDriftMode(HLERequestContext& ctx);
+ void IsSixAxisSensorAtRest(HLERequestContext& ctx);
+ void IsFirmwareUpdateAvailableForSixAxisSensor(HLERequestContext& ctx);
+ void EnableSixAxisSensorUnalteredPassthrough(HLERequestContext& ctx);
+ void IsSixAxisSensorUnalteredPassthroughEnabled(HLERequestContext& ctx);
+ void LoadSixAxisSensorCalibrationParameter(HLERequestContext& ctx);
+ void GetSixAxisSensorIcInformation(HLERequestContext& ctx);
+ void ResetIsSixAxisSensorDeviceNewlyAssigned(HLERequestContext& ctx);
+ void ActivateGesture(HLERequestContext& ctx);
+ void SetSupportedNpadStyleSet(HLERequestContext& ctx);
+ void GetSupportedNpadStyleSet(HLERequestContext& ctx);
+ void SetSupportedNpadIdType(HLERequestContext& ctx);
+ void ActivateNpad(HLERequestContext& ctx);
+ void DeactivateNpad(HLERequestContext& ctx);
+ void AcquireNpadStyleSetUpdateEventHandle(HLERequestContext& ctx);
+ void DisconnectNpad(HLERequestContext& ctx);
+ void GetPlayerLedPattern(HLERequestContext& ctx);
+ void ActivateNpadWithRevision(HLERequestContext& ctx);
+ void SetNpadJoyHoldType(HLERequestContext& ctx);
+ void GetNpadJoyHoldType(HLERequestContext& ctx);
+ void SetNpadJoyAssignmentModeSingleByDefault(HLERequestContext& ctx);
+ void SetNpadJoyAssignmentModeSingle(HLERequestContext& ctx);
+ void SetNpadJoyAssignmentModeDual(HLERequestContext& ctx);
+ void MergeSingleJoyAsDualJoy(HLERequestContext& ctx);
+ void StartLrAssignmentMode(HLERequestContext& ctx);
+ void StopLrAssignmentMode(HLERequestContext& ctx);
+ void SetNpadHandheldActivationMode(HLERequestContext& ctx);
+ void GetNpadHandheldActivationMode(HLERequestContext& ctx);
+ void SwapNpadAssignment(HLERequestContext& ctx);
+ void IsUnintendedHomeButtonInputProtectionEnabled(HLERequestContext& ctx);
+ void EnableUnintendedHomeButtonInputProtection(HLERequestContext& ctx);
+ void SetNpadJoyAssignmentModeSingleWithDestination(HLERequestContext& ctx);
+ void SetNpadAnalogStickUseCenterClamp(HLERequestContext& ctx);
+ void SetNpadCaptureButtonAssignment(HLERequestContext& ctx);
+ void ClearNpadCaptureButtonAssignment(HLERequestContext& ctx);
+ void GetVibrationDeviceInfo(HLERequestContext& ctx);
+ void SendVibrationValue(HLERequestContext& ctx);
+ void GetActualVibrationValue(HLERequestContext& ctx);
+ void CreateActiveVibrationDeviceList(HLERequestContext& ctx);
+ void PermitVibration(HLERequestContext& ctx);
+ void IsVibrationPermitted(HLERequestContext& ctx);
+ void SendVibrationValues(HLERequestContext& ctx);
+ void SendVibrationGcErmCommand(HLERequestContext& ctx);
+ void GetActualVibrationGcErmCommand(HLERequestContext& ctx);
+ void BeginPermitVibrationSession(HLERequestContext& ctx);
+ void EndPermitVibrationSession(HLERequestContext& ctx);
+ void IsVibrationDeviceMounted(HLERequestContext& ctx);
+ void ActivateConsoleSixAxisSensor(HLERequestContext& ctx);
+ void StartConsoleSixAxisSensor(HLERequestContext& ctx);
+ void StopConsoleSixAxisSensor(HLERequestContext& ctx);
+ void ActivateSevenSixAxisSensor(HLERequestContext& ctx);
+ void StartSevenSixAxisSensor(HLERequestContext& ctx);
+ void StopSevenSixAxisSensor(HLERequestContext& ctx);
+ void InitializeSevenSixAxisSensor(HLERequestContext& ctx);
+ void FinalizeSevenSixAxisSensor(HLERequestContext& ctx);
+ void ResetSevenSixAxisSensorTimestamp(HLERequestContext& ctx);
+ void IsUsbFullKeyControllerEnabled(HLERequestContext& ctx);
+ void GetPalmaConnectionHandle(HLERequestContext& ctx);
+ void InitializePalma(HLERequestContext& ctx);
+ void AcquirePalmaOperationCompleteEvent(HLERequestContext& ctx);
+ void GetPalmaOperationInfo(HLERequestContext& ctx);
+ void PlayPalmaActivity(HLERequestContext& ctx);
+ void SetPalmaFrModeType(HLERequestContext& ctx);
+ void ReadPalmaStep(HLERequestContext& ctx);
+ void EnablePalmaStep(HLERequestContext& ctx);
+ void ResetPalmaStep(HLERequestContext& ctx);
+ void ReadPalmaApplicationSection(HLERequestContext& ctx);
+ void WritePalmaApplicationSection(HLERequestContext& ctx);
+ void ReadPalmaUniqueCode(HLERequestContext& ctx);
+ void SetPalmaUniqueCodeInvalid(HLERequestContext& ctx);
+ void WritePalmaActivityEntry(HLERequestContext& ctx);
+ void WritePalmaRgbLedPatternEntry(HLERequestContext& ctx);
+ void WritePalmaWaveEntry(HLERequestContext& ctx);
+ void SetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx);
+ void GetPalmaDataBaseIdentificationVersion(HLERequestContext& ctx);
+ void SuspendPalmaFeature(HLERequestContext& ctx);
+ void GetPalmaOperationResult(HLERequestContext& ctx);
+ void ReadPalmaPlayLog(HLERequestContext& ctx);
+ void ResetPalmaPlayLog(HLERequestContext& ctx);
+ void SetIsPalmaAllConnectable(HLERequestContext& ctx);
+ void SetIsPalmaPairedConnectable(HLERequestContext& ctx);
+ void PairPalma(HLERequestContext& ctx);
+ void SetPalmaBoostMode(HLERequestContext& ctx);
+ void CancelWritePalmaWaveEntry(HLERequestContext& ctx);
+ void EnablePalmaBoostMode(HLERequestContext& ctx);
+ void GetPalmaBluetoothAddress(HLERequestContext& ctx);
+ void SetDisallowedPalmaConnection(HLERequestContext& ctx);
+ void SetNpadCommunicationMode(HLERequestContext& ctx);
+ void GetNpadCommunicationMode(HLERequestContext& ctx);
+ void SetTouchScreenConfiguration(HLERequestContext& ctx);
+ void IsFirmwareUpdateNeededForNotification(HLERequestContext& ctx);
+ void SetTouchScreenResolution(HLERequestContext& ctx);
+
+ std::shared_ptr<ResourceManager> resource_manager;
+ std::shared_ptr<HidFirmwareSettings> firmware_settings;
+};
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_system_server.cpp b/src/core/hle/service/hid/hid_system_server.cpp
new file mode 100644
index 000000000..b56d0347a
--- /dev/null
+++ b/src/core/hle/service/hid/hid_system_server.cpp
@@ -0,0 +1,539 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/hid/hid_core.h"
+#include "core/hle/service/hid/controllers/npad.h"
+#include "core/hle/service/hid/controllers/touchscreen.h"
+#include "core/hle/service/hid/errors.h"
+#include "core/hle/service/hid/hid_system_server.h"
+#include "core/hle/service/hid/resource_manager.h"
+#include "core/hle/service/ipc_helpers.h"
+
+namespace Service::HID {
+
+IHidSystemServer::IHidSystemServer(Core::System& system_, std::shared_ptr<ResourceManager> resource)
+ : ServiceFramework{system_, "hid:sys"}, service_context{system_, service_name},
+ resource_manager{resource} {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {31, nullptr, "SendKeyboardLockKeyEvent"},
+ {101, nullptr, "AcquireHomeButtonEventHandle"},
+ {111, nullptr, "ActivateHomeButton"},
+ {121, nullptr, "AcquireSleepButtonEventHandle"},
+ {131, nullptr, "ActivateSleepButton"},
+ {141, nullptr, "AcquireCaptureButtonEventHandle"},
+ {151, nullptr, "ActivateCaptureButton"},
+ {161, nullptr, "GetPlatformConfig"},
+ {210, nullptr, "AcquireNfcDeviceUpdateEventHandle"},
+ {211, nullptr, "GetNpadsWithNfc"},
+ {212, nullptr, "AcquireNfcActivateEventHandle"},
+ {213, nullptr, "ActivateNfc"},
+ {214, nullptr, "GetXcdHandleForNpadWithNfc"},
+ {215, nullptr, "IsNfcActivated"},
+ {230, nullptr, "AcquireIrSensorEventHandle"},
+ {231, nullptr, "ActivateIrSensor"},
+ {232, nullptr, "GetIrSensorState"},
+ {233, nullptr, "GetXcdHandleForNpadWithIrSensor"},
+ {301, nullptr, "ActivateNpadSystem"},
+ {303, &IHidSystemServer::ApplyNpadSystemCommonPolicy, "ApplyNpadSystemCommonPolicy"},
+ {304, &IHidSystemServer::EnableAssigningSingleOnSlSrPress, "EnableAssigningSingleOnSlSrPress"},
+ {305, &IHidSystemServer::DisableAssigningSingleOnSlSrPress, "DisableAssigningSingleOnSlSrPress"},
+ {306, &IHidSystemServer::GetLastActiveNpad, "GetLastActiveNpad"},
+ {307, nullptr, "GetNpadSystemExtStyle"},
+ {308, &IHidSystemServer::ApplyNpadSystemCommonPolicyFull, "ApplyNpadSystemCommonPolicyFull"},
+ {309, &IHidSystemServer::GetNpadFullKeyGripColor, "GetNpadFullKeyGripColor"},
+ {310, &IHidSystemServer::GetMaskedSupportedNpadStyleSet, "GetMaskedSupportedNpadStyleSet"},
+ {311, nullptr, "SetNpadPlayerLedBlinkingDevice"},
+ {312, &IHidSystemServer::SetSupportedNpadStyleSetAll, "SetSupportedNpadStyleSetAll"},
+ {313, nullptr, "GetNpadCaptureButtonAssignment"},
+ {314, nullptr, "GetAppletFooterUiType"},
+ {315, &IHidSystemServer::GetAppletDetailedUiType, "GetAppletDetailedUiType"},
+ {316, &IHidSystemServer::GetNpadInterfaceType, "GetNpadInterfaceType"},
+ {317, &IHidSystemServer::GetNpadLeftRightInterfaceType, "GetNpadLeftRightInterfaceType"},
+ {318, &IHidSystemServer::HasBattery, "HasBattery"},
+ {319, &IHidSystemServer::HasLeftRightBattery, "HasLeftRightBattery"},
+ {321, &IHidSystemServer::GetUniquePadsFromNpad, "GetUniquePadsFromNpad"},
+ {322, &IHidSystemServer::GetIrSensorState, "GetIrSensorState"},
+ {323, nullptr, "GetXcdHandleForNpadWithIrSensor"},
+ {324, nullptr, "GetUniquePadButtonSet"},
+ {325, nullptr, "GetUniquePadColor"},
+ {326, nullptr, "GetUniquePadAppletDetailedUiType"},
+ {327, nullptr, "GetAbstractedPadIdDataFromNpad"},
+ {328, nullptr, "AttachAbstractedPadToNpad"},
+ {329, nullptr, "DetachAbstractedPadAll"},
+ {330, nullptr, "CheckAbstractedPadConnection"},
+ {500, nullptr, "SetAppletResourceUserId"},
+ {501, nullptr, "RegisterAppletResourceUserId"},
+ {502, nullptr, "UnregisterAppletResourceUserId"},
+ {503, nullptr, "EnableAppletToGetInput"},
+ {504, nullptr, "SetAruidValidForVibration"},
+ {505, nullptr, "EnableAppletToGetSixAxisSensor"},
+ {506, nullptr, "EnableAppletToGetPadInput"},
+ {507, nullptr, "EnableAppletToGetTouchScreen"},
+ {510, nullptr, "SetVibrationMasterVolume"},
+ {511, nullptr, "GetVibrationMasterVolume"},
+ {512, nullptr, "BeginPermitVibrationSession"},
+ {513, nullptr, "EndPermitVibrationSession"},
+ {514, nullptr, "Unknown514"},
+ {520, nullptr, "EnableHandheldHids"},
+ {521, nullptr, "DisableHandheldHids"},
+ {522, nullptr, "SetJoyConRailEnabled"},
+ {523, nullptr, "IsJoyConRailEnabled"},
+ {524, nullptr, "IsHandheldHidsEnabled"},
+ {525, nullptr, "IsJoyConAttachedOnAllRail"},
+ {540, nullptr, "AcquirePlayReportControllerUsageUpdateEvent"},
+ {541, nullptr, "GetPlayReportControllerUsages"},
+ {542, nullptr, "AcquirePlayReportRegisteredDeviceUpdateEvent"},
+ {543, nullptr, "GetRegisteredDevicesOld"},
+ {544, &IHidSystemServer::AcquireConnectionTriggerTimeoutEvent, "AcquireConnectionTriggerTimeoutEvent"},
+ {545, nullptr, "SendConnectionTrigger"},
+ {546, &IHidSystemServer::AcquireDeviceRegisteredEventForControllerSupport, "AcquireDeviceRegisteredEventForControllerSupport"},
+ {547, nullptr, "GetAllowedBluetoothLinksCount"},
+ {548, &IHidSystemServer::GetRegisteredDevices, "GetRegisteredDevices"},
+ {549, nullptr, "GetConnectableRegisteredDevices"},
+ {700, nullptr, "ActivateUniquePad"},
+ {702, &IHidSystemServer::AcquireUniquePadConnectionEventHandle, "AcquireUniquePadConnectionEventHandle"},
+ {703, &IHidSystemServer::GetUniquePadIds, "GetUniquePadIds"},
+ {751, &IHidSystemServer::AcquireJoyDetachOnBluetoothOffEventHandle, "AcquireJoyDetachOnBluetoothOffEventHandle"},
+ {800, nullptr, "ListSixAxisSensorHandles"},
+ {801, nullptr, "IsSixAxisSensorUserCalibrationSupported"},
+ {802, nullptr, "ResetSixAxisSensorCalibrationValues"},
+ {803, nullptr, "StartSixAxisSensorUserCalibration"},
+ {804, nullptr, "CancelSixAxisSensorUserCalibration"},
+ {805, nullptr, "GetUniquePadBluetoothAddress"},
+ {806, nullptr, "DisconnectUniquePad"},
+ {807, nullptr, "GetUniquePadType"},
+ {808, nullptr, "GetUniquePadInterface"},
+ {809, nullptr, "GetUniquePadSerialNumber"},
+ {810, nullptr, "GetUniquePadControllerNumber"},
+ {811, nullptr, "GetSixAxisSensorUserCalibrationStage"},
+ {812, nullptr, "GetConsoleUniqueSixAxisSensorHandle"},
+ {821, nullptr, "StartAnalogStickManualCalibration"},
+ {822, nullptr, "RetryCurrentAnalogStickManualCalibrationStage"},
+ {823, nullptr, "CancelAnalogStickManualCalibration"},
+ {824, nullptr, "ResetAnalogStickManualCalibration"},
+ {825, nullptr, "GetAnalogStickState"},
+ {826, nullptr, "GetAnalogStickManualCalibrationStage"},
+ {827, nullptr, "IsAnalogStickButtonPressed"},
+ {828, nullptr, "IsAnalogStickInReleasePosition"},
+ {829, nullptr, "IsAnalogStickInCircumference"},
+ {830, nullptr, "SetNotificationLedPattern"},
+ {831, nullptr, "SetNotificationLedPatternWithTimeout"},
+ {832, nullptr, "PrepareHidsForNotificationWake"},
+ {850, &IHidSystemServer::IsUsbFullKeyControllerEnabled, "IsUsbFullKeyControllerEnabled"},
+ {851, nullptr, "EnableUsbFullKeyController"},
+ {852, nullptr, "IsUsbConnected"},
+ {870, &IHidSystemServer::IsHandheldButtonPressedOnConsoleMode, "IsHandheldButtonPressedOnConsoleMode"},
+ {900, nullptr, "ActivateInputDetector"},
+ {901, nullptr, "NotifyInputDetector"},
+ {1000, &IHidSystemServer::InitializeFirmwareUpdate, "InitializeFirmwareUpdate"},
+ {1001, nullptr, "GetFirmwareVersion"},
+ {1002, nullptr, "GetAvailableFirmwareVersion"},
+ {1003, nullptr, "IsFirmwareUpdateAvailable"},
+ {1004, nullptr, "CheckFirmwareUpdateRequired"},
+ {1005, nullptr, "StartFirmwareUpdate"},
+ {1006, nullptr, "AbortFirmwareUpdate"},
+ {1007, nullptr, "GetFirmwareUpdateState"},
+ {1008, nullptr, "ActivateAudioControl"},
+ {1009, nullptr, "AcquireAudioControlEventHandle"},
+ {1010, nullptr, "GetAudioControlStates"},
+ {1011, nullptr, "DeactivateAudioControl"},
+ {1050, nullptr, "IsSixAxisSensorAccurateUserCalibrationSupported"},
+ {1051, nullptr, "StartSixAxisSensorAccurateUserCalibration"},
+ {1052, nullptr, "CancelSixAxisSensorAccurateUserCalibration"},
+ {1053, nullptr, "GetSixAxisSensorAccurateUserCalibrationState"},
+ {1100, nullptr, "GetHidbusSystemServiceObject"},
+ {1120, nullptr, "SetFirmwareHotfixUpdateSkipEnabled"},
+ {1130, nullptr, "InitializeUsbFirmwareUpdate"},
+ {1131, nullptr, "FinalizeUsbFirmwareUpdate"},
+ {1132, nullptr, "CheckUsbFirmwareUpdateRequired"},
+ {1133, nullptr, "StartUsbFirmwareUpdate"},
+ {1134, nullptr, "GetUsbFirmwareUpdateState"},
+ {1135, &IHidSystemServer::InitializeUsbFirmwareUpdateWithoutMemory, "InitializeUsbFirmwareUpdateWithoutMemory"},
+ {1150, nullptr, "SetTouchScreenMagnification"},
+ {1151, nullptr, "GetTouchScreenFirmwareVersion"},
+ {1152, nullptr, "SetTouchScreenDefaultConfiguration"},
+ {1153, &IHidSystemServer::GetTouchScreenDefaultConfiguration, "GetTouchScreenDefaultConfiguration"},
+ {1154, nullptr, "IsFirmwareAvailableForNotification"},
+ {1155, nullptr, "SetForceHandheldStyleVibration"},
+ {1156, nullptr, "SendConnectionTriggerWithoutTimeoutEvent"},
+ {1157, nullptr, "CancelConnectionTrigger"},
+ {1200, nullptr, "IsButtonConfigSupported"},
+ {1201, nullptr, "IsButtonConfigEmbeddedSupported"},
+ {1202, nullptr, "DeleteButtonConfig"},
+ {1203, nullptr, "DeleteButtonConfigEmbedded"},
+ {1204, nullptr, "SetButtonConfigEnabled"},
+ {1205, nullptr, "SetButtonConfigEmbeddedEnabled"},
+ {1206, nullptr, "IsButtonConfigEnabled"},
+ {1207, nullptr, "IsButtonConfigEmbeddedEnabled"},
+ {1208, nullptr, "SetButtonConfigEmbedded"},
+ {1209, nullptr, "SetButtonConfigFull"},
+ {1210, nullptr, "SetButtonConfigLeft"},
+ {1211, nullptr, "SetButtonConfigRight"},
+ {1212, nullptr, "GetButtonConfigEmbedded"},
+ {1213, nullptr, "GetButtonConfigFull"},
+ {1214, nullptr, "GetButtonConfigLeft"},
+ {1215, nullptr, "GetButtonConfigRight"},
+ {1250, nullptr, "IsCustomButtonConfigSupported"},
+ {1251, nullptr, "IsDefaultButtonConfigEmbedded"},
+ {1252, nullptr, "IsDefaultButtonConfigFull"},
+ {1253, nullptr, "IsDefaultButtonConfigLeft"},
+ {1254, nullptr, "IsDefaultButtonConfigRight"},
+ {1255, nullptr, "IsButtonConfigStorageEmbeddedEmpty"},
+ {1256, nullptr, "IsButtonConfigStorageFullEmpty"},
+ {1257, nullptr, "IsButtonConfigStorageLeftEmpty"},
+ {1258, nullptr, "IsButtonConfigStorageRightEmpty"},
+ {1259, nullptr, "GetButtonConfigStorageEmbeddedDeprecated"},
+ {1260, nullptr, "GetButtonConfigStorageFullDeprecated"},
+ {1261, nullptr, "GetButtonConfigStorageLeftDeprecated"},
+ {1262, nullptr, "GetButtonConfigStorageRightDeprecated"},
+ {1263, nullptr, "SetButtonConfigStorageEmbeddedDeprecated"},
+ {1264, nullptr, "SetButtonConfigStorageFullDeprecated"},
+ {1265, nullptr, "SetButtonConfigStorageLeftDeprecated"},
+ {1266, nullptr, "SetButtonConfigStorageRightDeprecated"},
+ {1267, nullptr, "DeleteButtonConfigStorageEmbedded"},
+ {1268, nullptr, "DeleteButtonConfigStorageFull"},
+ {1269, nullptr, "DeleteButtonConfigStorageLeft"},
+ {1270, nullptr, "DeleteButtonConfigStorageRight"},
+ {1271, nullptr, "IsUsingCustomButtonConfig"},
+ {1272, nullptr, "IsAnyCustomButtonConfigEnabled"},
+ {1273, nullptr, "SetAllCustomButtonConfigEnabled"},
+ {1274, nullptr, "SetDefaultButtonConfig"},
+ {1275, nullptr, "SetAllDefaultButtonConfig"},
+ {1276, nullptr, "SetHidButtonConfigEmbedded"},
+ {1277, nullptr, "SetHidButtonConfigFull"},
+ {1278, nullptr, "SetHidButtonConfigLeft"},
+ {1279, nullptr, "SetHidButtonConfigRight"},
+ {1280, nullptr, "GetHidButtonConfigEmbedded"},
+ {1281, nullptr, "GetHidButtonConfigFull"},
+ {1282, nullptr, "GetHidButtonConfigLeft"},
+ {1283, nullptr, "GetHidButtonConfigRight"},
+ {1284, nullptr, "GetButtonConfigStorageEmbedded"},
+ {1285, nullptr, "GetButtonConfigStorageFull"},
+ {1286, nullptr, "GetButtonConfigStorageLeft"},
+ {1287, nullptr, "GetButtonConfigStorageRight"},
+ {1288, nullptr, "SetButtonConfigStorageEmbedded"},
+ {1289, nullptr, "SetButtonConfigStorageFull"},
+            {1290, nullptr, "SetButtonConfigStorageLeft"},
+            {1291, nullptr, "SetButtonConfigStorageRight"},
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+
+ joy_detach_event = service_context.CreateEvent("IHidSystemServer::JoyDetachEvent");
+ acquire_device_registered_event =
+ service_context.CreateEvent("IHidSystemServer::AcquireDeviceRegisteredEvent");
+ acquire_connection_trigger_timeout_event =
+ service_context.CreateEvent("IHidSystemServer::AcquireConnectionTriggerTimeoutEvent");
+ unique_pad_connection_event =
+ service_context.CreateEvent("IHidSystemServer::AcquireUniquePadConnectionEventHandle");
+}
+
+IHidSystemServer::~IHidSystemServer() {
+ service_context.CloseEvent(joy_detach_event);
+ service_context.CloseEvent(acquire_device_registered_event);
+ service_context.CloseEvent(acquire_connection_trigger_timeout_event);
+ service_context.CloseEvent(unique_pad_connection_event);
+}
+
+void IHidSystemServer::ApplyNpadSystemCommonPolicy(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "called");
+
+ GetResourceManager()->GetNpad()->ApplyNpadSystemCommonPolicy();
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::EnableAssigningSingleOnSlSrPress(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::DisableAssigningSingleOnSlSrPress(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::GetLastActiveNpad(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_HID, "(STUBBED) called"); // Spams a lot when controller applet is running
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(system.HIDCore().GetLastActiveController());
+}
+
+void IHidSystemServer::ApplyNpadSystemCommonPolicyFull(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "called");
+
+ GetResourceManager()->GetNpad()->ApplyNpadSystemCommonPolicy();
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::GetNpadFullKeyGripColor(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "(STUBBED) called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ Core::HID::NpadColor left_color{};
+ Core::HID::NpadColor right_color{};
+ // TODO: Get colors from Npad
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(left_color);
+ rb.PushRaw(right_color);
+}
+
+void IHidSystemServer::GetMaskedSupportedNpadStyleSet(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+
+ LOG_INFO(Service_HID, "(STUBBED) called");
+
+ Core::HID::NpadStyleSet supported_styleset =
+ GetResourceManager()->GetNpad()->GetSupportedStyleSet().raw;
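+    // Editor's note: no mask is applied yet; the unmodified supported style set is returned.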
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(supported_styleset);
+}
+
+void IHidSystemServer::SetSupportedNpadStyleSetAll(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+
+ LOG_INFO(Service_HID, "(STUBBED) called");
+
+ Core::HID::NpadStyleSet supported_styleset =
+ GetResourceManager()->GetNpad()->GetSupportedStyleSet().raw;
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(supported_styleset);
+}
+
+void IHidSystemServer::GetAppletDetailedUiType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ const NPad::AppletDetailedUiType detailed_ui_type =
+ GetResourceManager()->GetNpad()->GetAppletDetailedUiType(npad_id_type);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(detailed_ui_type);
+}
+
+void IHidSystemServer::GetNpadInterfaceType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "(STUBBED) called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(Core::HID::NpadInterfaceType::Bluetooth);
+}
+
+void IHidSystemServer::GetNpadLeftRightInterfaceType(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "(STUBBED) called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.PushEnum(Core::HID::NpadInterfaceType::Bluetooth);
+ rb.PushEnum(Core::HID::NpadInterfaceType::Bluetooth);
+}
+
+void IHidSystemServer::HasBattery(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "(STUBBED) called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(false);
+}
+
+void IHidSystemServer::HasLeftRightBattery(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "(STUBBED) called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ struct LeftRightBattery {
+ bool left;
+ bool right;
+ };
+
+ LeftRightBattery left_right_battery{
+ .left = false,
+ .right = false,
+ };
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(left_right_battery);
+}
+
+void IHidSystemServer::GetUniquePadsFromNpad(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto npad_id_type{rp.PopEnum<Core::HID::NpadIdType>()};
+
+ LOG_DEBUG(Service_HID, "(STUBBED) called, npad_id_type={}",
+ npad_id_type); // Spams a lot when controller applet is running
+
+ const std::vector<Core::HID::UniquePadId> unique_pads{};
+
+ if (!unique_pads.empty()) {
+ ctx.WriteBuffer(unique_pads);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(static_cast<u32>(unique_pads.size()));
+}
+
+void IHidSystemServer::GetIrSensorState(HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::AcquireConnectionTriggerTimeoutEvent(HLERequestContext& ctx) {
+    LOG_INFO(Service_HID, "(STUBBED) called");
+
+    IPC::ResponseBuilder rb{ctx, 2, 1};
+    rb.Push(ResultSuccess);
+    rb.PushCopyObjects(acquire_connection_trigger_timeout_event->GetReadableEvent());
+}
+
+void IHidSystemServer::AcquireDeviceRegisteredEventForControllerSupport(HLERequestContext& ctx) {
+ LOG_INFO(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ rb.PushCopyObjects(acquire_device_registered_event->GetReadableEvent());
+}
+
+void IHidSystemServer::GetRegisteredDevices(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ struct RegisterData {
+ std::array<u8, 0x68> data;
+ };
+ static_assert(sizeof(RegisterData) == 0x68, "RegisterData is an invalid size");
+ std::vector<RegisterData> registered_devices{};
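+    // Editor's note: no devices are tracked yet, so the buffer write below is skipped and the
+    // reported count is 0.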
+
+ if (!registered_devices.empty()) {
+ ctx.WriteBuffer(registered_devices);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push<u64>(registered_devices.size());
+}
+
+void IHidSystemServer::AcquireUniquePadConnectionEventHandle(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+    rb.Push(ResultSuccess);
+    rb.PushCopyObjects(unique_pad_connection_event->GetReadableEvent());
+}
+
+void IHidSystemServer::GetUniquePadIds(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(ResultSuccess);
+ rb.Push<u64>(0);
+}
+
+void IHidSystemServer::AcquireJoyDetachOnBluetoothOffEventHandle(HLERequestContext& ctx) {
+    LOG_INFO(Service_HID, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ rb.PushCopyObjects(joy_detach_event->GetReadableEvent());
+}
+
+void IHidSystemServer::IsUsbFullKeyControllerEnabled(HLERequestContext& ctx) {
+ const bool is_enabled = false;
+
+ LOG_WARNING(Service_HID, "(STUBBED) called, is_enabled={}", is_enabled);
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(is_enabled);
+}
+
+void IHidSystemServer::IsHandheldButtonPressedOnConsoleMode(HLERequestContext& ctx) {
+ const bool button_pressed = false;
+
+    LOG_DEBUG(Service_HID, "(STUBBED) called, button_pressed={}",
+ button_pressed); // Spams a lot when controller applet is open
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(button_pressed);
+}
+
+void IHidSystemServer::InitializeFirmwareUpdate(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::InitializeUsbFirmwareUpdateWithoutMemory(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+}
+
+void IHidSystemServer::GetTouchScreenDefaultConfiguration(HLERequestContext& ctx) {
+ LOG_WARNING(Service_HID, "(STUBBED) called");
+
+ Core::HID::TouchScreenConfigurationForNx touchscreen_config{
+ .mode = Core::HID::TouchScreenModeForNx::Finger,
+ };
+
+ if (touchscreen_config.mode != Core::HID::TouchScreenModeForNx::Heat2 &&
+ touchscreen_config.mode != Core::HID::TouchScreenModeForNx::Finger) {
+ touchscreen_config.mode = Core::HID::TouchScreenModeForNx::UseSystemSetting;
+ }
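+    // Editor's note: with mode hardcoded to Finger above, the fallback to UseSystemSetting is
+    // currently unreachable; it only matters if the default mode ever comes from settings.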
+
+ IPC::ResponseBuilder rb{ctx, 6};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(touchscreen_config);
+}
+
+std::shared_ptr<ResourceManager> IHidSystemServer::GetResourceManager() {
+ resource_manager->Initialize();
+ return resource_manager;
+}
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_system_server.h b/src/core/hle/service/hid/hid_system_server.h
new file mode 100644
index 000000000..822d5e5b9
--- /dev/null
+++ b/src/core/hle/service/hid/hid_system_server.h
@@ -0,0 +1,63 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/service.h"
+
+namespace Core {
+class System;
+}
+
+namespace Kernel {
+class KEvent;
+}
+
+namespace Service::HID {
+class ResourceManager;
+
+class IHidSystemServer final : public ServiceFramework<IHidSystemServer> {
+public:
+ explicit IHidSystemServer(Core::System& system_, std::shared_ptr<ResourceManager> resource);
+ ~IHidSystemServer() override;
+
+private:
+ void ApplyNpadSystemCommonPolicy(HLERequestContext& ctx);
+ void EnableAssigningSingleOnSlSrPress(HLERequestContext& ctx);
+ void DisableAssigningSingleOnSlSrPress(HLERequestContext& ctx);
+ void GetLastActiveNpad(HLERequestContext& ctx);
+ void ApplyNpadSystemCommonPolicyFull(HLERequestContext& ctx);
+ void GetNpadFullKeyGripColor(HLERequestContext& ctx);
+ void GetMaskedSupportedNpadStyleSet(HLERequestContext& ctx);
+ void SetSupportedNpadStyleSetAll(HLERequestContext& ctx);
+ void GetAppletDetailedUiType(HLERequestContext& ctx);
+ void GetNpadInterfaceType(HLERequestContext& ctx);
+ void GetNpadLeftRightInterfaceType(HLERequestContext& ctx);
+ void HasBattery(HLERequestContext& ctx);
+ void HasLeftRightBattery(HLERequestContext& ctx);
+ void GetUniquePadsFromNpad(HLERequestContext& ctx);
+ void GetIrSensorState(HLERequestContext& ctx);
+ void AcquireConnectionTriggerTimeoutEvent(HLERequestContext& ctx);
+ void AcquireDeviceRegisteredEventForControllerSupport(HLERequestContext& ctx);
+ void GetRegisteredDevices(HLERequestContext& ctx);
+ void AcquireUniquePadConnectionEventHandle(HLERequestContext& ctx);
+ void GetUniquePadIds(HLERequestContext& ctx);
+ void AcquireJoyDetachOnBluetoothOffEventHandle(HLERequestContext& ctx);
+ void IsUsbFullKeyControllerEnabled(HLERequestContext& ctx);
+ void IsHandheldButtonPressedOnConsoleMode(HLERequestContext& ctx);
+ void InitializeFirmwareUpdate(HLERequestContext& ctx);
+ void InitializeUsbFirmwareUpdateWithoutMemory(HLERequestContext& ctx);
+ void GetTouchScreenDefaultConfiguration(HLERequestContext& ctx);
+
+ std::shared_ptr<ResourceManager> GetResourceManager();
+
+ Kernel::KEvent* acquire_connection_trigger_timeout_event;
+ Kernel::KEvent* acquire_device_registered_event;
+ Kernel::KEvent* joy_detach_event;
+ Kernel::KEvent* unique_pad_connection_event;
+ KernelHelpers::ServiceContext service_context;
+ std::shared_ptr<ResourceManager> resource_manager;
+};
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid_util.h b/src/core/hle/service/hid/hid_util.h
new file mode 100644
index 000000000..b87cc10e3
--- /dev/null
+++ b/src/core/hle/service/hid/hid_util.h
@@ -0,0 +1,146 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "core/hid/hid_types.h"
+#include "core/hle/service/hid/errors.h"
+
+namespace Service::HID {
+
+constexpr bool IsNpadIdValid(const Core::HID::NpadIdType npad_id) {
+ switch (npad_id) {
+ case Core::HID::NpadIdType::Player1:
+ case Core::HID::NpadIdType::Player2:
+ case Core::HID::NpadIdType::Player3:
+ case Core::HID::NpadIdType::Player4:
+ case Core::HID::NpadIdType::Player5:
+ case Core::HID::NpadIdType::Player6:
+ case Core::HID::NpadIdType::Player7:
+ case Core::HID::NpadIdType::Player8:
+ case Core::HID::NpadIdType::Other:
+ case Core::HID::NpadIdType::Handheld:
+ return true;
+ default:
+ return false;
+ }
+}
+
+constexpr Result IsSixaxisHandleValid(const Core::HID::SixAxisSensorHandle& handle) {
+    const bool is_npad_id_valid =
+        IsNpadIdValid(static_cast<Core::HID::NpadIdType>(handle.npad_id));
+    const bool is_device_index_valid =
+        handle.device_index < Core::HID::DeviceIndex::MaxDeviceIndex;
+
+    if (!is_npad_id_valid) {
+        return InvalidNpadId;
+    }
+    if (!is_device_index_valid) {
+ return NpadDeviceIndexOutOfRange;
+ }
+
+ return ResultSuccess;
+}
+
+constexpr Result IsVibrationHandleValid(const Core::HID::VibrationDeviceHandle& handle) {
+ switch (handle.npad_type) {
+ case Core::HID::NpadStyleIndex::ProController:
+ case Core::HID::NpadStyleIndex::Handheld:
+ case Core::HID::NpadStyleIndex::JoyconDual:
+ case Core::HID::NpadStyleIndex::JoyconLeft:
+ case Core::HID::NpadStyleIndex::JoyconRight:
+ case Core::HID::NpadStyleIndex::GameCube:
+ case Core::HID::NpadStyleIndex::N64:
+ case Core::HID::NpadStyleIndex::SystemExt:
+ case Core::HID::NpadStyleIndex::System:
+ // These support vibration
+ break;
+ default:
+ return VibrationInvalidStyleIndex;
+ }
+
+ if (!IsNpadIdValid(static_cast<Core::HID::NpadIdType>(handle.npad_id))) {
+ return VibrationInvalidNpadId;
+ }
+
+ if (handle.device_index >= Core::HID::DeviceIndex::MaxDeviceIndex) {
+ return VibrationDeviceIndexOutOfRange;
+ }
+
+ return ResultSuccess;
+}
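+
+// Editor's sketch (assumed usage in a handler, not from the original change):
+//
+//   if (const Result result = IsVibrationHandleValid(handle); result.IsError()) {
+//       IPC::ResponseBuilder rb{ctx, 2};
+//       rb.Push(result);
+//       return;
+//   }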
+
+/// Converts a Core::HID::NpadIdType to an array index.
+constexpr size_t NpadIdTypeToIndex(Core::HID::NpadIdType npad_id_type) {
+ switch (npad_id_type) {
+ case Core::HID::NpadIdType::Player1:
+ return 0;
+ case Core::HID::NpadIdType::Player2:
+ return 1;
+ case Core::HID::NpadIdType::Player3:
+ return 2;
+ case Core::HID::NpadIdType::Player4:
+ return 3;
+ case Core::HID::NpadIdType::Player5:
+ return 4;
+ case Core::HID::NpadIdType::Player6:
+ return 5;
+ case Core::HID::NpadIdType::Player7:
+ return 6;
+ case Core::HID::NpadIdType::Player8:
+ return 7;
+ case Core::HID::NpadIdType::Handheld:
+ return 8;
+ case Core::HID::NpadIdType::Other:
+ return 9;
+ default:
+ return 8;
+ }
+}
+
+/// Converts an array index to a Core::HID::NpadIdType
+constexpr Core::HID::NpadIdType IndexToNpadIdType(size_t index) {
+ switch (index) {
+ case 0:
+ return Core::HID::NpadIdType::Player1;
+ case 1:
+ return Core::HID::NpadIdType::Player2;
+ case 2:
+ return Core::HID::NpadIdType::Player3;
+ case 3:
+ return Core::HID::NpadIdType::Player4;
+ case 4:
+ return Core::HID::NpadIdType::Player5;
+ case 5:
+ return Core::HID::NpadIdType::Player6;
+ case 6:
+ return Core::HID::NpadIdType::Player7;
+ case 7:
+ return Core::HID::NpadIdType::Player8;
+ case 8:
+ return Core::HID::NpadIdType::Handheld;
+ case 9:
+ return Core::HID::NpadIdType::Other;
+ default:
+ return Core::HID::NpadIdType::Invalid;
+ }
+}
+
+constexpr Core::HID::NpadStyleSet GetStylesetByIndex(std::size_t index) {
+ switch (index) {
+ case 0:
+ return Core::HID::NpadStyleSet::Fullkey;
+ case 1:
+ return Core::HID::NpadStyleSet::Handheld;
+ case 2:
+ return Core::HID::NpadStyleSet::JoyDual;
+ case 3:
+ return Core::HID::NpadStyleSet::JoyLeft;
+ case 4:
+ return Core::HID::NpadStyleSet::JoyRight;
+ case 5:
+ return Core::HID::NpadStyleSet::Palma;
+ default:
+ return Core::HID::NpadStyleSet::None;
+ }
+}
+
+} // namespace Service::HID
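
Editor's note: a minimal usage sketch of the conversion helpers above (purely illustrative; because the functions are constexpr, the round-trips implied by the switch tables can be checked at compile time):

#include "core/hle/service/hid/hid_util.h"

namespace {
using Core::HID::NpadIdType;
// Index <-> id round-trips per the switch tables above.
static_assert(Service::HID::NpadIdTypeToIndex(NpadIdType::Player1) == 0);
static_assert(Service::HID::IndexToNpadIdType(8) == NpadIdType::Handheld);
// Unknown indices map to Invalid; unknown ids fall back to index 8 (Handheld).
static_assert(Service::HID::IndexToNpadIdType(42) == NpadIdType::Invalid);
} // namespace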
diff --git a/src/core/hle/service/hid/hidbus/hidbus_base.cpp b/src/core/hle/service/hid/hidbus/hidbus_base.cpp
index ee522c36e..8c44f93e8 100644
--- a/src/core/hle/service/hid/hidbus/hidbus_base.cpp
+++ b/src/core/hle/service/hid/hidbus/hidbus_base.cpp
@@ -13,7 +13,10 @@ HidbusBase::HidbusBase(Core::System& system_, KernelHelpers::ServiceContext& ser
: system(system_), service_context(service_context_) {
send_command_async_event = service_context.CreateEvent("hidbus:SendCommandAsyncEvent");
}
-HidbusBase::~HidbusBase() = default;
+
+HidbusBase::~HidbusBase() {
+ service_context.CloseEvent(send_command_async_event);
+}
void HidbusBase::ActivateDevice() {
if (is_activated) {
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index 221c33b86..39b9a4474 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -12,6 +12,7 @@
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/hid/errors.h"
+#include "core/hle/service/hid/hid_util.h"
#include "core/hle/service/hid/irs.h"
#include "core/hle/service/hid/irsensor/clustering_processor.h"
#include "core/hle/service/hid/irsensor/image_transfer_processor.h"
@@ -138,7 +139,7 @@ void IRS::RunMomentProcessor(HLERequestContext& ctx) {
if (result.IsSuccess()) {
auto& device = GetIrCameraSharedMemoryDeviceEntry(parameters.camera_handle);
- MakeProcessor<MomentProcessor>(parameters.camera_handle, device);
+ MakeProcessorWithCoreContext<MomentProcessor>(parameters.camera_handle, device);
auto& image_transfer_processor = GetProcessor<MomentProcessor>(parameters.camera_handle);
image_transfer_processor.SetConfig(parameters.processor_config);
npad_device->SetPollingMode(Core::HID::EmulatedDeviceIndex::RightIndex,
@@ -320,7 +321,7 @@ void IRS::GetNpadIrCameraHandle(HLERequestContext& ctx) {
}
Core::IrSensor::IrCameraHandle camera_handle{
- .npad_id = static_cast<u8>(NpadIdTypeToIndex(npad_id)),
+ .npad_id = static_cast<u8>(HID::NpadIdTypeToIndex(npad_id)),
.npad_type = Core::HID::NpadStyleIndex::None,
};
@@ -545,7 +546,7 @@ void IRS::ActivateIrsensorWithFunctionLevel(HLERequestContext& ctx) {
Result IRS::IsIrCameraHandleValid(const Core::IrSensor::IrCameraHandle& camera_handle) const {
if (camera_handle.npad_id >
- static_cast<u8>(NpadIdTypeToIndex(Core::HID::NpadIdType::Handheld))) {
+ static_cast<u8>(HID::NpadIdTypeToIndex(Core::HID::NpadIdType::Handheld))) {
return InvalidIrCameraHandle;
}
if (camera_handle.npad_type != Core::HID::NpadStyleIndex::None) {
diff --git a/src/core/hle/service/hid/irs.h b/src/core/hle/service/hid/irs.h
index a8fa19025..c8e6dab17 100644
--- a/src/core/hle/service/hid/irs.h
+++ b/src/core/hle/service/hid/irs.h
@@ -3,15 +3,12 @@
#pragma once
+#include "core/core.h"
#include "core/hid/hid_types.h"
#include "core/hid/irs_types.h"
#include "core/hle/service/hid/irsensor/processor_base.h"
#include "core/hle/service/service.h"
-namespace Core {
-class System;
-}
-
namespace Core::HID {
class EmulatedController;
} // namespace Core::HID
diff --git a/src/core/hle/service/hid/irsensor/clustering_processor.cpp b/src/core/hle/service/hid/irsensor/clustering_processor.cpp
index e2f4ae876..c559eb0d5 100644
--- a/src/core/hle/service/hid/irsensor/clustering_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/clustering_processor.cpp
@@ -3,16 +3,18 @@
#include <queue>
+#include "core/core.h"
+#include "core/core_timing.h"
#include "core/hid/emulated_controller.h"
#include "core/hid/hid_core.h"
#include "core/hle/service/hid/irsensor/clustering_processor.h"
namespace Service::IRS {
-ClusteringProcessor::ClusteringProcessor(Core::HID::HIDCore& hid_core_,
+ClusteringProcessor::ClusteringProcessor(Core::System& system_,
Core::IrSensor::DeviceFormat& device_format,
std::size_t npad_index)
- : device{device_format} {
- npad_device = hid_core_.GetEmulatedControllerByIndex(npad_index);
+ : device{device_format}, system{system_} {
+ npad_device = system.HIDCore().GetEmulatedControllerByIndex(npad_index);
device.mode = Core::IrSensor::IrSensorMode::ClusteringProcessor;
device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected;
@@ -48,7 +50,7 @@ void ClusteringProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType ty
}
next_state = {};
- const auto camera_data = npad_device->GetCamera();
+ const auto& camera_data = npad_device->GetCamera();
auto filtered_image = camera_data.data;
RemoveLowIntensityData(filtered_image);
@@ -83,7 +85,7 @@ void ClusteringProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType ty
}
next_state.sampling_number = camera_data.sample;
- next_state.timestamp = next_state.timestamp + 131;
+ next_state.timestamp = system.CoreTiming().GetGlobalTimeNs().count();
next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
shared_memory->clustering_lifo.WriteNextEntry(next_state);
@@ -202,14 +204,14 @@ ClusteringProcessor::ClusteringData ClusteringProcessor::MergeCluster(
}
u8 ClusteringProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const {
- if ((y * width) + x > data.size()) {
+ if ((y * width) + x >= data.size()) {
return 0;
}
return data[(y * width) + x];
}
void ClusteringProcessor::SetPixel(std::vector<u8>& data, std::size_t x, std::size_t y, u8 value) {
- if ((y * width) + x > data.size()) {
+ if ((y * width) + x >= data.size()) {
return;
}
data[(y * width) + x] = value;
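
Editor's note: the `>` to `>=` change in GetPixel/SetPixel above is an off-by-one fix, since an index equal to data.size() is already one past the end. A standalone sketch of the corrected check (std::uint8_t stands in for the codebase's u8; width is a placeholder for the processor's image width):

#include <cstddef>
#include <cstdint>
#include <vector>

std::uint8_t GetPixelSafe(const std::vector<std::uint8_t>& data, std::size_t x, std::size_t y,
                          std::size_t width) {
    const std::size_t index = (y * width) + x;
    if (index >= data.size()) { // '>' alone would still accept index == data.size()
        return 0;
    }
    return data[index];
}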
diff --git a/src/core/hle/service/hid/irsensor/clustering_processor.h b/src/core/hle/service/hid/irsensor/clustering_processor.h
index dc01a8ea7..83f34734a 100644
--- a/src/core/hle/service/hid/irsensor/clustering_processor.h
+++ b/src/core/hle/service/hid/irsensor/clustering_processor.h
@@ -8,6 +8,10 @@
#include "core/hle/service/hid/irs_ring_lifo.h"
#include "core/hle/service/hid/irsensor/processor_base.h"
+namespace Core {
+class System;
+}
+
namespace Core::HID {
class EmulatedController;
} // namespace Core::HID
@@ -15,8 +19,7 @@ class EmulatedController;
namespace Service::IRS {
class ClusteringProcessor final : public ProcessorBase {
public:
- explicit ClusteringProcessor(Core::HID::HIDCore& hid_core_,
- Core::IrSensor::DeviceFormat& device_format,
+ explicit ClusteringProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format,
std::size_t npad_index);
~ClusteringProcessor() override;
@@ -106,5 +109,7 @@ private:
Core::IrSensor::DeviceFormat& device;
Core::HID::EmulatedController* npad_device;
int callback_key{};
+
+ Core::System& system;
};
} // namespace Service::IRS
diff --git a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
index 803a6277c..22067a591 100644
--- a/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/image_transfer_processor.cpp
@@ -49,7 +49,7 @@ void ImageTransferProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType
return;
}
- const auto camera_data = npad_device->GetCamera();
+ const auto& camera_data = npad_device->GetCamera();
// This indicates how much ambient light is present
processor_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
diff --git a/src/core/hle/service/hid/irsensor/moment_processor.cpp b/src/core/hle/service/hid/irsensor/moment_processor.cpp
index dbaca420a..cf045bda7 100644
--- a/src/core/hle/service/hid/irsensor/moment_processor.cpp
+++ b/src/core/hle/service/hid/irsensor/moment_processor.cpp
@@ -1,24 +1,137 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hid/emulated_controller.h"
+#include "core/hid/hid_core.h"
#include "core/hle/service/hid/irsensor/moment_processor.h"
namespace Service::IRS {
-MomentProcessor::MomentProcessor(Core::IrSensor::DeviceFormat& device_format)
- : device(device_format) {
+static constexpr auto format = Core::IrSensor::ImageTransferProcessorFormat::Size40x30;
+static constexpr std::size_t ImageWidth = 40;
+static constexpr std::size_t ImageHeight = 30;
+
+MomentProcessor::MomentProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format,
+ std::size_t npad_index)
+ : device(device_format), system{system_} {
+ npad_device = system.HIDCore().GetEmulatedControllerByIndex(npad_index);
+
device.mode = Core::IrSensor::IrSensorMode::MomentProcessor;
device.camera_status = Core::IrSensor::IrCameraStatus::Unconnected;
device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Stopped;
+
+ shared_memory = std::construct_at(
+ reinterpret_cast<MomentSharedMemory*>(&device_format.state.processor_raw_data));
+
+ Core::HID::ControllerUpdateCallback engine_callback{
+ .on_change = [this](Core::HID::ControllerTriggerType type) { OnControllerUpdate(type); },
+ .is_npad_service = true,
+ };
+ callback_key = npad_device->SetCallback(engine_callback);
}
-MomentProcessor::~MomentProcessor() = default;
+MomentProcessor::~MomentProcessor() {
+ npad_device->DeleteCallback(callback_key);
+}
-void MomentProcessor::StartProcessor() {}
+void MomentProcessor::StartProcessor() {
+ device.camera_status = Core::IrSensor::IrCameraStatus::Available;
+ device.camera_internal_status = Core::IrSensor::IrCameraInternalStatus::Ready;
+}
void MomentProcessor::SuspendProcessor() {}
void MomentProcessor::StopProcessor() {}
+void MomentProcessor::OnControllerUpdate(Core::HID::ControllerTriggerType type) {
+ if (type != Core::HID::ControllerTriggerType::IrSensor) {
+ return;
+ }
+
+ next_state = {};
+ const auto& camera_data = npad_device->GetCamera();
+
+ const auto window_width = static_cast<std::size_t>(current_config.window_of_interest.width);
+ const auto window_height = static_cast<std::size_t>(current_config.window_of_interest.height);
+ const auto window_start_x = static_cast<std::size_t>(current_config.window_of_interest.x);
+ const auto window_start_y = static_cast<std::size_t>(current_config.window_of_interest.y);
+
+ const std::size_t block_width = window_width / Columns;
+ const std::size_t block_height = window_height / Rows;
+
+ for (std::size_t row = 0; row < Rows; row++) {
+ for (std::size_t column = 0; column < Columns; column++) {
+ const size_t x_pos = (column * block_width) + window_start_x;
+ const size_t y_pos = (row * block_height) + window_start_y;
+ auto& statistic = next_state.statistic[column + (row * Columns)];
+ statistic = GetStatistic(camera_data.data, x_pos, y_pos, block_width, block_height);
+ }
+ }
+
+ next_state.sampling_number = camera_data.sample;
+ next_state.timestamp = system.CoreTiming().GetGlobalTimeNs().count();
+ next_state.ambient_noise_level = Core::IrSensor::CameraAmbientNoiseLevel::Low;
+ shared_memory->moment_lifo.WriteNextEntry(next_state);
+
+ if (!IsProcessorActive()) {
+ StartProcessor();
+ }
+}
+
+u8 MomentProcessor::GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const {
+ if ((y * ImageWidth) + x >= data.size()) {
+ return 0;
+ }
+ return data[(y * ImageWidth) + x];
+}
+
+MomentProcessor::MomentStatistic MomentProcessor::GetStatistic(const std::vector<u8>& data,
+ std::size_t start_x,
+ std::size_t start_y,
+ std::size_t width,
+ std::size_t height) const {
+ // The actual implementation is always 320x240
+ static constexpr std::size_t RealWidth = 320;
+ static constexpr std::size_t RealHeight = 240;
+ static constexpr std::size_t Threshold = 30;
+ MomentStatistic statistic{};
+ std::size_t active_points{};
+
+    // Sum all data points in the block that meet the threshold
+ for (std::size_t y = 0; y < width; y++) {
+ for (std::size_t x = 0; x < height; x++) {
+ const size_t x_pos = x + start_x;
+ const size_t y_pos = y + start_y;
+ const auto pixel =
+ GetPixel(data, x_pos * ImageWidth / RealWidth, y_pos * ImageHeight / RealHeight);
+
+ if (pixel < Threshold) {
+ continue;
+ }
+
+ statistic.average_intensity += pixel;
+
+ statistic.centroid.x += static_cast<float>(x_pos);
+ statistic.centroid.y += static_cast<float>(y_pos);
+
+ active_points++;
+ }
+ }
+
+    // Return an empty statistic if no points met the threshold
+ if (active_points == 0) {
+ return {};
+ }
+
+    // Finally, calculate the actual centroid and average intensity
+ statistic.centroid.x /= static_cast<float>(active_points);
+ statistic.centroid.y /= static_cast<float>(active_points);
+ statistic.average_intensity /= static_cast<f32>(width * height);
+
+ return statistic;
+}
+
void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig config) {
current_config.camera_config.exposure_time = config.camera_config.exposure_time;
current_config.camera_config.gain = config.camera_config.gain;
@@ -29,6 +142,8 @@ void MomentProcessor::SetConfig(Core::IrSensor::PackedMomentProcessorConfig conf
current_config.preprocess =
static_cast<Core::IrSensor::MomentProcessorPreprocess>(config.preprocess);
current_config.preprocess_intensity_threshold = config.preprocess_intensity_threshold;
+
+ npad_device->SetCameraFormat(format);
}
} // namespace Service::IRS
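
Editor's note, reading GetStatistic above: over the set P of sampled pixels in a block whose intensity I_p meets the threshold (30), it computes

    centroid_x = (sum over P of x_p) / |P|
    centroid_y = (sum over P of y_p) / |P|
    average_intensity = (sum over P of I_p) / (block_width * block_height)

Note that the average divides by the full block area rather than by |P|, an all-zero statistic is returned when |P| == 0, and pixel coordinates are scaled from the 320x240 window space down to the 40x30 camera image before sampling.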
diff --git a/src/core/hle/service/hid/irsensor/moment_processor.h b/src/core/hle/service/hid/irsensor/moment_processor.h
index d4bd22e0f..398cfbdc1 100644
--- a/src/core/hle/service/hid/irsensor/moment_processor.h
+++ b/src/core/hle/service/hid/irsensor/moment_processor.h
@@ -6,12 +6,22 @@
#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hid/irs_types.h"
+#include "core/hle/service/hid/irs_ring_lifo.h"
#include "core/hle/service/hid/irsensor/processor_base.h"
+namespace Core {
+class System;
+}
+
+namespace Core::HID {
+class EmulatedController;
+} // namespace Core::HID
+
namespace Service::IRS {
class MomentProcessor final : public ProcessorBase {
public:
- explicit MomentProcessor(Core::IrSensor::DeviceFormat& device_format);
+ explicit MomentProcessor(Core::System& system_, Core::IrSensor::DeviceFormat& device_format,
+ std::size_t npad_index);
~MomentProcessor() override;
// Called when the processor is initialized
@@ -27,6 +37,9 @@ public:
void SetConfig(Core::IrSensor::PackedMomentProcessorConfig config);
private:
+ static constexpr std::size_t Columns = 8;
+ static constexpr std::size_t Rows = 6;
+
// This is nn::irsensor::MomentProcessorConfig
struct MomentProcessorConfig {
Core::IrSensor::CameraConfig camera_config;
@@ -50,12 +63,29 @@ private:
u64 timestamp;
Core::IrSensor::CameraAmbientNoiseLevel ambient_noise_level;
INSERT_PADDING_BYTES(4);
- std::array<MomentStatistic, 0x30> stadistic;
+ std::array<MomentStatistic, Columns * Rows> statistic;
};
static_assert(sizeof(MomentProcessorState) == 0x258, "MomentProcessorState is an invalid size");
+ struct MomentSharedMemory {
+ Service::IRS::Lifo<MomentProcessorState, 6> moment_lifo;
+ };
+ static_assert(sizeof(MomentSharedMemory) == 0xE20, "MomentSharedMemory is an invalid size");
+
+ void OnControllerUpdate(Core::HID::ControllerTriggerType type);
+ u8 GetPixel(const std::vector<u8>& data, std::size_t x, std::size_t y) const;
+ MomentStatistic GetStatistic(const std::vector<u8>& data, std::size_t start_x,
+ std::size_t start_y, std::size_t width, std::size_t height) const;
+
+ MomentSharedMemory* shared_memory = nullptr;
+ MomentProcessorState next_state{};
+
MomentProcessorConfig current_config{};
Core::IrSensor::DeviceFormat& device;
+ Core::HID::EmulatedController* npad_device;
+ int callback_key{};
+
+ Core::System& system;
};
} // namespace Service::IRS
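
Editor's note, as a quick consistency check on the static_asserts above: Columns * Rows = 48 (0x30) statistics per state, sizeof(MomentProcessorState) = 0x258, so the six-entry LIFO occupies 6 * 0x258 = 0xE10 bytes, leaving 0x10 bytes of the 0xE20 MomentSharedMemory for the Lifo bookkeeping fields.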
diff --git a/src/core/hle/service/hid/resource_manager.cpp b/src/core/hle/service/hid/resource_manager.cpp
new file mode 100644
index 000000000..e76d4eea9
--- /dev/null
+++ b/src/core/hle/service/hid/resource_manager.cpp
@@ -0,0 +1,241 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/hid/hid_core.h"
+#include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/service/hid/resource_manager.h"
+#include "core/hle/service/ipc_helpers.h"
+
+#include "core/hle/service/hid/controllers/console_six_axis.h"
+#include "core/hle/service/hid/controllers/debug_pad.h"
+#include "core/hle/service/hid/controllers/gesture.h"
+#include "core/hle/service/hid/controllers/keyboard.h"
+#include "core/hle/service/hid/controllers/mouse.h"
+#include "core/hle/service/hid/controllers/npad.h"
+#include "core/hle/service/hid/controllers/palma.h"
+#include "core/hle/service/hid/controllers/seven_six_axis.h"
+#include "core/hle/service/hid/controllers/six_axis.h"
+#include "core/hle/service/hid/controllers/stubbed.h"
+#include "core/hle/service/hid/controllers/touchscreen.h"
+#include "core/hle/service/hid/controllers/xpad.h"
+
+namespace Service::HID {
+
+// Update period for each HID device.
+// Period times were obtained by measuring the number of samples per second on hardware with a homebrew app.
+// The correct npad_update_ns is 4ms; it is overclocked here to reduce input lag.
+constexpr auto npad_update_ns = std::chrono::nanoseconds{1 * 1000 * 1000}; // (1ms, 1000Hz)
+constexpr auto default_update_ns = std::chrono::nanoseconds{4 * 1000 * 1000}; // (4ms, 250Hz)
+constexpr auto mouse_keyboard_update_ns = std::chrono::nanoseconds{8 * 1000 * 1000}; // (8ms, 125Hz)
+constexpr auto motion_update_ns = std::chrono::nanoseconds{5 * 1000 * 1000}; // (5ms, 200Hz)
+
+ResourceManager::ResourceManager(Core::System& system_)
+ : system{system_}, service_context{system_, "hid"} {}
+
+ResourceManager::~ResourceManager() = default;
+
+void ResourceManager::Initialize() {
+ if (is_initialized) {
+ return;
+ }
+
+ u8* shared_memory = system.Kernel().GetHidSharedMem().GetPointer();
+ debug_pad = std::make_shared<DebugPad>(system.HIDCore(), shared_memory);
+ mouse = std::make_shared<Mouse>(system.HIDCore(), shared_memory);
+ debug_mouse = std::make_shared<DebugMouse>(system.HIDCore(), shared_memory);
+ keyboard = std::make_shared<Keyboard>(system.HIDCore(), shared_memory);
+ unique_pad = std::make_shared<UniquePad>(system.HIDCore(), shared_memory);
+ npad = std::make_shared<NPad>(system.HIDCore(), shared_memory, service_context);
+ gesture = std::make_shared<Gesture>(system.HIDCore(), shared_memory);
+ touch_screen = std::make_shared<TouchScreen>(system.HIDCore(), shared_memory);
+ xpad = std::make_shared<XPad>(system.HIDCore(), shared_memory);
+
+ palma = std::make_shared<Palma>(system.HIDCore(), shared_memory, service_context);
+
+ home_button = std::make_shared<HomeButton>(system.HIDCore(), shared_memory);
+ sleep_button = std::make_shared<SleepButton>(system.HIDCore(), shared_memory);
+ capture_button = std::make_shared<CaptureButton>(system.HIDCore(), shared_memory);
+
+ six_axis = std::make_shared<SixAxis>(system.HIDCore(), npad);
+ console_six_axis = std::make_shared<ConsoleSixAxis>(system.HIDCore(), shared_memory);
+ seven_six_axis = std::make_shared<SevenSixAxis>(system);
+
+ home_button->SetCommonHeaderOffset(0x4C00);
+ sleep_button->SetCommonHeaderOffset(0x4E00);
+ capture_button->SetCommonHeaderOffset(0x5000);
+ unique_pad->SetCommonHeaderOffset(0x5A00);
+ debug_mouse->SetCommonHeaderOffset(0x3DC00);
+
+ // Homebrew doesn't try to activate some controllers, so we activate them by default
+ npad->Activate();
+ six_axis->Activate();
+ touch_screen->Activate();
+
+ system.HIDCore().ReloadInputDevices();
+ is_initialized = true;
+}
+std::shared_ptr<CaptureButton> ResourceManager::GetCaptureButton() const {
+ return capture_button;
+}
+
+std::shared_ptr<ConsoleSixAxis> ResourceManager::GetConsoleSixAxis() const {
+ return console_six_axis;
+}
+
+std::shared_ptr<DebugMouse> ResourceManager::GetDebugMouse() const {
+ return debug_mouse;
+}
+
+std::shared_ptr<DebugPad> ResourceManager::GetDebugPad() const {
+ return debug_pad;
+}
+
+std::shared_ptr<Gesture> ResourceManager::GetGesture() const {
+ return gesture;
+}
+
+std::shared_ptr<HomeButton> ResourceManager::GetHomeButton() const {
+ return home_button;
+}
+
+std::shared_ptr<Keyboard> ResourceManager::GetKeyboard() const {
+ return keyboard;
+}
+
+std::shared_ptr<Mouse> ResourceManager::GetMouse() const {
+ return mouse;
+}
+
+std::shared_ptr<NPad> ResourceManager::GetNpad() const {
+ return npad;
+}
+
+std::shared_ptr<Palma> ResourceManager::GetPalma() const {
+ return palma;
+}
+
+std::shared_ptr<SevenSixAxis> ResourceManager::GetSevenSixAxis() const {
+ return seven_six_axis;
+}
+
+std::shared_ptr<SixAxis> ResourceManager::GetSixAxis() const {
+ return six_axis;
+}
+
+std::shared_ptr<SleepButton> ResourceManager::GetSleepButton() const {
+ return sleep_button;
+}
+
+std::shared_ptr<TouchScreen> ResourceManager::GetTouchScreen() const {
+ return touch_screen;
+}
+
+std::shared_ptr<UniquePad> ResourceManager::GetUniquePad() const {
+ return unique_pad;
+}
+
+void ResourceManager::UpdateControllers(std::uintptr_t user_data,
+ std::chrono::nanoseconds ns_late) {
+ auto& core_timing = system.CoreTiming();
+ debug_pad->OnUpdate(core_timing);
+ unique_pad->OnUpdate(core_timing);
+ gesture->OnUpdate(core_timing);
+ touch_screen->OnUpdate(core_timing);
+ palma->OnUpdate(core_timing);
+ home_button->OnUpdate(core_timing);
+ sleep_button->OnUpdate(core_timing);
+ capture_button->OnUpdate(core_timing);
+ xpad->OnUpdate(core_timing);
+}
+
+void ResourceManager::UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+ auto& core_timing = system.CoreTiming();
+ npad->OnUpdate(core_timing);
+}
+
+void ResourceManager::UpdateMouseKeyboard(std::uintptr_t user_data,
+ std::chrono::nanoseconds ns_late) {
+ auto& core_timing = system.CoreTiming();
+ mouse->OnUpdate(core_timing);
+ debug_mouse->OnUpdate(core_timing);
+ keyboard->OnUpdate(core_timing);
+}
+
+void ResourceManager::UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
+ auto& core_timing = system.CoreTiming();
+ six_axis->OnUpdate(core_timing);
+ seven_six_axis->OnUpdate(core_timing);
+ console_six_axis->OnUpdate(core_timing);
+}
+
+IAppletResource::IAppletResource(Core::System& system_, std::shared_ptr<ResourceManager> resource)
+ : ServiceFramework{system_, "IAppletResource"} {
+ static const FunctionInfo functions[] = {
+ {0, &IAppletResource::GetSharedMemoryHandle, "GetSharedMemoryHandle"},
+ };
+ RegisterHandlers(functions);
+
+ resource->Initialize();
+
+ // Register update callbacks
+ npad_update_event = Core::Timing::CreateEvent(
+ "HID::UpdatePadCallback",
+ [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
+ -> std::optional<std::chrono::nanoseconds> {
+ const auto guard = LockService();
+ resource->UpdateNpad(user_data, ns_late);
+ return std::nullopt;
+ });
+ default_update_event = Core::Timing::CreateEvent(
+ "HID::UpdateDefaultCallback",
+ [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
+ -> std::optional<std::chrono::nanoseconds> {
+ const auto guard = LockService();
+ resource->UpdateControllers(user_data, ns_late);
+ return std::nullopt;
+ });
+ mouse_keyboard_update_event = Core::Timing::CreateEvent(
+ "HID::UpdateMouseKeyboardCallback",
+ [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
+ -> std::optional<std::chrono::nanoseconds> {
+ const auto guard = LockService();
+ resource->UpdateMouseKeyboard(user_data, ns_late);
+ return std::nullopt;
+ });
+ motion_update_event = Core::Timing::CreateEvent(
+ "HID::UpdateMotionCallback",
+ [this, resource](std::uintptr_t user_data, s64 time, std::chrono::nanoseconds ns_late)
+ -> std::optional<std::chrono::nanoseconds> {
+ const auto guard = LockService();
+ resource->UpdateMotion(user_data, ns_late);
+ return std::nullopt;
+ });
+
+ system.CoreTiming().ScheduleLoopingEvent(npad_update_ns, npad_update_ns, npad_update_event);
+ system.CoreTiming().ScheduleLoopingEvent(default_update_ns, default_update_ns,
+ default_update_event);
+ system.CoreTiming().ScheduleLoopingEvent(mouse_keyboard_update_ns, mouse_keyboard_update_ns,
+ mouse_keyboard_update_event);
+ system.CoreTiming().ScheduleLoopingEvent(motion_update_ns, motion_update_ns,
+ motion_update_event);
+}
+
+IAppletResource::~IAppletResource() {
+ system.CoreTiming().UnscheduleEvent(npad_update_event, 0);
+ system.CoreTiming().UnscheduleEvent(default_update_event, 0);
+ system.CoreTiming().UnscheduleEvent(mouse_keyboard_update_event, 0);
+ system.CoreTiming().UnscheduleEvent(motion_update_event, 0);
+}
+
+void IAppletResource::GetSharedMemoryHandle(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_HID, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(ResultSuccess);
+ rb.PushCopyObjects(&system.Kernel().GetHidSharedMem());
+}
+
+} // namespace Service::HID
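
Editor's note: a minimal sketch of the looping-event pattern registered by IAppletResource above. ExampleService, system and example_update_event are hypothetical members; the Core::Timing calls and the callback signature are the ones used in the diff:

void ExampleService::RegisterUpdateEvent() {
    // 4 ms period, i.e. 250 Hz.
    constexpr auto example_update_ns = std::chrono::nanoseconds{4 * 1000 * 1000};
    example_update_event = Core::Timing::CreateEvent(
        "Example::UpdateCallback",
        [this](std::uintptr_t, s64, std::chrono::nanoseconds)
            -> std::optional<std::chrono::nanoseconds> {
            // Periodic work goes here; returning std::nullopt keeps the configured period.
            return std::nullopt;
        });
    system.CoreTiming().ScheduleLoopingEvent(example_update_ns, example_update_ns,
                                             example_update_event);
}

As in ~IAppletResource, the event should be paired with system.CoreTiming().UnscheduleEvent(example_update_event, 0) on teardown.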
diff --git a/src/core/hle/service/hid/resource_manager.h b/src/core/hle/service/hid/resource_manager.h
new file mode 100644
index 000000000..2b6a9b5e6
--- /dev/null
+++ b/src/core/hle/service/hid/resource_manager.h
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/service.h"
+
+namespace Core::Timing {
+struct EventType;
+}
+
+namespace Service::HID {
+class Controller_Stubbed;
+class ConsoleSixAxis;
+class DebugPad;
+class Gesture;
+class Keyboard;
+class Mouse;
+class NPad;
+class Palma;
+class SevenSixAxis;
+class SixAxis;
+class TouchScreen;
+class XPad;
+
+using CaptureButton = Controller_Stubbed;
+using DebugMouse = Controller_Stubbed;
+using HomeButton = Controller_Stubbed;
+using SleepButton = Controller_Stubbed;
+using UniquePad = Controller_Stubbed;
+
+class ResourceManager {
+
+public:
+ explicit ResourceManager(Core::System& system_);
+ ~ResourceManager();
+
+ void Initialize();
+
+ std::shared_ptr<CaptureButton> GetCaptureButton() const;
+ std::shared_ptr<ConsoleSixAxis> GetConsoleSixAxis() const;
+ std::shared_ptr<DebugMouse> GetDebugMouse() const;
+ std::shared_ptr<DebugPad> GetDebugPad() const;
+ std::shared_ptr<Gesture> GetGesture() const;
+ std::shared_ptr<HomeButton> GetHomeButton() const;
+ std::shared_ptr<Keyboard> GetKeyboard() const;
+ std::shared_ptr<Mouse> GetMouse() const;
+ std::shared_ptr<NPad> GetNpad() const;
+ std::shared_ptr<Palma> GetPalma() const;
+ std::shared_ptr<SevenSixAxis> GetSevenSixAxis() const;
+ std::shared_ptr<SixAxis> GetSixAxis() const;
+ std::shared_ptr<SleepButton> GetSleepButton() const;
+ std::shared_ptr<TouchScreen> GetTouchScreen() const;
+ std::shared_ptr<UniquePad> GetUniquePad() const;
+
+ void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+ void UpdateNpad(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+ void UpdateMouseKeyboard(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+ void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
+
+private:
+ bool is_initialized{false};
+
+ std::shared_ptr<CaptureButton> capture_button = nullptr;
+ std::shared_ptr<ConsoleSixAxis> console_six_axis = nullptr;
+ std::shared_ptr<DebugMouse> debug_mouse = nullptr;
+ std::shared_ptr<DebugPad> debug_pad = nullptr;
+ std::shared_ptr<Gesture> gesture = nullptr;
+ std::shared_ptr<HomeButton> home_button = nullptr;
+ std::shared_ptr<Keyboard> keyboard = nullptr;
+ std::shared_ptr<Mouse> mouse = nullptr;
+ std::shared_ptr<NPad> npad = nullptr;
+ std::shared_ptr<Palma> palma = nullptr;
+ std::shared_ptr<SevenSixAxis> seven_six_axis = nullptr;
+ std::shared_ptr<SixAxis> six_axis = nullptr;
+ std::shared_ptr<SleepButton> sleep_button = nullptr;
+ std::shared_ptr<TouchScreen> touch_screen = nullptr;
+ std::shared_ptr<UniquePad> unique_pad = nullptr;
+ std::shared_ptr<XPad> xpad = nullptr;
+
+ // TODO: Create these resources
+ // std::shared_ptr<AudioControl> audio_control = nullptr;
+ // std::shared_ptr<ButtonConfig> button_config = nullptr;
+ // std::shared_ptr<Config> config = nullptr;
+ // std::shared_ptr<Connection> connection = nullptr;
+ // std::shared_ptr<CustomConfig> custom_config = nullptr;
+ // std::shared_ptr<Digitizer> digitizer = nullptr;
+ // std::shared_ptr<Hdls> hdls = nullptr;
+ // std::shared_ptr<PlayReport> play_report = nullptr;
+ // std::shared_ptr<Rail> rail = nullptr;
+
+ Core::System& system;
+ KernelHelpers::ServiceContext service_context;
+};
+
+class IAppletResource final : public ServiceFramework<IAppletResource> {
+public:
+ explicit IAppletResource(Core::System& system_, std::shared_ptr<ResourceManager> resource);
+ ~IAppletResource() override;
+
+private:
+ void GetSharedMemoryHandle(HLERequestContext& ctx);
+
+ std::shared_ptr<Core::Timing::EventType> npad_update_event;
+ std::shared_ptr<Core::Timing::EventType> default_update_event;
+ std::shared_ptr<Core::Timing::EventType> mouse_keyboard_update_event;
+ std::shared_ptr<Core::Timing::EventType> motion_update_event;
+};
+
+} // namespace Service::HID
diff --git a/src/core/hle/service/hid/ring_lifo.h b/src/core/hle/service/hid/ring_lifo.h
index 65eb7ea02..0816784e0 100644
--- a/src/core/hle/service/hid/ring_lifo.h
+++ b/src/core/hle/service/hid/ring_lifo.h
@@ -32,15 +32,15 @@ struct Lifo {
}
std::size_t GetPreviousEntryIndex() const {
- return static_cast<size_t>((buffer_tail + total_buffer_count - 1) % total_buffer_count);
+ return static_cast<size_t>((buffer_tail + max_buffer_size - 1) % max_buffer_size);
}
std::size_t GetNextEntryIndex() const {
- return static_cast<size_t>((buffer_tail + 1) % total_buffer_count);
+ return static_cast<size_t>((buffer_tail + 1) % max_buffer_size);
}
void WriteNextEntry(const State& new_state) {
- if (buffer_count < total_buffer_count - 1) {
+ if (buffer_count < static_cast<s64>(max_buffer_size) - 1) {
buffer_count++;
}
buffer_tail = GetNextEntryIndex();
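
Editor's note: the switch from total_buffer_count to the max_buffer_size template parameter keeps the neighbour-index arithmetic modular over the actual array size. A small compile-time sketch of the wrap-around (N and tail are hypothetical values):

#include <cstddef>

constexpr std::size_t N = 17;            // stands in for max_buffer_size
constexpr std::size_t tail = 0;          // stands in for buffer_tail
static_assert((tail + N - 1) % N == 16); // previous entry wraps to the last slot
static_assert((tail + 1) % N == 1);      // next entry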
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp
index 6a313a03b..f51e63564 100644
--- a/src/core/hle/service/kernel_helpers.cpp
+++ b/src/core/hle/service/kernel_helpers.cpp
@@ -21,10 +21,8 @@ ServiceContext::ServiceContext(Core::System& system_, std::string name_)
// Create the process.
process = Kernel::KProcess::Create(kernel);
- ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
- Kernel::KProcess::ProcessType::KernelInternal,
- kernel.GetSystemResourceLimit())
- .IsSuccess());
+ ASSERT(R_SUCCEEDED(process->Initialize(Kernel::Svc::CreateProcessParameter{},
+ kernel.GetSystemResourceLimit(), false)));
// Register the process.
Kernel::KProcess::Register(kernel, process);
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index 7927f8264..961f89a14 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -115,12 +115,20 @@ public:
{400, nullptr, "InitializeSystem"},
{401, nullptr, "FinalizeSystem"},
{402, nullptr, "SetOperationMode"},
- {403, nullptr, "InitializeSystem2"},
+ {403, &ISystemLocalCommunicationService::InitializeSystem2, "InitializeSystem2"},
};
// clang-format on
RegisterHandlers(functions);
}
+
+private:
+ void InitializeSystem2(HLERequestContext& ctx) {
+ LOG_WARNING(Service_LDN, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ResultSuccess);
+ }
};
class IUserLocalCommunicationService final
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index c73035c77..97b6a9385 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -286,9 +286,14 @@ public:
rb.Push(ResultSuccess);
}
- bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
+ bool ValidateRegionForMap(Kernel::KProcessPageTable& page_table, VAddr start,
+ std::size_t size) const {
const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
- const auto start_info{page_table.QueryInfo(start - 1)};
+
+ Kernel::KMemoryInfo start_info;
+ Kernel::Svc::PageInfo page_info;
+ R_ASSERT(
+ page_table.QueryInfo(std::addressof(start_info), std::addressof(page_info), start - 1));
if (start_info.GetState() != Kernel::KMemoryState::Free) {
return {};
@@ -298,7 +303,9 @@ public:
return {};
}
- const auto end_info{page_table.QueryInfo(start + size)};
+ Kernel::KMemoryInfo end_info;
+ R_ASSERT(page_table.QueryInfo(std::addressof(end_info), std::addressof(page_info),
+ start + size));
if (end_info.GetState() != Kernel::KMemoryState::Free) {
return {};
@@ -307,7 +314,7 @@ public:
return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
}
- Result GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
+ Result GetAvailableMapRegion(Kernel::KProcessPageTable& page_table, u64 size, VAddr& out_addr) {
size = Common::AlignUp(size, Kernel::PageSize);
size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;
@@ -391,12 +398,8 @@ public:
if (bss_size) {
auto block_guard = detail::ScopeExit([&] {
- page_table.UnmapCodeMemory(
- addr + nro_size, bss_addr, bss_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
- page_table.UnmapCodeMemory(
- addr, nro_addr, nro_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange);
+ page_table.UnmapCodeMemory(addr + nro_size, bss_addr, bss_size);
+ page_table.UnmapCodeMemory(addr, nro_addr, nro_size);
});
const Result result{page_table.MapCodeMemory(addr + nro_size, bss_addr, bss_size)};
@@ -578,21 +581,17 @@ public:
auto& page_table{system.ApplicationProcess()->GetPageTable()};
if (info.bss_size != 0) {
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address + info.text_size + info.ro_size + info.data_size, info.bss_address,
- info.bss_size, Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size +
+ info.data_size,
+ info.bss_address, info.bss_size));
}
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address + info.text_size + info.ro_size,
- info.src_addr + info.text_size + info.ro_size, info.data_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
- R_TRY(page_table.UnmapCodeMemory(
- info.nro_address, info.src_addr, info.text_size,
- Kernel::KPageTable::ICacheInvalidationStrategy::InvalidateRange));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size + info.ro_size,
+ info.src_addr + info.text_size + info.ro_size,
+ info.data_size));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address + info.text_size,
+ info.src_addr + info.text_size, info.ro_size));
+ R_TRY(page_table.UnmapCodeMemory(info.nro_address, info.src_addr, info.text_size));
return ResultSuccess;
}
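
Editor's note: the out-parameter QueryInfo style adopted above, shown in isolation (page_table and addr are placeholders; the call signature is taken from the diff):

Kernel::KMemoryInfo info;
Kernel::Svc::PageInfo page_info;
R_ASSERT(page_table.QueryInfo(std::addressof(info), std::addressof(page_info), addr));
const bool is_free = info.GetState() == Kernel::KMemoryState::Free;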
diff --git a/src/core/hle/service/mii/types/ver3_store_data.cpp b/src/core/hle/service/mii/types/ver3_store_data.cpp
index a019cc9f7..c27646fcf 100644
--- a/src/core/hle/service/mii/types/ver3_store_data.cpp
+++ b/src/core/hle/service/mii/types/ver3_store_data.cpp
@@ -98,7 +98,7 @@ void Ver3StoreData::BuildToStoreData(StoreData& out_store_data) const {
}
void Ver3StoreData::BuildFromStoreData(const StoreData& store_data) {
- version = 1;
+ version = 3;
mii_information.gender.Assign(static_cast<u8>(store_data.GetGender()));
mii_information.favorite_color.Assign(static_cast<u8>(store_data.GetFavoriteColor()));
height = store_data.GetHeight();
diff --git a/src/core/hle/service/nfc/common/device.cpp b/src/core/hle/service/nfc/common/device.cpp
index e7a00deb3..47516f883 100644
--- a/src/core/hle/service/nfc/common/device.cpp
+++ b/src/core/hle/service/nfc/common/device.cpp
@@ -401,6 +401,12 @@ Result NfcDevice::SendCommandByPassThrough(const Time::Clock::TimeSpanType& time
}
Result NfcDevice::Mount(NFP::ModelType model_type, NFP::MountTarget mount_target_) {
+ bool is_corrupted = false;
+
+ if (model_type != NFP::ModelType::Amiibo) {
+ return ResultInvalidArgument;
+ }
+
if (device_state != DeviceState::TagFound) {
LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
return ResultWrongDeviceState;
@@ -420,26 +426,32 @@ Result NfcDevice::Mount(NFP::ModelType model_type, NFP::MountTarget mount_target
if (is_plain_amiibo) {
std::vector<u8> data(sizeof(NFP::NTAG215File));
memcpy(data.data(), &tag_data, sizeof(tag_data));
- WriteBackupData(tag_data.uid, data);
+ }
- device_state = DeviceState::TagMounted;
- mount_target = mount_target_;
- return ResultSuccess;
+ if (!is_plain_amiibo && !NFP::AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) {
+ LOG_ERROR(Service_NFP, "Can't decode amiibo");
+ is_corrupted = true;
}
- if (!NFP::AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) {
- bool has_backup = HasBackup(encrypted_tag_data.uuid).IsSuccess();
- LOG_ERROR(Service_NFP, "Can't decode amiibo, has_backup= {}", has_backup);
- return has_backup ? ResultCorruptedDataWithBackup : ResultCorruptedData;
+ if (tag_data.settings.settings.amiibo_initialized && !tag_data.owner_mii.IsValid()) {
+ LOG_ERROR(Service_NFP, "Invalid mii data");
+ is_corrupted = true;
}
- std::vector<u8> data(sizeof(NFP::EncryptedNTAG215File));
- memcpy(data.data(), &encrypted_tag_data, sizeof(encrypted_tag_data));
- WriteBackupData(encrypted_tag_data.uuid, data);
+ if (!is_corrupted) {
+ std::vector<u8> data(sizeof(NFP::EncryptedNTAG215File));
+ memcpy(data.data(), &encrypted_tag_data, sizeof(encrypted_tag_data));
+ WriteBackupData(encrypted_tag_data.uuid, data);
+ }
device_state = DeviceState::TagMounted;
mount_target = mount_target_;
+ if (is_corrupted) {
+ bool has_backup = HasBackup(encrypted_tag_data.uuid).IsSuccess();
+ return has_backup ? ResultCorruptedDataWithBackup : ResultCorruptedData;
+ }
+
return ResultSuccess;
}
@@ -606,6 +618,17 @@ Result NfcDevice::Restore() {
}
}
+    // Restore the mii data in case it was corrupted by previous instances of yuzu
+ if (tag_data.settings.settings.amiibo_initialized && !tag_data.owner_mii.IsValid()) {
+ LOG_ERROR(Service_NFP, "Regenerating mii data");
+ Mii::StoreData new_mii{};
+ new_mii.BuildRandom(Mii::Age::All, Mii::Gender::All, Mii::Race::All);
+ new_mii.SetNickname({u'y', u'u', u'z', u'u', u'\0'});
+
+ tag_data.owner_mii.BuildFromStoreData(new_mii);
+ tag_data.mii_extension.SetFromStoreData(new_mii);
+ }
+
// Overwrite tag contents with backup and mount the tag
tag_data = temporary_tag_data;
encrypted_tag_data = temporary_encrypted_tag_data;
@@ -851,25 +874,6 @@ Result NfcDevice::SetRegisterInfoPrivate(const NFP::RegisterInfoPrivate& registe
return Flush();
}
-Result NfcDevice::RestoreAmiibo() {
- if (device_state != DeviceState::TagMounted) {
- LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
- if (device_state == DeviceState::TagRemoved) {
- return ResultTagRemoved;
- }
- return ResultWrongDeviceState;
- }
-
- if (mount_target == NFP::MountTarget::None || mount_target == NFP::MountTarget::Rom) {
- LOG_ERROR(Service_NFP, "Amiibo is read only", device_state);
- return ResultWrongDeviceState;
- }
-
- // TODO: Load amiibo from backup on system
- LOG_ERROR(Service_NFP, "Not Implemented");
- return ResultSuccess;
-}
-
Result NfcDevice::Format() {
Result result = ResultSuccess;
@@ -877,7 +881,9 @@ Result NfcDevice::Format() {
result = Mount(NFP::ModelType::Amiibo, NFP::MountTarget::All);
}
- if (result.IsError()) {
+ // We are formatting all data. Corruption is not an issue.
+ if (result.IsError() &&
+ (result != ResultCorruptedData && result != ResultCorruptedDataWithBackup)) {
return result;
}
diff --git a/src/core/hle/service/nfc/common/device.h b/src/core/hle/service/nfc/common/device.h
index 0ed1ff34c..d8efe25ec 100644
--- a/src/core/hle/service/nfc/common/device.h
+++ b/src/core/hle/service/nfc/common/device.h
@@ -68,7 +68,6 @@ public:
Result DeleteRegisterInfo();
Result SetRegisterInfoPrivate(const NFP::RegisterInfoPrivate& register_info);
- Result RestoreAmiibo();
Result Format();
Result OpenApplicationArea(u32 access_id);
diff --git a/src/core/hle/service/nfc/common/device_manager.cpp b/src/core/hle/service/nfc/common/device_manager.cpp
index a71d26157..ad534177d 100644
--- a/src/core/hle/service/nfc/common/device_manager.cpp
+++ b/src/core/hle/service/nfc/common/device_manager.cpp
@@ -7,6 +7,7 @@
#include "core/core.h"
#include "core/hid/hid_types.h"
#include "core/hle/kernel/k_event.h"
+#include "core/hle/service/hid/hid_util.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/hle/service/nfc/common/device.h"
#include "core/hle/service/nfc/common/device_manager.h"
@@ -24,7 +25,7 @@ DeviceManager::DeviceManager(Core::System& system_, KernelHelpers::ServiceContex
for (u32 device_index = 0; device_index < devices.size(); device_index++) {
devices[device_index] =
- std::make_shared<NfcDevice>(Core::HID::IndexToNpadIdType(device_index), system,
+ std::make_shared<NfcDevice>(HID::IndexToNpadIdType(device_index), system,
service_context, availability_change_event);
}
diff --git a/src/core/hle/service/nvdrv/devices/ioctl_serialization.h b/src/core/hle/service/nvdrv/devices/ioctl_serialization.h
new file mode 100644
index 000000000..b12bcd138
--- /dev/null
+++ b/src/core/hle/service/nvdrv/devices/ioctl_serialization.h
@@ -0,0 +1,159 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+#include <vector>
+
+#include "common/concepts.h"
+#include "core/hle/service/nvdrv/devices/nvdevice.h"
+
+namespace Service::Nvidia::Devices {
+
+struct IoctlOneArgTraits {
+ template <typename T, typename R, typename A, typename... B>
+ static A GetFirstArgImpl(R (T::*)(A, B...));
+};
+
+struct IoctlTwoArgTraits {
+ template <typename T, typename R, typename A, typename B, typename... C>
+ static A GetFirstArgImpl(R (T::*)(A, B, C...));
+
+ template <typename T, typename R, typename A, typename B, typename... C>
+ static B GetSecondArgImpl(R (T::*)(A, B, C...));
+};
+
+struct Null {};
+
+// clang-format off
+
+template <typename FixedArg, typename VarArg, typename InlInVarArg, typename InlOutVarArg, typename F>
+NvResult WrapGeneric(F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, std::span<u8> inline_output) {
+ constexpr bool HasFixedArg = !std::is_same_v<FixedArg, Null>;
+ constexpr bool HasVarArg = !std::is_same_v<VarArg, Null>;
+ constexpr bool HasInlInVarArg = !std::is_same_v<InlInVarArg, Null>;
+ constexpr bool HasInlOutVarArg = !std::is_same_v<InlOutVarArg, Null>;
+
+ // Declare the fixed-size input value.
+ FixedArg fixed{};
+ size_t var_offset = 0;
+
+ if constexpr (HasFixedArg) {
+ // Read the fixed-size input value.
+ var_offset = std::min(sizeof(FixedArg), input.size());
+ if (var_offset > 0) {
+ std::memcpy(&fixed, input.data(), var_offset);
+ }
+ }
+
+ // Read the variable-sized inputs.
+ const size_t num_var_args = HasVarArg ? ((input.size() - var_offset) / sizeof(VarArg)) : 0;
+ std::vector<VarArg> var_args(num_var_args);
+ if constexpr (HasVarArg) {
+ if (num_var_args > 0) {
+ std::memcpy(var_args.data(), input.data() + var_offset, num_var_args * sizeof(VarArg));
+ }
+ }
+
+ const size_t num_inl_in_var_args = HasInlInVarArg ? (inline_input.size() / sizeof(InlInVarArg)) : 0;
+ std::vector<InlInVarArg> inl_in_var_args(num_inl_in_var_args);
+ if constexpr (HasInlInVarArg) {
+ if (num_inl_in_var_args > 0) {
+ std::memcpy(inl_in_var_args.data(), inline_input.data(), num_inl_in_var_args * sizeof(InlInVarArg));
+ }
+ }
+
+ // Construct inline output data.
+ const size_t num_inl_out_var_args = HasInlOutVarArg ? (inline_output.size() / sizeof(InlOutVarArg)) : 0;
+ std::vector<InlOutVarArg> inl_out_var_args(num_inl_out_var_args);
+
+ // Perform the call.
+ NvResult result = callable(fixed, var_args, inl_in_var_args, inl_out_var_args);
+
+ // Copy outputs.
+ if constexpr (HasFixedArg) {
+ if (output.size() > 0) {
+ std::memcpy(output.data(), &fixed, std::min(output.size(), sizeof(FixedArg)));
+ }
+ }
+
+ if constexpr (HasVarArg) {
+ if (num_var_args > 0 && output.size() > var_offset) {
+ const size_t max_var_size = output.size() - var_offset;
+ std::memcpy(output.data() + var_offset, var_args.data(), std::min(max_var_size, num_var_args * sizeof(VarArg)));
+ }
+ }
+
+ // Copy inline outputs.
+ if constexpr (HasInlOutVarArg) {
+ if (num_inl_out_var_args > 0) {
+ std::memcpy(inline_output.data(), inl_out_var_args.data(), num_inl_out_var_args * sizeof(InlOutVarArg));
+ }
+ }
+
+ // We're done.
+ return result;
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixed(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, Null, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedInlOut(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, std::span<u8> inline_output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+ using InlOutVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, inl_out, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, Null, Null, InlOutVarArg>(std::move(Callable), input, {}, output, inline_output);
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+ using VarArg = typename std::remove_reference_t<decltype(IoctlOneArgTraits::GetFirstArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(var, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<Null, VarArg, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedVariable(Self* self, F&& callable, std::span<const u8> input, std::span<u8> output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+ using VarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, var, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, VarArg, Null, Null>(std::move(Callable), input, {}, output, {});
+}
+
+template <typename Self, typename F, typename... Rest>
+NvResult WrapFixedInlIn(Self* self, F&& callable, std::span<const u8> input, std::span<const u8> inline_input, std::span<u8> output, Rest&&... rest) {
+ using FixedArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetFirstArgImpl(callable))>;
+ using InlInVarArg = typename std::remove_reference_t<decltype(IoctlTwoArgTraits::GetSecondArgImpl(callable))>::value_type;
+
+ const auto Callable = [&](auto& fixed, auto& var, auto& inl_in, auto& inl_out) -> NvResult {
+ return (self->*callable)(fixed, inl_in, std::forward<Rest>(rest)...);
+ };
+
+ return WrapGeneric<FixedArg, Null, InlInVarArg, Null>(std::move(Callable), input, inline_input, output, {});
+}
+
+// clang-format on
+
+} // namespace Service::Nvidia::Devices
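
Editor's note: a hypothetical handler and its dispatch through WrapFixed, to show how the traits above deduce the parameter struct (IoctlExample and ExampleDevice are illustrative; the copy-in/copy-out behaviour is the WrapGeneric path):

// Illustrative only.
struct IoctlExample {
    u32 value;
};

class ExampleDevice {
public:
    // Fixed-size in/out parameter struct, as expected by WrapFixed.
    NvResult DoExample(IoctlExample& params) {
        params.value += 1;
        return NvResult::Success;
    }

    NvResult Ioctl1(std::span<const u8> input, std::span<u8> output) {
        // IoctlOneArgTraits deduces IoctlExample from &ExampleDevice::DoExample;
        // WrapGeneric then copies `input` into the struct, invokes the handler,
        // and copies the struct back into `output`.
        return WrapFixed(this, &ExampleDevice::DoExample, input, output);
    }
};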
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 7d7bb8687..6b3639008 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -11,6 +11,7 @@
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -33,21 +34,21 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> i
case 'A':
switch (command.cmd) {
case 0x1:
- return BindChannel(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::BindChannel, input, output);
case 0x2:
- return AllocateSpace(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::AllocateSpace, input, output);
case 0x3:
- return FreeSpace(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output);
case 0x5:
- return UnmapBuffer(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output);
case 0x6:
- return MapBufferEx(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output);
case 0x8:
- return GetVARegions(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output);
case 0x9:
- return AllocAsEx(input, output);
+ return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output);
case 0x14:
- return Remap(input, output);
+ return WrapVariable(this, &nvhost_as_gpu::Remap, input, output);
default:
break;
}
@@ -72,7 +73,8 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
case 'A':
switch (command.cmd) {
case 0x8:
- return GetVARegions(input, output, inline_output);
+ return WrapFixedInlOut(this, &nvhost_as_gpu::GetVARegions3, input, output,
+ inline_output);
default:
break;
}
@@ -87,10 +89,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocAsEx params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
std::scoped_lock lock(mutex);
@@ -141,10 +140,7 @@ NvResult nvhost_as_gpu::AllocAsEx(std::span<const u8> input, std::span<u8> outpu
return NvResult::Success;
}
-NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocSpace params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
params.page_size, params.flags);
@@ -194,7 +190,6 @@ NvResult nvhost_as_gpu::AllocateSpace(std::span<const u8> input, std::span<u8> o
.big_pages = params.page_size != VM::YUZU_PAGESIZE,
};
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
@@ -222,10 +217,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
mapping_map.erase(offset);
}
-NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> output) {
- IoctlFreeSpace params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
params.pages, params.page_size);
@@ -264,18 +256,11 @@ NvResult nvhost_as_gpu::FreeSpace(std::span<const u8> input, std::span<u8> outpu
return NvResult::BadValue;
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
- const auto num_entries = input.size() / sizeof(IoctlRemapEntry);
-
- LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
-
- std::scoped_lock lock(mutex);
- entries.resize_destructive(num_entries);
- std::memcpy(entries.data(), input.data(), input.size());
+NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
+ LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size());
if (!vm.initialised) {
return NvResult::BadValue;
@@ -317,14 +302,10 @@ NvResult nvhost_as_gpu::Remap(std::span<const u8> input, std::span<u8> output) {
}
}
- std::memcpy(output.data(), entries.data(), output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> output) {
- IoctlMapBufferEx params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
LOG_DEBUG(Service_NVDRV,
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
", offset={}",
@@ -421,14 +402,10 @@ NvResult nvhost_as_gpu::MapBufferEx(std::span<const u8> input, std::span<u8> out
mapping_map[params.offset] = mapping;
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
- IoctlUnmapBuffer params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
std::scoped_lock lock(mutex);
@@ -464,9 +441,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(std::span<const u8> input, std::span<u8> out
return NvResult::Success;
}
-NvResult nvhost_as_gpu::BindChannel(std::span<const u8> input, std::span<u8> output) {
- IoctlBindChannel params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_as_gpu::BindChannel(IoctlBindChannel& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
@@ -493,10 +468,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
};
}
-NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output) {
- IoctlGetVaRegions params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::GetVARegions1(IoctlGetVaRegions& params) {
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
@@ -508,15 +480,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou
GetVARegionsImpl(params);
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output) {
- IoctlGetVaRegions params{};
- std::memcpy(&params, input.data(), input.size());
-
+NvResult nvhost_as_gpu::GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions) {
LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
params.buf_size);
@@ -528,9 +495,10 @@ NvResult nvhost_as_gpu::GetVARegions(std::span<const u8> input, std::span<u8> ou
GetVARegionsImpl(params);
- std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
- std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));
+ const size_t num_regions = std::min(params.regions.size(), regions.size());
+ for (size_t i = 0; i < num_regions; i++) {
+ regions[i] = params.regions[i];
+ }
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 2af3e1260..932997e75 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -139,18 +139,17 @@ private:
static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
"IoctlGetVaRegions is incorrect size");
- NvResult AllocAsEx(std::span<const u8> input, std::span<u8> output);
- NvResult AllocateSpace(std::span<const u8> input, std::span<u8> output);
- NvResult Remap(std::span<const u8> input, std::span<u8> output);
- NvResult MapBufferEx(std::span<const u8> input, std::span<u8> output);
- NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
- NvResult FreeSpace(std::span<const u8> input, std::span<u8> output);
- NvResult BindChannel(std::span<const u8> input, std::span<u8> output);
+ NvResult AllocAsEx(IoctlAllocAsEx& params);
+ NvResult AllocateSpace(IoctlAllocSpace& params);
+ NvResult Remap(std::span<IoctlRemapEntry> params);
+ NvResult MapBufferEx(IoctlMapBufferEx& params);
+ NvResult UnmapBuffer(IoctlUnmapBuffer& params);
+ NvResult FreeSpace(IoctlFreeSpace& params);
+ NvResult BindChannel(IoctlBindChannel& params);
void GetVARegionsImpl(IoctlGetVaRegions& params);
- NvResult GetVARegions(std::span<const u8> input, std::span<u8> output);
- NvResult GetVARegions(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output);
+ NvResult GetVARegions1(IoctlGetVaRegions& params);
+ NvResult GetVARegions3(IoctlGetVaRegions& params, std::span<VaRegion> regions);
void FreeMappingLocked(u64 offset);
@@ -213,7 +212,6 @@ private:
bool initialised{};
} vm;
std::shared_ptr<Tegra::MemoryManager> gmmu;
- Common::ScratchBuffer<IoctlRemapEntry> entries;
// s32 channel{};
// u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 4d55554b4..b8dd34e24 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -14,6 +14,7 @@
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
#include "video_core/gpu.h"
#include "video_core/host1x/host1x.h"
@@ -40,19 +41,19 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inp
case 0x0:
switch (command.cmd) {
case 0x1b:
- return NvOsGetConfigU32(input, output);
+ return WrapFixed(this, &nvhost_ctrl::NvOsGetConfigU32, input, output);
case 0x1c:
- return IocCtrlClearEventWait(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlClearEventWait, input, output);
case 0x1d:
- return IocCtrlEventWait(input, output, true);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, true);
case 0x1e:
- return IocCtrlEventWait(input, output, false);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventWait, input, output, false);
case 0x1f:
- return IocCtrlEventRegister(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventRegister, input, output);
case 0x20:
- return IocCtrlEventUnregister(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregister, input, output);
case 0x21:
- return IocCtrlEventUnregisterBatch(input, output);
+ return WrapFixed(this, &nvhost_ctrl::IocCtrlEventUnregisterBatch, input, output);
}
break;
default:
@@ -79,25 +80,19 @@ void nvhost_ctrl::OnOpen(DeviceFD fd) {}
void nvhost_ctrl::OnClose(DeviceFD fd) {}
-NvResult nvhost_ctrl::NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output) {
- IocGetConfigParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::NvOsGetConfigU32(IocGetConfigParams& params) {
LOG_TRACE(Service_NVDRV, "called, setting={}!{}", params.domain_str.data(),
params.param_str.data());
return NvResult::ConfigVarNotFound; // Returns error on production mode
}
-NvResult nvhost_ctrl::IocCtrlEventWait(std::span<const u8> input, std::span<u8> output,
- bool is_allocation) {
- IocCtrlEventWaitParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation) {
LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
params.fence.id, params.fence.value, params.timeout, is_allocation);
bool must_unmark_fail = !is_allocation;
const u32 event_id = params.value.raw;
SCOPE_EXIT({
- std::memcpy(output.data(), &params, sizeof(params));
if (must_unmark_fail) {
events[event_id].fails = 0;
}
@@ -231,9 +226,7 @@ NvResult nvhost_ctrl::FreeEvent(u32 slot) {
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventRegisterParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventRegister(IocCtrlEventRegisterParams& params) {
const u32 event_id = params.user_event_id;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
if (event_id >= MaxNvEvents) {
@@ -252,9 +245,7 @@ NvResult nvhost_ctrl::IocCtrlEventRegister(std::span<const u8> input, std::span<
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventUnregisterParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params) {
const u32 event_id = params.user_event_id & 0x00FF;
LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
@@ -262,9 +253,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(std::span<const u8> input, std::spa
return FreeEvent(event_id);
}
-NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventUnregisterBatchParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params) {
u64 event_mask = params.user_events;
LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);
@@ -280,10 +269,7 @@ NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(std::span<const u8> input, std
return NvResult::Success;
}
-NvResult nvhost_ctrl::IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output) {
- IocCtrlEventClearParams params{};
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvhost_ctrl::IocCtrlClearEventWait(IocCtrlEventClearParams& params) {
u32 event_id = params.event_id.slot;
LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 2efed4862..992124b60 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -186,12 +186,12 @@ private:
static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
"IocCtrlEventKill is incorrect size");
- NvResult NvOsGetConfigU32(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlEventWait(std::span<const u8> input, std::span<u8> output, bool is_allocation);
- NvResult IocCtrlEventRegister(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlEventUnregister(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlEventUnregisterBatch(std::span<const u8> input, std::span<u8> output);
- NvResult IocCtrlClearEventWait(std::span<const u8> input, std::span<u8> output);
+ NvResult NvOsGetConfigU32(IocGetConfigParams& params);
+ NvResult IocCtrlEventRegister(IocCtrlEventRegisterParams& params);
+ NvResult IocCtrlEventUnregister(IocCtrlEventUnregisterParams& params);
+ NvResult IocCtrlEventUnregisterBatch(IocCtrlEventUnregisterBatchParams& params);
+ NvResult IocCtrlEventWait(IocCtrlEventWaitParams& params, bool is_allocation);
+ NvResult IocCtrlClearEventWait(IocCtrlEventClearParams& params);
NvResult FreeEvent(u32 slot);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 6081d92e9..61a2df121 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -6,6 +6,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
@@ -27,23 +28,23 @@ NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8>
case 'G':
switch (command.cmd) {
case 0x1:
- return ZCullGetCtxSize(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetCtxSize, input, output);
case 0x2:
- return ZCullGetInfo(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZCullGetInfo, input, output);
case 0x3:
- return ZBCSetTable(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZBCSetTable, input, output);
case 0x4:
- return ZBCQueryTable(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::ZBCQueryTable, input, output);
case 0x5:
- return GetCharacteristics(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetCharacteristics1, input, output);
case 0x6:
- return GetTPCMasks(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetTPCMasks1, input, output);
case 0x7:
- return FlushL2(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::FlushL2, input, output);
case 0x14:
- return GetActiveSlotMask(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetActiveSlotMask, input, output);
case 0x1c:
- return GetGpuTime(input, output);
+ return WrapFixed(this, &nvhost_ctrl_gpu::GetGpuTime, input, output);
default:
break;
}
@@ -65,9 +66,11 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
case 'G':
switch (command.cmd) {
case 0x5:
- return GetCharacteristics(input, output, inline_output);
+ return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetCharacteristics3, input, output,
+ inline_output);
case 0x6:
- return GetTPCMasks(input, output, inline_output);
+ return WrapFixedInlOut(this, &nvhost_ctrl_gpu::GetTPCMasks3, input, output,
+ inline_output);
default:
break;
}
@@ -82,10 +85,8 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {
LOG_DEBUG(Service_NVDRV, "called");
- IoctlCharacteristics params{};
- std::memcpy(&params, input.data(), input.size());
params.gc.arch = 0x120;
params.gc.impl = 0xb;
params.gc.rev = 0xa1;
@@ -123,15 +124,13 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa
params.gc.gr_compbit_store_base_hw = 0x0;
params.gpu_characteristics_buf_size = 0xA0;
params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output) {
+NvResult nvhost_ctrl_gpu::GetCharacteristics3(
+ IoctlCharacteristics& params, std::span<IoctlGpuCharacteristics> gpu_characteristics) {
LOG_DEBUG(Service_NVDRV, "called");
- IoctlCharacteristics params{};
- std::memcpy(&params, input.data(), input.size());
+
params.gc.arch = 0x120;
params.gc.impl = 0xb;
params.gc.rev = 0xa1;
@@ -169,70 +168,47 @@ NvResult nvhost_ctrl_gpu::GetCharacteristics(std::span<const u8> input, std::spa
params.gc.gr_compbit_store_base_hw = 0x0;
params.gpu_characteristics_buf_size = 0xA0;
params.gpu_characteristics_buf_addr = 0xdeadbeef; // Cannot be 0 (UNUSED)
-
- std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.gc, inline_output.size());
+ if (!gpu_characteristics.empty()) {
+ gpu_characteristics.front() = params.gc;
+ }
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output) {
- IoctlGpuGetTpcMasksArgs params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_ctrl_gpu::GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params) {
LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
if (params.mask_buffer_size != 0) {
params.tcp_mask = 3;
}
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetTPCMasks(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output) {
- IoctlGpuGetTpcMasksArgs params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_ctrl_gpu::GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask) {
LOG_DEBUG(Service_NVDRV, "called, mask_buffer_size=0x{:X}", params.mask_buffer_size);
if (params.mask_buffer_size != 0) {
params.tcp_mask = 3;
}
- std::memcpy(output.data(), &params, output.size());
- std::memcpy(inline_output.data(), &params.tcp_mask, inline_output.size());
+ if (!tpc_mask.empty()) {
+ tpc_mask.front() = params.tcp_mask;
+ }
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetActiveSlotMask(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetActiveSlotMask(IoctlActiveSlotMask& params) {
LOG_DEBUG(Service_NVDRV, "called");
- IoctlActiveSlotMask params{};
- if (input.size() > 0) {
- std::memcpy(&params, input.data(), input.size());
- }
params.slot = 0x07;
params.mask = 0x01;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZCullGetCtxSize(IoctlZcullGetCtxSize& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlZcullGetCtxSize params{};
- if (input.size() > 0) {
- std::memcpy(&params, input.data(), input.size());
- }
params.size = 0x1;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlNvgpuGpuZcullGetInfoArgs params{};
-
- if (input.size() > 0) {
- std::memcpy(&params, input.data(), input.size());
- }
-
params.width_align_pixels = 0x20;
params.height_align_pixels = 0x20;
params.pixel_squares_by_aliquots = 0x400;
@@ -243,53 +219,28 @@ NvResult nvhost_ctrl_gpu::ZCullGetInfo(std::span<const u8> input, std::span<u8>
params.subregion_width_align_pixels = 0x20;
params.subregion_height_align_pixels = 0x40;
params.subregion_count = 0x10;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZBCSetTable(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZBCSetTable(IoctlZbcSetTable& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
-
- IoctlZbcSetTable params{};
- std::memcpy(&params, input.data(), input.size());
// TODO(ogniK): What does this even actually do?
-
- // Prevent null pointer being passed as arg 1
- if (output.empty()) {
- LOG_WARNING(Service_NVDRV, "Avoiding passing null pointer to memcpy");
- } else {
- std::memcpy(output.data(), &params, output.size());
- }
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::ZBCQueryTable(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::ZBCQueryTable(IoctlZbcQueryTable& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
-
- IoctlZbcQueryTable params{};
- std::memcpy(&params, input.data(), input.size());
- // TODO : To implement properly
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::FlushL2(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::FlushL2(IoctlFlushL2& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
-
- IoctlFlushL2 params{};
- std::memcpy(&params, input.data(), input.size());
- // TODO : To implement properly
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_ctrl_gpu::GetGpuTime(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_ctrl_gpu::GetGpuTime(IoctlGetGpuTime& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlGetGpuTime params{};
- std::memcpy(&params, input.data(), input.size());
params.gpu_time = static_cast<u64_le>(system.CoreTiming().GetGlobalTimeNs().count());
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
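
GetCharacteristics3 and GetTPCMasks3 now receive the Ioctl3 inline output as a typed span (IoctlGpuCharacteristics, u32) instead of copying bytes by hand. The adapter that performs that reinterpretation, WrapFixedInlOut, also lives in ioctl_serialization.h and is not shown here; a rough sketch of the idea follows, under the same assumptions as the WrapFixed sketch above, with illustrative names.

// Rough sketch only; names are illustrative and the real WrapFixedInlOut is not shown here.
#include <algorithm>
#include <cstring>
#include <span>
#include <type_traits>

template <typename Self, typename Params, typename Inline>
NvResult WrapFixedInlOutSketch(Self* self,
                               NvResult (Self::*handler)(Params&, std::span<Inline>),
                               std::span<const u8> input, std::span<u8> output,
                               std::span<u8> inline_output) {
    static_assert(std::is_trivially_copyable_v<Params> && std::is_trivially_copyable_v<Inline>);

    Params params{};
    if (!input.empty()) {
        std::memcpy(&params, input.data(), std::min(input.size(), sizeof(Params)));
    }

    // View the inline output area as whole Inline elements; any partial trailing bytes are
    // simply not handed to the handler, which is why the handlers check for an empty span.
    const std::span<Inline> typed{reinterpret_cast<Inline*>(inline_output.data()),
                                  inline_output.size() / sizeof(Inline)};
    const NvResult result = (self->*handler)(params, typed);

    if (!output.empty()) {
        std::memcpy(output.data(), &params, std::min(output.size(), sizeof(Params)));
    }
    return result;
}
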
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 97995551c..d170299bd 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -151,21 +151,20 @@ private:
};
static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");
- NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output);
- NvResult GetCharacteristics(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output);
-
- NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output);
- NvResult GetTPCMasks(std::span<const u8> input, std::span<u8> output,
- std::span<u8> inline_output);
-
- NvResult GetActiveSlotMask(std::span<const u8> input, std::span<u8> output);
- NvResult ZCullGetCtxSize(std::span<const u8> input, std::span<u8> output);
- NvResult ZCullGetInfo(std::span<const u8> input, std::span<u8> output);
- NvResult ZBCSetTable(std::span<const u8> input, std::span<u8> output);
- NvResult ZBCQueryTable(std::span<const u8> input, std::span<u8> output);
- NvResult FlushL2(std::span<const u8> input, std::span<u8> output);
- NvResult GetGpuTime(std::span<const u8> input, std::span<u8> output);
+ NvResult GetCharacteristics1(IoctlCharacteristics& params);
+ NvResult GetCharacteristics3(IoctlCharacteristics& params,
+ std::span<IoctlGpuCharacteristics> gpu_characteristics);
+
+ NvResult GetTPCMasks1(IoctlGpuGetTpcMasksArgs& params);
+ NvResult GetTPCMasks3(IoctlGpuGetTpcMasksArgs& params, std::span<u32> tpc_mask);
+
+ NvResult GetActiveSlotMask(IoctlActiveSlotMask& params);
+ NvResult ZCullGetCtxSize(IoctlZcullGetCtxSize& params);
+ NvResult ZCullGetInfo(IoctlNvgpuGpuZcullGetInfoArgs& params);
+ NvResult ZBCSetTable(IoctlZbcSetTable& params);
+ NvResult ZBCQueryTable(IoctlZbcQueryTable& params);
+ NvResult FlushL2(IoctlFlushL2& params);
+ NvResult GetGpuTime(IoctlGetGpuTime& params);
EventInterface& events_interface;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index 46a25fcab..b0395c2f0 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -8,6 +8,7 @@
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/memory.h"
@@ -52,7 +53,7 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 0x0:
switch (command.cmd) {
case 0x3:
- return GetWaitbase(input, output);
+ return WrapFixed(this, &nvhost_gpu::GetWaitbase, input, output);
default:
break;
}
@@ -60,25 +61,25 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetNVMAPfd, input, output);
case 0x3:
- return ChannelSetTimeout(input, output);
+ return WrapFixed(this, &nvhost_gpu::ChannelSetTimeout, input, output);
case 0x8:
- return SubmitGPFIFOBase(input, output, false);
+ return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, false);
case 0x9:
- return AllocateObjectContext(input, output);
+ return WrapFixed(this, &nvhost_gpu::AllocateObjectContext, input, output);
case 0xb:
- return ZCullBind(input, output);
+ return WrapFixed(this, &nvhost_gpu::ZCullBind, input, output);
case 0xc:
- return SetErrorNotifier(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetErrorNotifier, input, output);
case 0xd:
- return SetChannelPriority(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetChannelPriority, input, output);
case 0x1a:
- return AllocGPFIFOEx2(input, output);
+ return WrapFixed(this, &nvhost_gpu::AllocGPFIFOEx2, input, output);
case 0x1b:
- return SubmitGPFIFOBase(input, output, true);
+ return WrapFixedVariable(this, &nvhost_gpu::SubmitGPFIFOBase1, input, output, true);
case 0x1d:
- return ChannelSetTimeslice(input, output);
+ return WrapFixed(this, &nvhost_gpu::ChannelSetTimeslice, input, output);
default:
break;
}
@@ -86,9 +87,9 @@ NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'G':
switch (command.cmd) {
case 0x14:
- return SetClientData(input, output);
+ return WrapFixed(this, &nvhost_gpu::SetClientData, input, output);
case 0x15:
- return GetClientData(input, output);
+ return WrapFixed(this, &nvhost_gpu::GetClientData, input, output);
default:
break;
}
@@ -104,7 +105,8 @@ NvResult nvhost_gpu::Ioctl2(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'H':
switch (command.cmd) {
case 0x1b:
- return SubmitGPFIFOBase(input, inline_input, output);
+ return WrapFixedInlIn(this, &nvhost_gpu::SubmitGPFIFOBase2, input, inline_input,
+ output);
}
break;
}
@@ -121,63 +123,45 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
void nvhost_gpu::OnOpen(DeviceFD fd) {}
void nvhost_gpu::OnClose(DeviceFD fd) {}
-NvResult nvhost_gpu::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) {
- IoctlSetNvmapFD params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
return NvResult::Success;
}
-NvResult nvhost_gpu::SetClientData(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_gpu::SetClientData(IoctlClientData& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlClientData params{};
- std::memcpy(&params, input.data(), input.size());
user_data = params.data;
return NvResult::Success;
}
-NvResult nvhost_gpu::GetClientData(std::span<const u8> input, std::span<u8> output) {
+NvResult nvhost_gpu::GetClientData(IoctlClientData& params) {
LOG_DEBUG(Service_NVDRV, "called");
-
- IoctlClientData params{};
- std::memcpy(&params, input.data(), input.size());
params.data = user_data;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::ZCullBind(std::span<const u8> input, std::span<u8> output) {
- std::memcpy(&zcull_params, input.data(), input.size());
+NvResult nvhost_gpu::ZCullBind(IoctlZCullBind& params) {
+ zcull_params = params;
LOG_DEBUG(Service_NVDRV, "called, gpu_va={:X}, mode={:X}", zcull_params.gpu_va,
zcull_params.mode);
-
- std::memcpy(output.data(), &zcull_params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::SetErrorNotifier(std::span<const u8> input, std::span<u8> output) {
- IoctlSetErrorNotifier params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::SetErrorNotifier(IoctlSetErrorNotifier& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called, offset={:X}, size={:X}, mem={:X}", params.offset,
params.size, params.mem);
-
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::SetChannelPriority(std::span<const u8> input, std::span<u8> output) {
- std::memcpy(&channel_priority, input.data(), input.size());
+NvResult nvhost_gpu::SetChannelPriority(IoctlChannelSetPriority& params) {
+ channel_priority = params.priority;
LOG_DEBUG(Service_NVDRV, "(STUBBED) called, priority={:X}", channel_priority);
-
return NvResult::Success;
}
-NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocGpfifoEx2 params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params) {
LOG_WARNING(Service_NVDRV,
"(STUBBED) called, num_entries={:X}, flags={:X}, unk0={:X}, "
"unk1={:X}, unk2={:X}, unk3={:X}",
@@ -193,18 +177,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> out
params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::AllocateObjectContext(std::span<const u8> input, std::span<u8> output) {
- IoctlAllocObjCtx params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_gpu::AllocateObjectContext(IoctlAllocObjCtx& params) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called, class_num={:X}, flags={:X}", params.class_num,
params.flags);
params.obj_id = 0x0;
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
@@ -248,8 +228,7 @@ static boost::container::small_vector<Tegra::CommandHeader, 512> BuildIncrementW
return result;
}
-NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output,
- Tegra::CommandList&& entries) {
+NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries) {
LOG_TRACE(Service_NVDRV, "called, gpfifo={:X}, num_entries={:X}, flags={:X}", params.address,
params.num_entries, params.flags.raw);
@@ -290,65 +269,55 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> o
flags.raw = 0;
- std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
return NvResult::Success;
}
-NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output,
- bool kickoff) {
- if (input.size() < sizeof(IoctlSubmitGpfifo)) {
+NvResult nvhost_gpu::SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
+ std::span<Tegra::CommandListHeader> commands, bool kickoff) {
+ if (params.num_entries > commands.size()) {
UNIMPLEMENTED();
return NvResult::InvalidSize;
}
- IoctlSubmitGpfifo params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
- Tegra::CommandList entries(params.num_entries);
+ Tegra::CommandList entries(params.num_entries);
if (kickoff) {
system.ApplicationMemory().ReadBlock(params.address, entries.command_lists.data(),
params.num_entries * sizeof(Tegra::CommandListHeader));
} else {
- std::memcpy(entries.command_lists.data(), &input[sizeof(IoctlSubmitGpfifo)],
+ std::memcpy(entries.command_lists.data(), commands.data(),
params.num_entries * sizeof(Tegra::CommandListHeader));
}
- return SubmitGPFIFOImpl(params, output, std::move(entries));
+ return SubmitGPFIFOImpl(params, std::move(entries));
}
-NvResult nvhost_gpu::SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
- std::span<u8> output) {
- if (input.size() < sizeof(IoctlSubmitGpfifo)) {
+NvResult nvhost_gpu::SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
+ std::span<const Tegra::CommandListHeader> commands) {
+ if (params.num_entries > commands.size()) {
UNIMPLEMENTED();
return NvResult::InvalidSize;
}
- IoctlSubmitGpfifo params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSubmitGpfifo));
+
Tegra::CommandList entries(params.num_entries);
- std::memcpy(entries.command_lists.data(), input_inline.data(), input_inline.size());
- return SubmitGPFIFOImpl(params, output, std::move(entries));
+ std::memcpy(entries.command_lists.data(), commands.data(),
+ params.num_entries * sizeof(Tegra::CommandListHeader));
+ return SubmitGPFIFOImpl(params, std::move(entries));
}
-NvResult nvhost_gpu::GetWaitbase(std::span<const u8> input, std::span<u8> output) {
- IoctlGetWaitbase params{};
- std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
+NvResult nvhost_gpu::GetWaitbase(IoctlGetWaitbase& params) {
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
params.value = 0; // Seems to be hard coded at 0
- std::memcpy(output.data(), &params, output.size());
return NvResult::Success;
}
-NvResult nvhost_gpu::ChannelSetTimeout(std::span<const u8> input, std::span<u8> output) {
- IoctlChannelSetTimeout params{};
- std::memcpy(&params, input.data(), sizeof(IoctlChannelSetTimeout));
+NvResult nvhost_gpu::ChannelSetTimeout(IoctlChannelSetTimeout& params) {
LOG_INFO(Service_NVDRV, "called, timeout=0x{:X}", params.timeout);
return NvResult::Success;
}
-NvResult nvhost_gpu::ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output) {
- IoctlSetTimeslice params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSetTimeslice));
+NvResult nvhost_gpu::ChannelSetTimeslice(IoctlSetTimeslice& params) {
LOG_INFO(Service_NVDRV, "called, timeslice=0x{:X}", params.timeslice);
channel_timeslice = params.timeslice;
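
SubmitGPFIFOBase1 and the nvdec/vic Submit, MapBuffer and UnmapBuffer handlers take a fixed struct plus a variable number of trailing records from the same ioctl buffer. That split is done by WrapFixedVariable, again not shown in this excerpt; below is a sketch of the idea under the same assumptions as the earlier WrapFixed sketch. The Ioctl2 inline-input path (WrapFixedInlIn, used for SubmitGPFIFOBase2) is the mirror image, with the trailing entries arriving in a separate read-only buffer.

// Sketch of a fixed-struct-plus-trailing-entries adapter in the spirit of WrapFixedVariable
// (illustrative names). The ioctl input starts with Params, followed by Entry records, e.g.
// IoctlSubmitGpfifo + Tegra::CommandListHeader[] or IoctlMapBuffer + MapBufferEntry[].
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <span>
#include <vector>

template <typename Self, typename Params, typename Entry, typename... Extra>
NvResult WrapFixedVariableSketch(Self* self,
                                 NvResult (Self::*handler)(Params&, std::span<Entry>, Extra...),
                                 std::span<const u8> input, std::span<u8> output, Extra... extra) {
    if (input.size() < sizeof(Params)) {
        return NvResult::InvalidSize;
    }
    Params params{};
    std::memcpy(&params, input.data(), sizeof(Params));

    // Handlers may mutate the trailing entries in place (MapBuffer writes back map_address),
    // so copy them to scratch storage, call the handler, then serialize everything back out.
    const std::size_t count = (input.size() - sizeof(Params)) / sizeof(Entry);
    std::vector<Entry> entries(count);
    if (count > 0) {
        std::memcpy(entries.data(), input.data() + sizeof(Params), count * sizeof(Entry));
    }

    const NvResult result = (self->*handler)(params, std::span<Entry>(entries), extra...);

    if (output.size() >= sizeof(Params)) {
        std::memcpy(output.data(), &params, sizeof(Params));
        const std::size_t writable =
            std::min(count * sizeof(Entry), output.size() - sizeof(Params));
        if (writable > 0) {
            std::memcpy(output.data() + sizeof(Params), entries.data(), writable);
        }
    }
    return result;
}
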
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 529c20526..88fd228ff 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -186,23 +186,24 @@ private:
u32_le channel_priority{};
u32_le channel_timeslice{};
- NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output);
- NvResult SetClientData(std::span<const u8> input, std::span<u8> output);
- NvResult GetClientData(std::span<const u8> input, std::span<u8> output);
- NvResult ZCullBind(std::span<const u8> input, std::span<u8> output);
- NvResult SetErrorNotifier(std::span<const u8> input, std::span<u8> output);
- NvResult SetChannelPriority(std::span<const u8> input, std::span<u8> output);
- NvResult AllocGPFIFOEx2(std::span<const u8> input, std::span<u8> output);
- NvResult AllocateObjectContext(std::span<const u8> input, std::span<u8> output);
- NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::span<u8> output,
- Tegra::CommandList&& entries);
- NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<u8> output,
- bool kickoff = false);
- NvResult SubmitGPFIFOBase(std::span<const u8> input, std::span<const u8> input_inline,
- std::span<u8> output);
- NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output);
- NvResult ChannelSetTimeout(std::span<const u8> input, std::span<u8> output);
- NvResult ChannelSetTimeslice(std::span<const u8> input, std::span<u8> output);
+ NvResult SetNVMAPfd(IoctlSetNvmapFD& params);
+ NvResult SetClientData(IoctlClientData& params);
+ NvResult GetClientData(IoctlClientData& params);
+ NvResult ZCullBind(IoctlZCullBind& params);
+ NvResult SetErrorNotifier(IoctlSetErrorNotifier& params);
+ NvResult SetChannelPriority(IoctlChannelSetPriority& params);
+ NvResult AllocGPFIFOEx2(IoctlAllocGpfifoEx2& params);
+ NvResult AllocateObjectContext(IoctlAllocObjCtx& params);
+
+ NvResult SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, Tegra::CommandList&& entries);
+ NvResult SubmitGPFIFOBase1(IoctlSubmitGpfifo& params,
+ std::span<Tegra::CommandListHeader> commands, bool kickoff = false);
+ NvResult SubmitGPFIFOBase2(IoctlSubmitGpfifo& params,
+ std::span<const Tegra::CommandListHeader> commands);
+
+ NvResult GetWaitbase(IoctlGetWaitbase& params);
+ NvResult ChannelSetTimeout(IoctlChannelSetTimeout& params);
+ NvResult ChannelSetTimeslice(IoctlSetTimeslice& params);
EventInterface& events_interface;
NvCore::Container& core;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index a174442a6..f43914e1b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -6,6 +6,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
#include "video_core/renderer_base.h"
@@ -25,18 +26,18 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
if (!host1x_file.fd_to_id.contains(fd)) {
host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
}
- return Submit(fd, input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::Submit, input, output, fd);
}
case 0x2:
- return GetSyncpoint(input, output);
+ return WrapFixed(this, &nvhost_nvdec::GetSyncpoint, input, output);
case 0x3:
- return GetWaitbase(input, output);
+ return WrapFixed(this, &nvhost_nvdec::GetWaitbase, input, output);
case 0x7:
- return SetSubmitTimeout(input, output);
+ return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
case 0x9:
- return MapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
case 0xa:
- return UnmapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
default:
break;
}
@@ -44,7 +45,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input);
+ return WrapFixed(this, &nvhost_nvdec::SetNVMAPfd, input, output);
default:
break;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 61649aa4a..74c701b95 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -29,6 +29,9 @@ std::size_t SliceVectors(std::span<const u8> input, std::vector<T>& dst, std::si
return 0;
}
const size_t bytes_copied = count * sizeof(T);
+ if (input.size() < offset + bytes_copied) {
+ return 0;
+ }
std::memcpy(dst.data(), input.data() + offset, bytes_copied);
return bytes_copied;
}
@@ -41,6 +44,9 @@ std::size_t WriteVectors(std::span<u8> dst, const std::vector<T>& src, std::size
return 0;
}
const size_t bytes_copied = src.size() * sizeof(T);
+ if (dst.size() < offset + bytes_copied) {
+ return 0;
+ }
std::memcpy(dst.data() + offset, src.data(), bytes_copied);
return bytes_copied;
}
@@ -63,18 +69,14 @@ nvhost_nvdec_common::~nvhost_nvdec_common() {
core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
}
-NvResult nvhost_nvdec_common::SetNVMAPfd(std::span<const u8> input) {
- IoctlSetNvmapFD params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSetNvmapFD));
+NvResult nvhost_nvdec_common::SetNVMAPfd(IoctlSetNvmapFD& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output) {
- IoctlSubmit params{};
- std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
+NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);
// Instantiate param buffers
@@ -85,12 +87,12 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std
std::vector<u32> fence_thresholds(params.fence_count);
// Slice input into their respective buffers
- std::size_t offset = sizeof(IoctlSubmit);
- offset += SliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
- offset += SliceVectors(input, relocs, params.relocation_count, offset);
- offset += SliceVectors(input, reloc_shifts, params.relocation_count, offset);
- offset += SliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
- offset += SliceVectors(input, fence_thresholds, params.fence_count, offset);
+ std::size_t offset = 0;
+ offset += SliceVectors(data, command_buffers, params.cmd_buffer_count, offset);
+ offset += SliceVectors(data, relocs, params.relocation_count, offset);
+ offset += SliceVectors(data, reloc_shifts, params.relocation_count, offset);
+ offset += SliceVectors(data, syncpt_increments, params.syncpoint_count, offset);
+ offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
auto& gpu = system.GPU();
if (gpu.UseNvdec()) {
@@ -108,72 +110,51 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, std::span<const u8> input, std
cmdlist.size() * sizeof(u32));
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
- std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
// Some games expect command_buffers to be written back
- offset = sizeof(IoctlSubmit);
- offset += WriteVectors(output, command_buffers, offset);
- offset += WriteVectors(output, relocs, offset);
- offset += WriteVectors(output, reloc_shifts, offset);
- offset += WriteVectors(output, syncpt_increments, offset);
- offset += WriteVectors(output, fence_thresholds, offset);
+ offset = 0;
+ offset += WriteVectors(data, command_buffers, offset);
+ offset += WriteVectors(data, relocs, offset);
+ offset += WriteVectors(data, reloc_shifts, offset);
+ offset += WriteVectors(data, syncpt_increments, offset);
+ offset += WriteVectors(data, fence_thresholds, offset);
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::GetSyncpoint(std::span<const u8> input, std::span<u8> output) {
- IoctlGetSyncpoint params{};
- std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
+NvResult nvhost_nvdec_common::GetSyncpoint(IoctlGetSyncpoint& params) {
LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
-
- // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
params.value = channel_syncpoint;
- std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
-
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::GetWaitbase(std::span<const u8> input, std::span<u8> output) {
- IoctlGetWaitbase params{};
+NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
- std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
params.value = 0; // Seems to be hard coded at 0
- std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::MapBuffer(std::span<const u8> input, std::span<u8> output) {
- IoctlMapBuffer params{};
- std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
- std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-
- SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
-
- for (auto& cmd_buffer : cmd_buffer_handles) {
- cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
+NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
+ const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
+ for (size_t i = 0; i < num_entries; i++) {
+ entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
}
- std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
- std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
- cmd_buffer_handles.size() * sizeof(MapBufferEntry));
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::UnmapBuffer(std::span<const u8> input, std::span<u8> output) {
- IoctlMapBuffer params{};
- std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
- std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-
- SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
- for (auto& cmd_buffer : cmd_buffer_handles) {
- nvmap.UnpinHandle(cmd_buffer.map_handle);
+NvResult nvhost_nvdec_common::UnmapBuffer(IoctlMapBuffer& params,
+ std::span<MapBufferEntry> entries) {
+ const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
+ for (size_t i = 0; i < num_entries; i++) {
+ nvmap.UnpinHandle(entries[i].map_handle);
+ entries[i] = {};
}
- std::memset(output.data(), 0, output.size());
+ params = {};
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::SetSubmitTimeout(std::span<const u8> input, std::span<u8> output) {
- std::memcpy(&submit_timeout, input.data(), input.size());
+NvResult nvhost_nvdec_common::SetSubmitTimeout(u32 timeout) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
return NvResult::Success;
}
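
Two behavioural notes on the Submit path above: the slice offsets now start at 0 because WrapFixedVariable strips the fixed IoctlSubmit header before the handler runs, and SliceVectors/WriteVectors now refuse to copy when the remaining buffer is too short for the requested slice. A self-contained restatement of that guard follows (simplified; the real helper leaves sizing the destination vector to the caller).

// Simplified restatement of the new guard: a request that does not fit in the remaining bytes
// copies nothing and reports 0, so the running offset stops advancing instead of reading past
// the end of a short Submit payload.
#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

template <typename T>
std::size_t SliceVectorsSketch(std::span<const std::uint8_t> input, std::vector<T>& dst,
                               std::size_t count, std::size_t offset) {
    if (count == 0) {
        return 0;
    }
    const std::size_t bytes = count * sizeof(T);
    if (input.size() < offset + bytes) {
        return 0;
    }
    dst.resize(count);
    std::memcpy(dst.data(), input.data() + offset, bytes);
    return bytes;
}
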
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 9bb573bfe..7ce748e18 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -107,13 +107,13 @@ protected:
static_assert(sizeof(IoctlMapBuffer) == 0x0C, "IoctlMapBuffer is incorrect size");
/// Ioctl command implementations
- NvResult SetNVMAPfd(std::span<const u8> input);
- NvResult Submit(DeviceFD fd, std::span<const u8> input, std::span<u8> output);
- NvResult GetSyncpoint(std::span<const u8> input, std::span<u8> output);
- NvResult GetWaitbase(std::span<const u8> input, std::span<u8> output);
- NvResult MapBuffer(std::span<const u8> input, std::span<u8> output);
- NvResult UnmapBuffer(std::span<const u8> input, std::span<u8> output);
- NvResult SetSubmitTimeout(std::span<const u8> input, std::span<u8> output);
+ NvResult SetNVMAPfd(IoctlSetNvmapFD&);
+ NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
+ NvResult GetSyncpoint(IoctlGetSyncpoint& params);
+ NvResult GetWaitbase(IoctlGetWaitbase& params);
+ NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+ NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+ NvResult SetSubmitTimeout(u32 timeout);
Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
index a05c8cdae..9e6b86458 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
@@ -5,6 +5,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
namespace Service::Nvidia::Devices {
@@ -18,7 +19,7 @@ NvResult nvhost_nvjpg::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input, output);
+ return WrapFixed(this, &nvhost_nvjpg::SetNVMAPfd, input, output);
default:
break;
}
@@ -46,9 +47,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
void nvhost_nvjpg::OnClose(DeviceFD fd) {}
-NvResult nvhost_nvjpg::SetNVMAPfd(std::span<const u8> input, std::span<u8> output) {
- IoctlSetNvmapFD params{};
- std::memcpy(&params, input.data(), input.size());
+NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
nvmap_fd = params.nvmap_fd;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
index 5623e0d47..790c97f6a 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
@@ -33,7 +33,7 @@ private:
s32_le nvmap_fd{};
- NvResult SetNVMAPfd(std::span<const u8> input, std::span<u8> output);
+ NvResult SetNVMAPfd(IoctlSetNvmapFD& params);
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index c0b8684c3..87f8d7c22 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -5,6 +5,7 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
#include "video_core/renderer_base.h"
@@ -25,16 +26,16 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
if (!host1x_file.fd_to_id.contains(fd)) {
host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
}
- return Submit(fd, input, output);
+ return WrapFixedVariable(this, &nvhost_vic::Submit, input, output, fd);
}
case 0x2:
- return GetSyncpoint(input, output);
+ return WrapFixed(this, &nvhost_vic::GetSyncpoint, input, output);
case 0x3:
- return GetWaitbase(input, output);
+ return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
case 0x9:
- return MapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
case 0xa:
- return UnmapBuffer(input, output);
+ return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
default:
break;
}
@@ -42,7 +43,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 'H':
switch (command.cmd) {
case 0x1:
- return SetNVMAPfd(input);
+ return WrapFixed(this, &nvhost_vic::SetNVMAPfd, input, output);
default:
break;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 968eaa175..71b2e62ec 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -13,6 +13,7 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/devices/ioctl_serialization.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/memory.h"
@@ -31,17 +32,17 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
case 0x1:
switch (command.cmd) {
case 0x1:
- return IocCreate(input, output);
+ return WrapFixed(this, &nvmap::IocCreate, input, output);
case 0x3:
- return IocFromId(input, output);
+ return WrapFixed(this, &nvmap::IocFromId, input, output);
case 0x4:
- return IocAlloc(input, output);
+ return WrapFixed(this, &nvmap::IocAlloc, input, output);
case 0x5:
- return IocFree(input, output);
+ return WrapFixed(this, &nvmap::IocFree, input, output);
case 0x9:
- return IocParam(input, output);
+ return WrapFixed(this, &nvmap::IocParam, input, output);
case 0xe:
- return IocGetId(input, output);
+ return WrapFixed(this, &nvmap::IocGetId, input, output);
default:
break;
}
@@ -69,9 +70,7 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
void nvmap::OnOpen(DeviceFD fd) {}
void nvmap::OnClose(DeviceFD fd) {}
-NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) {
- IocCreateParams params;
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvmap::IocCreate(IocCreateParams& params) {
LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
@@ -85,13 +84,10 @@ NvResult nvmap::IocCreate(std::span<const u8> input, std::span<u8> output) {
params.handle = handle_description->id;
LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) {
- IocAllocParams params;
- std::memcpy(&params, input.data(), sizeof(params));
+NvResult nvmap::IocAlloc(IocAllocParams& params) {
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
if (!params.handle) {
@@ -133,14 +129,10 @@ NvResult nvmap::IocAlloc(std::span<const u8> input, std::span<u8> output) {
handle_description->size,
Kernel::KMemoryPermission::None, true, false)
.IsSuccess());
- std::memcpy(output.data(), &params, sizeof(params));
return result;
}
-NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) {
- IocGetIdParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocGetId(IocGetIdParams& params) {
LOG_DEBUG(Service_NVDRV, "called");
// See the comment in FromId for extra info on this function
@@ -157,14 +149,10 @@ NvResult nvmap::IocGetId(std::span<const u8> input, std::span<u8> output) {
}
params.id = handle_description->id;
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) {
- IocFromIdParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocFromId(IocFromIdParams& params) {
LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);
// Handles and IDs are always the same value in nvmap however IDs can be used globally given the
@@ -188,16 +176,12 @@ NvResult nvmap::IocFromId(std::span<const u8> input, std::span<u8> output) {
return result;
}
params.handle = handle_description->id;
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) {
+NvResult nvmap::IocParam(IocParamParams& params) {
enum class ParamTypes { Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, Compr = 6 };
- IocParamParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
LOG_DEBUG(Service_NVDRV, "called type={}", params.param);
if (!params.handle) {
@@ -237,14 +221,10 @@ NvResult nvmap::IocParam(std::span<const u8> input, std::span<u8> output) {
return NvResult::BadValue;
}
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
-NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) {
- IocFreeParams params;
- std::memcpy(&params, input.data(), sizeof(params));
-
+NvResult nvmap::IocFree(IocFreeParams& params) {
LOG_DEBUG(Service_NVDRV, "called");
if (!params.handle) {
@@ -267,7 +247,6 @@ NvResult nvmap::IocFree(std::span<const u8> input, std::span<u8> output) {
// This is possible when there's internal dups or other duplicates.
}
- std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Success;
}
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 4c0cc71cd..049c11028 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -99,12 +99,12 @@ public:
};
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
- NvResult IocCreate(std::span<const u8> input, std::span<u8> output);
- NvResult IocAlloc(std::span<const u8> input, std::span<u8> output);
- NvResult IocGetId(std::span<const u8> input, std::span<u8> output);
- NvResult IocFromId(std::span<const u8> input, std::span<u8> output);
- NvResult IocParam(std::span<const u8> input, std::span<u8> output);
- NvResult IocFree(std::span<const u8> input, std::span<u8> output);
+ NvResult IocCreate(IocCreateParams& params);
+ NvResult IocAlloc(IocAllocParams& params);
+ NvResult IocGetId(IocGetIdParams& params);
+ NvResult IocFromId(IocFromIdParams& params);
+ NvResult IocParam(IocParamParams& params);
+ NvResult IocFree(IocFreeParams& params);
private:
/// Id to use for the next handle that is created.
diff --git a/src/core/hle/service/nvnflinger/buffer_item.h b/src/core/hle/service/nvnflinger/buffer_item.h
index 3da8cc3aa..7fd808f54 100644
--- a/src/core/hle/service/nvnflinger/buffer_item.h
+++ b/src/core/hle/service/nvnflinger/buffer_item.h
@@ -15,7 +15,7 @@
namespace Service::android {
-struct GraphicBuffer;
+class GraphicBuffer;
class BufferItem final {
public:
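
buffer_item.h now forward-declares GraphicBuffer as a class, and later hunks construct it as GraphicBuffer(nvmap, buffer) from an NvGraphicBuffer while dropping the explicit nvmap.DuplicateHandle/FreeHandle calls in the producer and consumer. The class definition is not part of this excerpt; the following is only a guess at its shape, assuming it holds the nvmap handle reference RAII-style. Member layout, the copy policy and BufferId() on NvGraphicBuffer are assumptions for illustration.

// Guesswork sketch; the real class definition is not in this excerpt.
#include <memory>
#include <utility>

class GraphicBufferSketch {
public:
    explicit GraphicBufferSketch(Service::Nvidia::NvCore::NvMap& nvmap_,
                                 std::shared_ptr<NvGraphicBuffer> buffer_)
        : nvmap{&nvmap_}, buffer{std::move(buffer_)} {
        // Keep the backing nvmap handle alive for as long as any slot or queued item holds us,
        // replacing the DuplicateHandle call that QueueBuffer used to make.
        nvmap->DuplicateHandle(buffer->BufferId(), true);
    }

    ~GraphicBufferSketch() {
        // Mirror of the FreeHandle calls removed from ReleaseBuffer and Disconnect.
        nvmap->FreeHandle(buffer->BufferId(), true);
    }

    GraphicBufferSketch(const GraphicBufferSketch&) = delete;
    GraphicBufferSketch& operator=(const GraphicBufferSketch&) = delete;

    u32 BufferId() const {
        return buffer->BufferId();
    }

private:
    Service::Nvidia::NvCore::NvMap* nvmap;
    std::shared_ptr<NvGraphicBuffer> buffer;
};
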
diff --git a/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp
index 51291539d..d91886bed 100644
--- a/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp
+++ b/src/core/hle/service/nvnflinger/buffer_queue_consumer.cpp
@@ -5,7 +5,6 @@
// https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp
#include "common/logging/log.h"
-#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvnflinger/buffer_item.h"
#include "core/hle/service/nvnflinger/buffer_queue_consumer.h"
#include "core/hle/service/nvnflinger/buffer_queue_core.h"
@@ -14,9 +13,8 @@
namespace Service::android {
-BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_,
- Service::Nvidia::NvCore::NvMap& nvmap_)
- : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {}
+BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_)
+ : core{std::move(core_)}, slots{core->slots} {}
BufferQueueConsumer::~BufferQueueConsumer() = default;
@@ -136,8 +134,6 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
slots[slot].buffer_state = BufferState::Free;
- nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true);
-
listener = core->connected_producer_listener;
LOG_DEBUG(Service_Nvnflinger, "releasing slot {}", slot);
@@ -175,6 +171,25 @@ Status BufferQueueConsumer::Connect(std::shared_ptr<IConsumerListener> consumer_
return Status::NoError;
}
+Status BufferQueueConsumer::Disconnect() {
+ LOG_DEBUG(Service_Nvnflinger, "called");
+
+ std::scoped_lock lock{core->mutex};
+
+ if (core->consumer_listener == nullptr) {
+ LOG_ERROR(Service_Nvnflinger, "no consumer is connected");
+ return Status::BadValue;
+ }
+
+ core->is_abandoned = true;
+ core->consumer_listener = nullptr;
+ core->queue.clear();
+ core->FreeAllBuffersLocked();
+ core->SignalDequeueCondition();
+
+ return Status::NoError;
+}
+
Status BufferQueueConsumer::GetReleasedBuffers(u64* out_slot_mask) {
if (out_slot_mask == nullptr) {
LOG_ERROR(Service_Nvnflinger, "out_slot_mask may not be nullptr");
diff --git a/src/core/hle/service/nvnflinger/buffer_queue_consumer.h b/src/core/hle/service/nvnflinger/buffer_queue_consumer.h
index 50ed0bb5f..0a61e8dbd 100644
--- a/src/core/hle/service/nvnflinger/buffer_queue_consumer.h
+++ b/src/core/hle/service/nvnflinger/buffer_queue_consumer.h
@@ -13,10 +13,6 @@
#include "core/hle/service/nvnflinger/buffer_queue_defs.h"
#include "core/hle/service/nvnflinger/status.h"
-namespace Service::Nvidia::NvCore {
-class NvMap;
-} // namespace Service::Nvidia::NvCore
-
namespace Service::android {
class BufferItem;
@@ -25,19 +21,18 @@ class IConsumerListener;
class BufferQueueConsumer final {
public:
- explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_,
- Service::Nvidia::NvCore::NvMap& nvmap_);
+ explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_);
~BufferQueueConsumer();
Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present);
Status ReleaseBuffer(s32 slot, u64 frame_number, const Fence& release_fence);
Status Connect(std::shared_ptr<IConsumerListener> consumer_listener, bool controlled_by_app);
+ Status Disconnect();
Status GetReleasedBuffers(u64* out_slot_mask);
private:
std::shared_ptr<BufferQueueCore> core;
BufferQueueDefs::SlotsType& slots;
- Service::Nvidia::NvCore::NvMap& nvmap;
};
} // namespace Service::android
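
BufferQueueConsumer::Disconnect replaces the removed NotifyShutdown/is_shutting_down mechanism: it abandons the core, clears the queue, frees all buffers and signals the dequeue condition so a blocked producer can observe the abandonment. A hypothetical teardown call site is sketched below; the surrounding function and the choice of API enum value are assumptions, not shown in this excerpt.

// Hypothetical call site, assuming the owner of the queue holds both endpoints.
void ShutdownBufferQueueSketch(Service::android::BufferQueueProducer& producer,
                               Service::android::BufferQueueConsumer& consumer) {
    // Detach the producer's API connection first; with this change that also clears the queue
    // for the connected API and frees its buffers.
    producer.Disconnect(Service::android::NativeWindowApi::Cpu);

    // Then abandon the core from the consumer side, which frees any remaining buffers and wakes
    // any thread stuck in DequeueBuffer, which is what NotifyShutdown() used to do.
    consumer.Disconnect();
}
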
diff --git a/src/core/hle/service/nvnflinger/buffer_queue_core.cpp b/src/core/hle/service/nvnflinger/buffer_queue_core.cpp
index 2dbe29616..4ed5e5978 100644
--- a/src/core/hle/service/nvnflinger/buffer_queue_core.cpp
+++ b/src/core/hle/service/nvnflinger/buffer_queue_core.cpp
@@ -14,24 +14,12 @@ BufferQueueCore::BufferQueueCore() = default;
BufferQueueCore::~BufferQueueCore() = default;
-void BufferQueueCore::NotifyShutdown() {
- std::scoped_lock lock{mutex};
-
- is_shutting_down = true;
-
- SignalDequeueCondition();
-}
-
void BufferQueueCore::SignalDequeueCondition() {
dequeue_possible.store(true);
dequeue_condition.notify_all();
}
bool BufferQueueCore::WaitForDequeueCondition(std::unique_lock<std::mutex>& lk) {
- if (is_shutting_down) {
- return false;
- }
-
dequeue_condition.wait(lk, [&] { return dequeue_possible.load(); });
dequeue_possible.store(false);
@@ -41,7 +29,7 @@ bool BufferQueueCore::WaitForDequeueCondition(std::unique_lock<std::mutex>& lk)
s32 BufferQueueCore::GetMinUndequeuedBufferCountLocked(bool async) const {
// If DequeueBuffer is allowed to error out, we don't have to add an extra buffer.
if (!use_async_buffer) {
- return max_acquired_buffer_count;
+ return 0;
}
if (dequeue_buffer_cannot_block || async) {
@@ -52,7 +40,7 @@ s32 BufferQueueCore::GetMinUndequeuedBufferCountLocked(bool async) const {
}
s32 BufferQueueCore::GetMinMaxBufferCountLocked(bool async) const {
- return GetMinUndequeuedBufferCountLocked(async) + 1;
+ return GetMinUndequeuedBufferCountLocked(async);
}
s32 BufferQueueCore::GetMaxBufferCountLocked(bool async) const {
@@ -61,7 +49,7 @@ s32 BufferQueueCore::GetMaxBufferCountLocked(bool async) const {
if (override_max_buffer_count != 0) {
ASSERT(override_max_buffer_count >= min_buffer_count);
- max_buffer_count = override_max_buffer_count;
+ return override_max_buffer_count;
}
// Any buffers that are dequeued by the producer or sitting in the queue waiting to be consumed
diff --git a/src/core/hle/service/nvnflinger/buffer_queue_core.h b/src/core/hle/service/nvnflinger/buffer_queue_core.h
index 9164f08a0..e513d183b 100644
--- a/src/core/hle/service/nvnflinger/buffer_queue_core.h
+++ b/src/core/hle/service/nvnflinger/buffer_queue_core.h
@@ -34,8 +34,6 @@ public:
BufferQueueCore();
~BufferQueueCore();
- void NotifyShutdown();
-
private:
void SignalDequeueCondition();
bool WaitForDequeueCondition(std::unique_lock<std::mutex>& lk);
@@ -74,7 +72,6 @@ private:
u32 transform_hint{};
bool is_allocating{};
mutable std::condition_variable_any is_allocating_condition;
- bool is_shutting_down{};
};
} // namespace Service::android
diff --git a/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp
index dc6917d5d..5d8762d25 100644
--- a/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvnflinger/buffer_queue_producer.cpp
@@ -13,7 +13,6 @@
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/hle_ipc.h"
#include "core/hle/service/kernel_helpers.h"
-#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvnflinger/buffer_queue_core.h"
#include "core/hle/service/nvnflinger/buffer_queue_producer.h"
#include "core/hle/service/nvnflinger/consumer_listener.h"
@@ -134,7 +133,7 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
const s32 max_buffer_count = core->GetMaxBufferCountLocked(async);
if (async && core->override_max_buffer_count) {
if (core->override_max_buffer_count < max_buffer_count) {
- LOG_ERROR(Service_Nvnflinger, "async mode is invalid with buffer count override");
+ *found = BufferQueueCore::INVALID_BUFFER_SLOT;
return Status::BadValue;
}
}
@@ -142,7 +141,8 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
// Free up any buffers that are in slots beyond the max buffer count
for (s32 s = max_buffer_count; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
ASSERT(slots[s].buffer_state == BufferState::Free);
- if (slots[s].graphic_buffer != nullptr) {
+ if (slots[s].graphic_buffer != nullptr && slots[s].buffer_state == BufferState::Free &&
+ !slots[s].is_preallocated) {
core->FreeBufferLocked(s);
*return_flags |= Status::ReleaseAllBuffers;
}
@@ -532,8 +532,6 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
item.is_droppable = core->dequeue_buffer_cannot_block || async;
item.swap_interval = swap_interval;
- nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true);
-
sticky_transform = sticky_transform_;
if (core->queue.empty()) {
@@ -743,19 +741,13 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
return Status::NoError;
}
- // HACK: We are not Android. Remove handle for items in queue, and clear queue.
- // Allows synchronous destruction of nvmap handles.
- for (auto& item : core->queue) {
- nvmap.FreeHandle(item.graphic_buffer->BufferId(), true);
- }
- core->queue.clear();
-
switch (api) {
case NativeWindowApi::Egl:
case NativeWindowApi::Cpu:
case NativeWindowApi::Media:
case NativeWindowApi::Camera:
if (core->connected_api == api) {
+ core->queue.clear();
core->FreeAllBuffersLocked();
core->connected_producer_listener = nullptr;
core->connected_api = NativeWindowApi::NoConnectedApi;
@@ -784,7 +776,7 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
}
Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
- const std::shared_ptr<GraphicBuffer>& buffer) {
+ const std::shared_ptr<NvGraphicBuffer>& buffer) {
LOG_DEBUG(Service_Nvnflinger, "slot {}", slot);
if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
@@ -795,7 +787,7 @@ Status BufferQueueProducer::SetPreallocatedBuffer(s32 slot,
slots[slot] = {};
slots[slot].fence = Fence::NoFence();
- slots[slot].graphic_buffer = buffer;
+ slots[slot].graphic_buffer = std::make_shared<GraphicBuffer>(nvmap, buffer);
slots[slot].frame_number = 0;
// Most games preallocate a buffer and pass a valid buffer here. However, it is possible for
@@ -838,7 +830,7 @@ void BufferQueueProducer::Transact(HLERequestContext& ctx, TransactionId code, u
}
case TransactionId::SetPreallocatedBuffer: {
const auto slot = parcel_in.Read<s32>();
- const auto buffer = parcel_in.ReadObject<GraphicBuffer>();
+ const auto buffer = parcel_in.ReadObject<NvGraphicBuffer>();
status = SetPreallocatedBuffer(slot, buffer);
break;
@@ -866,7 +858,7 @@ void BufferQueueProducer::Transact(HLERequestContext& ctx, TransactionId code, u
status = RequestBuffer(slot, &buf);
- parcel_out.WriteFlattenedObject(buf);
+ parcel_out.WriteFlattenedObject<NvGraphicBuffer>(buf.get());
break;
}
case TransactionId::QueueBuffer: {
diff --git a/src/core/hle/service/nvnflinger/buffer_queue_producer.h b/src/core/hle/service/nvnflinger/buffer_queue_producer.h
index d4201c104..64c17d56c 100644
--- a/src/core/hle/service/nvnflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvnflinger/buffer_queue_producer.h
@@ -38,6 +38,7 @@ namespace Service::android {
class BufferQueueCore;
class IProducerListener;
+struct NvGraphicBuffer;
class BufferQueueProducer final : public IBinder {
public:
@@ -65,7 +66,7 @@ public:
bool producer_controlled_by_app, QueueBufferOutput* output);
Status Disconnect(NativeWindowApi api);
- Status SetPreallocatedBuffer(s32 slot, const std::shared_ptr<GraphicBuffer>& buffer);
+ Status SetPreallocatedBuffer(s32 slot, const std::shared_ptr<NvGraphicBuffer>& buffer);
private:
BufferQueueProducer(const BufferQueueProducer&) = delete;
diff --git a/src/core/hle/service/nvnflinger/buffer_slot.h b/src/core/hle/service/nvnflinger/buffer_slot.h
index d8c9dec3b..d25bca049 100644
--- a/src/core/hle/service/nvnflinger/buffer_slot.h
+++ b/src/core/hle/service/nvnflinger/buffer_slot.h
@@ -13,7 +13,7 @@
namespace Service::android {
-struct GraphicBuffer;
+class GraphicBuffer;
enum class BufferState : u32 {
Free = 0,
diff --git a/src/core/hle/service/nvnflinger/buffer_transform_flags.h b/src/core/hle/service/nvnflinger/buffer_transform_flags.h
index 67aa5dad6..ffe579718 100644
--- a/src/core/hle/service/nvnflinger/buffer_transform_flags.h
+++ b/src/core/hle/service/nvnflinger/buffer_transform_flags.h
@@ -3,6 +3,7 @@
#pragma once
+#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Service::android {
@@ -21,5 +22,6 @@ enum class BufferTransformFlags : u32 {
/// Rotate source image 270 degrees clockwise
Rotate270 = 0x07,
};
+DECLARE_ENUM_FLAG_OPERATORS(BufferTransformFlags);
} // namespace Service::android
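// A minimal standalone sketch of the kind of bitwise operators a macro like
// DECLARE_ENUM_FLAG_OPERATORS is assumed to generate for a scoped enum, so
// transform flags compose without manual casts. Illustrative only; the real
// macro body lives in common/common_funcs.h. SketchFlags is a stand-in.
#include <type_traits>

enum class SketchFlags : unsigned { None = 0, FlipH = 1, FlipV = 2, Rotate90 = 4 };

constexpr SketchFlags operator|(SketchFlags a, SketchFlags b) {
    using T = std::underlying_type_t<SketchFlags>;
    return static_cast<SketchFlags>(static_cast<T>(a) | static_cast<T>(b));
}
constexpr SketchFlags operator&(SketchFlags a, SketchFlags b) {
    using T = std::underlying_type_t<SketchFlags>;
    return static_cast<SketchFlags>(static_cast<T>(a) & static_cast<T>(b));
}
constexpr bool AnySet(SketchFlags f) {
    return static_cast<std::underlying_type_t<SketchFlags>>(f) != 0;
}

// Usage: combine and test flags without casting at every call site.
static_assert(AnySet((SketchFlags::FlipH | SketchFlags::Rotate90) & SketchFlags::FlipH));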
diff --git a/src/core/hle/service/nvnflinger/consumer_base.cpp b/src/core/hle/service/nvnflinger/consumer_base.cpp
index 4dcda8dac..1059e72bf 100644
--- a/src/core/hle/service/nvnflinger/consumer_base.cpp
+++ b/src/core/hle/service/nvnflinger/consumer_base.cpp
@@ -27,6 +27,26 @@ void ConsumerBase::Connect(bool controlled_by_app) {
consumer->Connect(shared_from_this(), controlled_by_app);
}
+void ConsumerBase::Abandon() {
+ LOG_DEBUG(Service_Nvnflinger, "called");
+
+ std::scoped_lock lock{mutex};
+
+ if (!is_abandoned) {
+ this->AbandonLocked();
+ is_abandoned = true;
+ }
+}
+
+void ConsumerBase::AbandonLocked() {
+ for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; i++) {
+ this->FreeBufferLocked(i);
+ }
+ // disconnect from the BufferQueue
+ consumer->Disconnect();
+ consumer = nullptr;
+}
+
void ConsumerBase::FreeBufferLocked(s32 slot_index) {
LOG_DEBUG(Service_Nvnflinger, "slot_index={}", slot_index);
diff --git a/src/core/hle/service/nvnflinger/consumer_base.h b/src/core/hle/service/nvnflinger/consumer_base.h
index 264829414..ea3e9e97a 100644
--- a/src/core/hle/service/nvnflinger/consumer_base.h
+++ b/src/core/hle/service/nvnflinger/consumer_base.h
@@ -24,6 +24,7 @@ class BufferQueueConsumer;
class ConsumerBase : public IConsumerListener, public std::enable_shared_from_this<ConsumerBase> {
public:
void Connect(bool controlled_by_app);
+ void Abandon();
protected:
explicit ConsumerBase(std::unique_ptr<BufferQueueConsumer> consumer_);
@@ -34,6 +35,7 @@ protected:
void OnBuffersReleased() override;
void OnSidebandStreamChanged() override;
+ void AbandonLocked();
void FreeBufferLocked(s32 slot_index);
Status AcquireBufferLocked(BufferItem* item, std::chrono::nanoseconds present_when);
Status ReleaseBufferLocked(s32 slot, const std::shared_ptr<GraphicBuffer>& graphic_buffer);
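// A minimal sketch, assuming the locking convention above: the public method
// acquires the mutex and checks a one-shot flag, while the *Locked helper
// assumes the caller already holds the lock. Names here are illustrative.
#include <mutex>

class SketchConsumer {
public:
    void Abandon() {
        std::scoped_lock lock{mutex};
        if (!is_abandoned) {
            AbandonLocked();
            is_abandoned = true;
        }
    }

private:
    void AbandonLocked() {
        // Free per-slot resources and drop the queue connection here.
    }

    std::mutex mutex;
    bool is_abandoned{};
};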
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index 2e29bc848..d7db24f42 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -71,24 +71,17 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
R_SUCCEED();
}
-template <typename T>
-std::span<u8> SerializeIoc(T& params) {
- return std::span(reinterpret_cast<u8*>(std::addressof(params)), sizeof(T));
-}
-
Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap, u32 size) {
// Create a handle.
- Nvidia::Devices::nvmap::IocCreateParams create_in_params{
+ Nvidia::Devices::nvmap::IocCreateParams create_params{
.size = size,
.handle = 0,
};
- Nvidia::Devices::nvmap::IocCreateParams create_out_params{};
- R_UNLESS(nvmap.IocCreate(SerializeIoc(create_in_params), SerializeIoc(create_out_params)) ==
- Nvidia::NvResult::Success,
+ R_UNLESS(nvmap.IocCreate(create_params) == Nvidia::NvResult::Success,
VI::ResultOperationFailed);
// Assign the output handle.
- *out_nv_map_handle = create_out_params.handle;
+ *out_nv_map_handle = create_params.handle;
// We succeeded.
R_SUCCEED();
@@ -96,13 +89,10 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
// Free the handle.
- Nvidia::Devices::nvmap::IocFreeParams free_in_params{
+ Nvidia::Devices::nvmap::IocFreeParams free_params{
.handle = handle,
};
- Nvidia::Devices::nvmap::IocFreeParams free_out_params{};
- R_UNLESS(nvmap.IocFree(SerializeIoc(free_in_params), SerializeIoc(free_out_params)) ==
- Nvidia::NvResult::Success,
- VI::ResultOperationFailed);
+ R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
// We succeeded.
R_SUCCEED();
@@ -111,7 +101,7 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
u32 size) {
// Assign the allocated memory to the handle.
- Nvidia::Devices::nvmap::IocAllocParams alloc_in_params{
+ Nvidia::Devices::nvmap::IocAllocParams alloc_params{
.handle = handle,
.heap_mask = 0,
.flags = {},
@@ -119,10 +109,7 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
.kind = 0,
.address = GetInteger(buffer),
};
- Nvidia::Devices::nvmap::IocAllocParams alloc_out_params{};
- R_UNLESS(nvmap.IocAlloc(SerializeIoc(alloc_in_params), SerializeIoc(alloc_out_params)) ==
- Nvidia::NvResult::Success,
- VI::ResultOperationFailed);
+ R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
// We succeeded.
R_SUCCEED();
@@ -179,7 +166,7 @@ constexpr SharedMemoryPoolLayout SharedBufferPoolLayout = [] {
}();
void MakeGraphicBuffer(android::BufferQueueProducer& producer, u32 slot, u32 handle) {
- auto buffer = std::make_shared<android::GraphicBuffer>();
+ auto buffer = std::make_shared<android::NvGraphicBuffer>();
buffer->width = SharedBufferWidth;
buffer->height = SharedBufferHeight;
buffer->stride = SharedBufferBlockLinearStride;
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp
index a07c621d9..0745434c5 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.cpp
+++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp
@@ -47,7 +47,10 @@ void Nvnflinger::SplitVSync(std::stop_token stop_token) {
vsync_signal.Wait();
const auto lock_guard = Lock();
- Compose();
+
+ if (!is_abandoned) {
+ Compose();
+ }
}
}
@@ -66,7 +69,6 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
"ScreenComposition",
[this](std::uintptr_t, s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
- { const auto lock_guard = Lock(); }
vsync_signal.Set();
return std::chrono::nanoseconds(GetNextTicks());
});
@@ -106,11 +108,20 @@ Nvnflinger::~Nvnflinger() {
}
void Nvnflinger::ShutdownLayers() {
- for (auto& display : displays) {
- for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
- display.GetLayer(layer).Core().NotifyShutdown();
+ // Abandon consumers.
+ {
+ const auto lock_guard = Lock();
+ for (auto& display : displays) {
+ for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
+ display.GetLayer(layer).GetConsumer().Abandon();
+ }
}
+
+ is_abandoned = true;
}
+
+ // Join the vsync thread, if it exists.
+ vsync_thread = {};
}
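// A minimal sketch of the shutdown idiom used above: assigning a
// default-constructed std::jthread requests stop on the old thread and joins
// it before the assignment completes, so `worker = {};` is a blocking join.
// SketchCompositor is an illustrative stand-in, not the Nvnflinger class.
#include <atomic>
#include <chrono>
#include <stop_token>
#include <thread>

class SketchCompositor {
public:
    void Start() {
        worker = std::jthread{[this](std::stop_token token) {
            while (!token.stop_requested()) {
                std::this_thread::sleep_for(std::chrono::milliseconds{16});
                frames.fetch_add(1, std::memory_order_relaxed);
            }
        }};
    }
    void Shutdown() {
        worker = {}; // request_stop() + join() via jthread's move assignment
    }

private:
    std::jthread worker;
    std::atomic<unsigned> frames{};
};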
void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
@@ -229,16 +240,6 @@ VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) {
return display->FindLayer(layer_id);
}
-const VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) const {
- const auto* const display = FindDisplay(display_id);
-
- if (display == nullptr) {
- return nullptr;
- }
-
- return display->FindLayer(layer_id);
-}
-
VI::Layer* Nvnflinger::FindOrCreateLayer(u64 display_id, u64 layer_id) {
auto* const display = FindDisplay(display_id);
@@ -288,7 +289,6 @@ void Nvnflinger::Compose() {
auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
ASSERT(nvdisp);
- guard->unlock();
Common::Rectangle<int> crop_rect{
static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
@@ -299,7 +299,6 @@ void Nvnflinger::Compose() {
buffer.fence.fences, buffer.fence.num_fences);
MicroProfileFlip();
- guard->lock();
swap_interval = buffer.swap_interval;
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.h b/src/core/hle/service/nvnflinger/nvnflinger.h
index 14c783582..f5d73acdb 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.h
+++ b/src/core/hle/service/nvnflinger/nvnflinger.h
@@ -117,9 +117,6 @@ private:
/// Finds the layer identified by the specified ID in the desired display.
[[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id);
- /// Finds the layer identified by the specified ID in the desired display.
- [[nodiscard]] const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
-
/// Finds the layer identified by the specified ID in the desired display,
/// or creates the layer if it is not found.
/// To be used when the system expects the specified ID to already exist.
@@ -143,6 +140,8 @@ private:
s32 swap_interval = 1;
+ bool is_abandoned = false;
+
/// Event that handles screen composition.
std::shared_ptr<Core::Timing::EventType> multi_composition_event;
std::shared_ptr<Core::Timing::EventType> single_composition_event;
diff --git a/src/core/hle/service/nvnflinger/status.h b/src/core/hle/service/nvnflinger/status.h
index 7af166c40..3fa0fe15b 100644
--- a/src/core/hle/service/nvnflinger/status.h
+++ b/src/core/hle/service/nvnflinger/status.h
@@ -19,7 +19,7 @@ enum class Status : s32 {
Busy = -16,
NoInit = -19,
BadValue = -22,
- InvalidOperation = -37,
+ InvalidOperation = -38,
BufferNeedsReallocation = 1,
ReleaseAllBuffers = 2,
};
diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
new file mode 100644
index 000000000..ce70946ec
--- /dev/null
+++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
@@ -0,0 +1,34 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvnflinger/ui/graphic_buffer.h"
+
+namespace Service::android {
+
+static NvGraphicBuffer GetBuffer(std::shared_ptr<NvGraphicBuffer>& buffer) {
+ if (buffer) {
+ return *buffer;
+ } else {
+ return {};
+ }
+}
+
+GraphicBuffer::GraphicBuffer(u32 width_, u32 height_, PixelFormat format_, u32 usage_)
+ : NvGraphicBuffer(width_, height_, format_, usage_), m_nvmap(nullptr) {}
+
+GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
+ std::shared_ptr<NvGraphicBuffer> buffer)
+ : NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) {
+ if (this->BufferId() > 0) {
+ m_nvmap->DuplicateHandle(this->BufferId(), true);
+ }
+}
+
+GraphicBuffer::~GraphicBuffer() {
+ if (m_nvmap != nullptr && this->BufferId() > 0) {
+ m_nvmap->FreeHandle(this->BufferId(), true);
+ }
+}
+
+} // namespace Service::android
diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.h b/src/core/hle/service/nvnflinger/ui/graphic_buffer.h
index 3eac5cedd..da430aa75 100644
--- a/src/core/hle/service/nvnflinger/ui/graphic_buffer.h
+++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.h
@@ -6,16 +6,22 @@
#pragma once
+#include <memory>
+
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/service/nvnflinger/pixel_format.h"
+namespace Service::Nvidia::NvCore {
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::android {
-struct GraphicBuffer final {
- constexpr GraphicBuffer() = default;
+struct NvGraphicBuffer {
+ constexpr NvGraphicBuffer() = default;
- constexpr GraphicBuffer(u32 width_, u32 height_, PixelFormat format_, u32 usage_)
+ constexpr NvGraphicBuffer(u32 width_, u32 height_, PixelFormat format_, u32 usage_)
: width{static_cast<s32>(width_)}, height{static_cast<s32>(height_)}, format{format_},
usage{static_cast<s32>(usage_)} {}
@@ -93,6 +99,17 @@ struct GraphicBuffer final {
u32 offset{};
INSERT_PADDING_WORDS(60);
};
-static_assert(sizeof(GraphicBuffer) == 0x16C, "GraphicBuffer has wrong size");
+static_assert(sizeof(NvGraphicBuffer) == 0x16C, "NvGraphicBuffer has wrong size");
+
+class GraphicBuffer final : public NvGraphicBuffer {
+public:
+ explicit GraphicBuffer(u32 width, u32 height, PixelFormat format, u32 usage);
+ explicit GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
+ std::shared_ptr<NvGraphicBuffer> buffer);
+ ~GraphicBuffer();
+
+private:
+ Service::Nvidia::NvCore::NvMap* m_nvmap{};
+};
} // namespace Service::android
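// A minimal sketch of the ownership model GraphicBuffer adopts above:
// duplicate the underlying handle on construction and free it on destruction,
// so buffer lifetime, not queue bookkeeping, controls the handle reference.
// HandleTable and its methods are stand-ins, not the real NvMap interface.
#include <cstdint>

struct HandleTable {
    void Duplicate(std::uint32_t /*id*/) { /* bump the refcount for id */ }
    void Free(std::uint32_t /*id*/) { /* drop the refcount for id */ }
};

class SketchBuffer {
public:
    SketchBuffer(HandleTable& table_, std::uint32_t id_) : table{&table_}, id{id_} {
        if (id > 0) {
            table->Duplicate(id);
        }
    }
    ~SketchBuffer() {
        if (table != nullptr && id > 0) {
            table->Free(id);
        }
    }
    SketchBuffer(const SketchBuffer&) = delete;
    SketchBuffer& operator=(const SketchBuffer&) = delete;

private:
    HandleTable* table{};
    std::uint32_t id{};
};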
diff --git a/src/core/hle/service/pctl/pctl_module.cpp b/src/core/hle/service/pctl/pctl_module.cpp
index 938330dd0..6a7fd72bc 100644
--- a/src/core/hle/service/pctl/pctl_module.cpp
+++ b/src/core/hle/service/pctl/pctl_module.cpp
@@ -141,6 +141,12 @@ public:
service_context.CreateEvent("IParentalControlService::RequestSuspensionEvent");
}
+ ~IParentalControlService() {
+ service_context.CloseEvent(synchronization_event);
+ service_context.CloseEvent(unlinked_event);
+ service_context.CloseEvent(request_suspension_event);
+ };
+
private:
bool CheckFreeCommunicationPermissionImpl() const {
if (states.temporary_unlocked) {
diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp
index f9cf2dda3..d92499f05 100644
--- a/src/core/hle/service/pm/pm.cpp
+++ b/src/core/hle/service/pm/pm.cpp
@@ -37,7 +37,7 @@ std::optional<Kernel::KProcess*> SearchProcessList(
void GetApplicationPidGeneric(HLERequestContext& ctx,
const std::vector<Kernel::KProcess*>& process_list) {
const auto process = SearchProcessList(process_list, [](const auto& proc) {
- return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin;
+ return proc->GetProcessId() == Kernel::KProcess::ProcessIdMin;
});
IPC::ResponseBuilder rb{ctx, 4};
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index ec3af80af..48304e6d1 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -19,19 +19,8 @@
namespace Service::Set {
-namespace {
-constexpr u64 SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET = 0x05;
-
-enum class GetFirmwareVersionType {
- Version1,
- Version2,
-};
-
-void GetFirmwareVersionImpl(Core::System& system, HLERequestContext& ctx,
- GetFirmwareVersionType type) {
- ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100,
- "FirmwareVersion output buffer must be 0x100 bytes in size!");
-
+Result GetFirmwareVersionImpl(FirmwareVersionFormat& out_firmware, Core::System& system,
+ GetFirmwareVersionType type) {
constexpr u64 FirmwareVersionSystemDataId = 0x0100000000000809;
auto& fsc = system.GetFileSystemController();
@@ -45,46 +34,43 @@ void GetFirmwareVersionImpl(Core::System& system, HLERequestContext& ctx,
nca = bis_system->GetEntry(FirmwareVersionSystemDataId, FileSys::ContentRecordType::Data);
}
if (nca) {
- romfs = FileSys::ExtractRomFS(nca->GetRomFS());
+ if (auto nca_romfs = nca->GetRomFS(); nca_romfs) {
+ romfs = FileSys::ExtractRomFS(nca_romfs);
+ }
}
if (!romfs) {
romfs = FileSys::ExtractRomFS(
FileSys::SystemArchive::SynthesizeSystemArchive(FirmwareVersionSystemDataId));
}
- const auto early_exit_failure = [&ctx](std::string_view desc, Result code) {
+ const auto early_exit_failure = [](std::string_view desc, Result code) {
LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).",
desc);
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(code);
+ return code;
};
const auto ver_file = romfs->GetFile("file");
if (ver_file == nullptr) {
- early_exit_failure("The system version archive didn't contain the file 'file'.",
- FileSys::ERROR_INVALID_ARGUMENT);
- return;
+ return early_exit_failure("The system version archive didn't contain the file 'file'.",
+ FileSys::ERROR_INVALID_ARGUMENT);
}
auto data = ver_file->ReadAllBytes();
- if (data.size() != 0x100) {
- early_exit_failure("The system version file 'file' was not the correct size.",
- FileSys::ERROR_OUT_OF_BOUNDS);
- return;
+ if (data.size() != sizeof(FirmwareVersionFormat)) {
+ return early_exit_failure("The system version file 'file' was not the correct size.",
+ FileSys::ERROR_OUT_OF_BOUNDS);
}
+ std::memcpy(&out_firmware, data.data(), sizeof(FirmwareVersionFormat));
+
// If the command is GetFirmwareVersion (as opposed to GetFirmwareVersion2), hardware will
// zero out the REVISION_MINOR field.
if (type == GetFirmwareVersionType::Version1) {
- data[SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET] = 0;
+ out_firmware.revision_minor = 0;
}
- ctx.WriteBuffer(data);
-
- IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(ResultSuccess);
+ return ResultSuccess;
}
-} // Anonymous namespace
void SET_SYS::SetLanguageCode(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
@@ -98,12 +84,32 @@ void SET_SYS::SetLanguageCode(HLERequestContext& ctx) {
void SET_SYS::GetFirmwareVersion(HLERequestContext& ctx) {
LOG_DEBUG(Service_SET, "called");
- GetFirmwareVersionImpl(system, ctx, GetFirmwareVersionType::Version1);
+
+ FirmwareVersionFormat firmware_data{};
+ const auto result =
+ GetFirmwareVersionImpl(firmware_data, system, GetFirmwareVersionType::Version1);
+
+ if (result.IsSuccess()) {
+ ctx.WriteBuffer(firmware_data);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
}
void SET_SYS::GetFirmwareVersion2(HLERequestContext& ctx) {
LOG_DEBUG(Service_SET, "called");
- GetFirmwareVersionImpl(system, ctx, GetFirmwareVersionType::Version2);
+
+ FirmwareVersionFormat firmware_data{};
+ const auto result =
+ GetFirmwareVersionImpl(firmware_data, system, GetFirmwareVersionType::Version2);
+
+ if (result.IsSuccess()) {
+ ctx.WriteBuffer(firmware_data);
+ }
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(result);
}
void SET_SYS::GetAccountSettings(HLERequestContext& ctx) {
@@ -431,8 +437,7 @@ void SET_SYS::GetAutoUpdateEnableFlag(HLERequestContext& ctx) {
void SET_SYS::GetBatteryPercentageFlag(HLERequestContext& ctx) {
u8 battery_percentage_flag{1};
- LOG_WARNING(Service_SET, "(STUBBED) called, battery_percentage_flag={}",
- battery_percentage_flag);
+ LOG_DEBUG(Service_SET, "(STUBBED) called, battery_percentage_flag={}", battery_percentage_flag);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
@@ -492,6 +497,29 @@ void SET_SYS::GetChineseTraditionalInputMethod(HLERequestContext& ctx) {
rb.PushEnum(ChineseTraditionalInputMethod::Unknown0);
}
+void SET_SYS::GetHomeMenuScheme(HLERequestContext& ctx) {
+ LOG_DEBUG(Service_SET, "(STUBBED) called");
+
+ const HomeMenuScheme default_color = {
+ .main = 0xFF323232,
+ .back = 0xFF323232,
+ .sub = 0xFFFFFFFF,
+ .bezel = 0xFFFFFFFF,
+ .extra = 0xFF000000,
+ };
+
+ IPC::ResponseBuilder rb{ctx, 7};
+ rb.Push(ResultSuccess);
+ rb.PushRaw(default_color);
+}
+
+void SET_SYS::GetHomeMenuSchemeModel(HLERequestContext& ctx) {
+ LOG_WARNING(Service_SET, "(STUBBED) called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(ResultSuccess);
+ rb.Push(0);
+}
void SET_SYS::GetFieldTestingFlag(HLERequestContext& ctx) {
LOG_WARNING(Service_SET, "(STUBBED) called");
@@ -674,7 +702,7 @@ SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} {
{171, nullptr, "SetChineseTraditionalInputMethod"},
{172, nullptr, "GetPtmCycleCountReliability"},
{173, nullptr, "SetPtmCycleCountReliability"},
- {174, nullptr, "GetHomeMenuScheme"},
+ {174, &SET_SYS::GetHomeMenuScheme, "GetHomeMenuScheme"},
{175, nullptr, "GetThemeSettings"},
{176, nullptr, "SetThemeSettings"},
{177, nullptr, "GetThemeKey"},
@@ -685,7 +713,7 @@ SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"} {
{182, nullptr, "SetT"},
{183, nullptr, "GetPlatformRegion"},
{184, nullptr, "SetPlatformRegion"},
- {185, nullptr, "GetHomeMenuSchemeModel"},
+ {185, &SET_SYS::GetHomeMenuSchemeModel, "GetHomeMenuSchemeModel"},
{186, nullptr, "GetMemoryUsageRateFlag"},
{187, nullptr, "GetTouchScreenMode"},
{188, nullptr, "SetTouchScreenMode"},
diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h
index c7dba2a9e..5f770fd32 100644
--- a/src/core/hle/service/set/set_sys.h
+++ b/src/core/hle/service/set/set_sys.h
@@ -4,6 +4,7 @@
#pragma once
#include "common/uuid.h"
+#include "core/hle/result.h"
#include "core/hle/service/service.h"
#include "core/hle/service/time/clock_types.h"
@@ -12,6 +13,29 @@ class System;
}
namespace Service::Set {
+enum class LanguageCode : u64;
+enum class GetFirmwareVersionType {
+ Version1,
+ Version2,
+};
+
+struct FirmwareVersionFormat {
+ u8 major;
+ u8 minor;
+ u8 micro;
+ INSERT_PADDING_BYTES(1);
+ u8 revision_major;
+ u8 revision_minor;
+ INSERT_PADDING_BYTES(2);
+ std::array<char, 0x20> platform;
+ std::array<u8, 0x40> version_hash;
+ std::array<char, 0x18> display_version;
+ std::array<char, 0x80> display_title;
+};
+static_assert(sizeof(FirmwareVersionFormat) == 0x100, "FirmwareVersionFormat is an invalid size");
+
+Result GetFirmwareVersionImpl(FirmwareVersionFormat& out_firmware, Core::System& system,
+ GetFirmwareVersionType type);
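// A minimal sketch of the pattern GetFirmwareVersionImpl relies on above:
// check that a byte blob matches the packed struct size, then memcpy it into
// a trivially copyable struct. PackedVersion and ParseVersion are
// illustrative stand-ins, not the real firmware format.
#include <cstdint>
#include <cstring>
#include <optional>
#include <type_traits>
#include <vector>

struct PackedVersion {
    std::uint8_t major;
    std::uint8_t minor;
    std::uint8_t micro;
};
static_assert(std::is_trivially_copyable_v<PackedVersion>);

inline std::optional<PackedVersion> ParseVersion(const std::vector<std::uint8_t>& blob) {
    if (blob.size() != sizeof(PackedVersion)) {
        return std::nullopt; // wrong size: treat as a corrupt archive
    }
    PackedVersion out{};
    std::memcpy(&out, blob.data(), sizeof(out));
    return out;
}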
class SET_SYS final : public ServiceFramework<SET_SYS> {
public:
@@ -269,6 +293,16 @@ private:
};
static_assert(sizeof(EulaVersion) == 0x30, "EulaVersion is incorrect size");
+ /// This is nn::settings::system::HomeMenuScheme
+ struct HomeMenuScheme {
+ u32 main;
+ u32 back;
+ u32 sub;
+ u32 bezel;
+ u32 extra;
+ };
+ static_assert(sizeof(HomeMenuScheme) == 0x14, "HomeMenuScheme is incorrect size");
+
void SetLanguageCode(HLERequestContext& ctx);
void GetFirmwareVersion(HLERequestContext& ctx);
void GetFirmwareVersion2(HLERequestContext& ctx);
@@ -305,6 +339,8 @@ private:
void GetKeyboardLayout(HLERequestContext& ctx);
void GetChineseTraditionalInputMethod(HLERequestContext& ctx);
void GetFieldTestingFlag(HLERequestContext& ctx);
+ void GetHomeMenuScheme(HLERequestContext& ctx);
+ void GetHomeMenuSchemeModel(HLERequestContext& ctx);
AccountSettings account_settings{
.flags = {},
diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp
index 85849d5f3..dd652ca42 100644
--- a/src/core/hle/service/sockets/bsd.cpp
+++ b/src/core/hle/service/sockets/bsd.cpp
@@ -39,6 +39,18 @@ bool IsConnectionBased(Type type) {
}
}
+template <typename T>
+T GetValue(std::span<const u8> buffer) {
+ T t{};
+ std::memcpy(&t, buffer.data(), std::min(sizeof(T), buffer.size()));
+ return t;
+}
+
+template <typename T>
+void PutValue(std::span<u8> buffer, const T& t) {
+ std::memcpy(buffer.data(), &t, std::min(sizeof(T), buffer.size()));
+}
+
} // Anonymous namespace
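// A minimal sketch, mirroring the GetValue/PutValue helpers above: copy at
// most min(sizeof(T), buffer.size()) bytes so a short guest buffer cannot
// overrun either side. SketchAddr and RoundTrip are illustrative stand-ins.
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <span>

template <typename T>
T GetValueSketch(std::span<const std::uint8_t> buffer) {
    T t{};
    std::memcpy(&t, buffer.data(), std::min(sizeof(T), buffer.size()));
    return t;
}

template <typename T>
void PutValueSketch(std::span<std::uint8_t> buffer, const T& t) {
    std::memcpy(buffer.data(), &t, std::min(sizeof(T), buffer.size()));
}

struct SketchAddr {
    std::uint16_t family;
    std::uint16_t port;
    std::uint32_t ip;
};

// Usage: round-trip a struct through a raw byte buffer of the right size.
inline SketchAddr RoundTrip(const SketchAddr& in) {
    std::uint8_t raw[sizeof(SketchAddr)]{};
    PutValueSketch(raw, in);
    return GetValueSketch<SketchAddr>(raw);
}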
void BSD::PollWork::Execute(BSD* bsd) {
@@ -316,22 +328,12 @@ void BSD::SetSockOpt(HLERequestContext& ctx) {
const s32 fd = rp.Pop<s32>();
const u32 level = rp.Pop<u32>();
const OptName optname = static_cast<OptName>(rp.Pop<u32>());
-
- const auto buffer = ctx.ReadBuffer();
- const u8* optval = buffer.empty() ? nullptr : buffer.data();
- size_t optlen = buffer.size();
-
- std::array<u64, 2> values;
- if ((optname == OptName::SNDTIMEO || optname == OptName::RCVTIMEO) && buffer.size() == 8) {
- std::memcpy(values.data(), buffer.data(), sizeof(values));
- optlen = sizeof(values);
- optval = reinterpret_cast<const u8*>(values.data());
- }
+ const auto optval = ctx.ReadBuffer();
LOG_DEBUG(Service, "called. fd={} level={} optname=0x{:x} optlen={}", fd, level,
- static_cast<u32>(optname), optlen);
+ static_cast<u32>(optname), optval.size());
- BuildErrnoResponse(ctx, SetSockOptImpl(fd, level, optname, optlen, optval));
+ BuildErrnoResponse(ctx, SetSockOptImpl(fd, level, optname, optval));
}
void BSD::Shutdown(HLERequestContext& ctx) {
@@ -521,18 +523,19 @@ std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protoco
std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::span<const u8> read_buffer,
s32 nfds, s32 timeout) {
- if (write_buffer.size() < nfds * sizeof(PollFD)) {
- return {-1, Errno::INVAL};
- }
-
- if (nfds == 0) {
+ if (nfds <= 0) {
// When no entries are provided, -1 is returned with errno zero
return {-1, Errno::SUCCESS};
}
+ if (read_buffer.size() < nfds * sizeof(PollFD)) {
+ return {-1, Errno::INVAL};
+ }
+ if (write_buffer.size() < nfds * sizeof(PollFD)) {
+ return {-1, Errno::INVAL};
+ }
- const size_t length = std::min(read_buffer.size(), write_buffer.size());
std::vector<PollFD> fds(nfds);
- std::memcpy(fds.data(), read_buffer.data(), length);
+ std::memcpy(fds.data(), read_buffer.data(), nfds * sizeof(PollFD));
if (timeout >= 0) {
const s64 seconds = timeout / 1000;
@@ -580,7 +583,7 @@ std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::span<con
for (size_t i = 0; i < num; ++i) {
fds[i].revents = Translate(host_pollfds[i].revents);
}
- std::memcpy(write_buffer.data(), fds.data(), length);
+ std::memcpy(write_buffer.data(), fds.data(), nfds * sizeof(PollFD));
return Translate(result);
}
@@ -608,8 +611,7 @@ std::pair<s32, Errno> BSD::AcceptImpl(s32 fd, std::vector<u8>& write_buffer) {
new_descriptor.is_connection_based = descriptor.is_connection_based;
const SockAddrIn guest_addr_in = Translate(result.sockaddr_in);
- const size_t length = std::min(sizeof(guest_addr_in), write_buffer.size());
- std::memcpy(write_buffer.data(), &guest_addr_in, length);
+ PutValue(write_buffer, guest_addr_in);
return {new_fd, Errno::SUCCESS};
}
@@ -619,8 +621,7 @@ Errno BSD::BindImpl(s32 fd, std::span<const u8> addr) {
return Errno::BADF;
}
ASSERT(addr.size() == sizeof(SockAddrIn));
- SockAddrIn addr_in;
- std::memcpy(&addr_in, addr.data(), sizeof(addr_in));
+ auto addr_in = GetValue<SockAddrIn>(addr);
return Translate(file_descriptors[fd]->socket->Bind(Translate(addr_in)));
}
@@ -631,8 +632,7 @@ Errno BSD::ConnectImpl(s32 fd, std::span<const u8> addr) {
}
UNIMPLEMENTED_IF(addr.size() != sizeof(SockAddrIn));
- SockAddrIn addr_in;
- std::memcpy(&addr_in, addr.data(), sizeof(addr_in));
+ auto addr_in = GetValue<SockAddrIn>(addr);
return Translate(file_descriptors[fd]->socket->Connect(Translate(addr_in)));
}
@@ -650,7 +650,7 @@ Errno BSD::GetPeerNameImpl(s32 fd, std::vector<u8>& write_buffer) {
ASSERT(write_buffer.size() >= sizeof(guest_addrin));
write_buffer.resize(sizeof(guest_addrin));
- std::memcpy(write_buffer.data(), &guest_addrin, sizeof(guest_addrin));
+ PutValue(write_buffer, guest_addrin);
return Translate(bsd_errno);
}
@@ -667,7 +667,7 @@ Errno BSD::GetSockNameImpl(s32 fd, std::vector<u8>& write_buffer) {
ASSERT(write_buffer.size() >= sizeof(guest_addrin));
write_buffer.resize(sizeof(guest_addrin));
- std::memcpy(write_buffer.data(), &guest_addrin, sizeof(guest_addrin));
+ PutValue(write_buffer, guest_addrin);
return Translate(bsd_errno);
}
@@ -725,7 +725,7 @@ Errno BSD::GetSockOptImpl(s32 fd, u32 level, OptName optname, std::vector<u8>& o
optval.size() == sizeof(Errno), { return Errno::INVAL; },
"Incorrect getsockopt option size");
optval.resize(sizeof(Errno));
- memcpy(optval.data(), &translated_pending_err, sizeof(Errno));
+ PutValue(optval, translated_pending_err);
}
return Translate(getsockopt_err);
}
@@ -735,7 +735,7 @@ Errno BSD::GetSockOptImpl(s32 fd, u32 level, OptName optname, std::vector<u8>& o
}
}
-Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, const void* optval) {
+Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, std::span<const u8> optval) {
if (!IsFileDescriptorValid(fd)) {
return Errno::BADF;
}
@@ -748,17 +748,15 @@ Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, con
Network::SocketBase* const socket = file_descriptors[fd]->socket.get();
if (optname == OptName::LINGER) {
- ASSERT(optlen == sizeof(Linger));
- Linger linger;
- std::memcpy(&linger, optval, sizeof(linger));
+ ASSERT(optval.size() == sizeof(Linger));
+ auto linger = GetValue<Linger>(optval);
ASSERT(linger.onoff == 0 || linger.onoff == 1);
return Translate(socket->SetLinger(linger.onoff != 0, linger.linger));
}
- ASSERT(optlen == sizeof(u32));
- u32 value;
- std::memcpy(&value, optval, sizeof(value));
+ ASSERT(optval.size() == sizeof(u32));
+ auto value = GetValue<u32>(optval);
switch (optname) {
case OptName::REUSEADDR:
@@ -862,7 +860,7 @@ std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& mess
} else {
ASSERT(addr.size() == sizeof(SockAddrIn));
const SockAddrIn result = Translate(addr_in);
- std::memcpy(addr.data(), &result, sizeof(result));
+ PutValue(addr, result);
}
}
@@ -886,8 +884,7 @@ std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, std::span<const u8> mes
Network::SockAddrIn* p_addr_in = nullptr;
if (!addr.empty()) {
ASSERT(addr.size() == sizeof(SockAddrIn));
- SockAddrIn guest_addr_in;
- std::memcpy(&guest_addr_in, addr.data(), sizeof(guest_addr_in));
+ auto guest_addr_in = GetValue<SockAddrIn>(addr);
addr_in = Translate(guest_addr_in);
p_addr_in = &addr_in;
}
diff --git a/src/core/hle/service/sockets/bsd.h b/src/core/hle/service/sockets/bsd.h
index 161f22b9b..4f69d382c 100644
--- a/src/core/hle/service/sockets/bsd.h
+++ b/src/core/hle/service/sockets/bsd.h
@@ -163,7 +163,7 @@ private:
Errno ListenImpl(s32 fd, s32 backlog);
std::pair<s32, Errno> FcntlImpl(s32 fd, FcntlCmd cmd, s32 arg);
Errno GetSockOptImpl(s32 fd, u32 level, OptName optname, std::vector<u8>& optval);
- Errno SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, const void* optval);
+ Errno SetSockOptImpl(s32 fd, u32 level, OptName optname, std::span<const u8> optval);
Errno ShutdownImpl(s32 fd, s32 how);
std::pair<s32, Errno> RecvImpl(s32 fd, u32 flags, std::vector<u8>& message);
std::pair<s32, Errno> RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& message,
diff --git a/src/core/hle/service/time/clock_types.h b/src/core/hle/service/time/clock_types.h
index 9fc01ea90..7149fffeb 100644
--- a/src/core/hle/service/time/clock_types.h
+++ b/src/core/hle/service/time/clock_types.h
@@ -11,6 +11,11 @@
#include "core/hle/service/time/errors.h"
#include "core/hle/service/time/time_zone_types.h"
+// Defined by WinBase.h on Windows
+#ifdef GetCurrentTime
+#undef GetCurrentTime
+#endif
+
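// A minimal sketch of why the guard above is needed: Windows headers may
// define GetCurrentTime as a macro, which would mangle any method of the
// same name, so the macro is undefined before declaring such a method.
// SketchClock is an illustrative stand-in.
#ifdef GetCurrentTime
#undef GetCurrentTime
#endif

struct SketchClock {
    long long GetCurrentTime() const { return 0; } // safe even after WinBase.h
};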
namespace Service::Time::Clock {
enum class TimeType : u8 {
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index f0b5eff8a..d30f49877 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -35,7 +35,7 @@ static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_cont
return {
buffer_queue_core,
std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core, nvmap),
- std::make_unique<android::BufferQueueConsumer>(buffer_queue_core, nvmap)};
+ std::make_unique<android::BufferQueueConsumer>(buffer_queue_core)};
}
Display::Display(u64 id, std::string name_,
diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp
index 5c36b71e5..60ee78e89 100644
--- a/src/core/loader/deconstructed_rom_directory.cpp
+++ b/src/core/loader/deconstructed_rom_directory.cpp
@@ -3,6 +3,7 @@
#include <cstring>
#include "common/logging/log.h"
+#include "common/settings.h"
#include "core/core.h"
#include "core/file_sys/content_archive.h"
#include "core/file_sys/control_metadata.h"
@@ -14,6 +15,10 @@
#include "core/loader/deconstructed_rom_directory.h"
#include "core/loader/nso.h"
+#ifdef HAS_NCE
+#include "core/arm/nce/patcher.h"
+#endif
+
namespace Loader {
AppLoader_DeconstructedRomDirectory::AppLoader_DeconstructedRomDirectory(FileSys::VirtualFile file_,
@@ -124,21 +129,43 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
}
metadata.Print();
- const auto static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2",
- "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7",
- "subsdk8", "subsdk9", "sdk"};
+ // Enable NCE only for programs with 39-bit address space.
+ const bool is_39bit =
+ metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is39Bit;
+ Settings::SetNceEnabled(is_39bit);
+
+ const std::array static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2",
+ "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7",
+ "subsdk8", "subsdk9", "sdk"};
- // Use the NSO module loader to figure out the code layout
std::size_t code_size{};
- for (const auto& module : static_modules) {
+
+ // Define an nce patch context for each potential module.
+#ifdef HAS_NCE
+ std::array<Core::NCE::Patcher, 13> module_patchers;
+#endif
+
+ const auto GetPatcher = [&](size_t i) -> Core::NCE::Patcher* {
+#ifdef HAS_NCE
+ if (Settings::IsNceEnabled()) {
+ return &module_patchers[i];
+ }
+#endif
+ return nullptr;
+ };
+
+ // Use the NSO module loader to figure out the code layout
+ for (size_t i = 0; i < static_modules.size(); i++) {
+ const auto& module = static_modules[i];
const FileSys::VirtualFile module_file{dir->GetFile(module)};
if (!module_file) {
continue;
}
const bool should_pass_arguments = std::strcmp(module, "rtld") == 0;
- const auto tentative_next_load_addr = AppLoader_NSO::LoadModule(
- process, system, *module_file, code_size, should_pass_arguments, false);
+ const auto tentative_next_load_addr =
+ AppLoader_NSO::LoadModule(process, system, *module_file, code_size,
+ should_pass_arguments, false, {}, GetPatcher(i));
if (!tentative_next_load_addr) {
return {ResultStatus::ErrorLoadingNSO, {}};
}
@@ -146,8 +173,18 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
code_size = *tentative_next_load_addr;
}
+ // Enable direct memory mapping when NCE is enabled.
+ const u64 fastmem_base = [&]() -> size_t {
+ if (Settings::IsNceEnabled()) {
+ auto& buffer = system.DeviceMemory().buffer;
+ buffer.EnableDirectMappedAddress();
+ return reinterpret_cast<u64>(buffer.VirtualBasePointer());
+ }
+ return 0;
+ }();
+
// Setup the process code layout
- if (process.LoadFromMetadata(metadata, code_size, is_hbl).IsError()) {
+ if (process.LoadFromMetadata(metadata, code_size, fastmem_base, is_hbl).IsError()) {
return {ResultStatus::ErrorUnableToParseKernelMetadata, {}};
}
@@ -157,7 +194,8 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
VAddr next_load_addr{base_address};
const FileSys::PatchManager pm{metadata.GetTitleID(), system.GetFileSystemController(),
system.GetContentProvider()};
- for (const auto& module : static_modules) {
+ for (size_t i = 0; i < static_modules.size(); i++) {
+ const auto& module = static_modules[i];
const FileSys::VirtualFile module_file{dir->GetFile(module)};
if (!module_file) {
continue;
@@ -165,15 +203,16 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
const VAddr load_addr{next_load_addr};
const bool should_pass_arguments = std::strcmp(module, "rtld") == 0;
- const auto tentative_next_load_addr = AppLoader_NSO::LoadModule(
- process, system, *module_file, load_addr, should_pass_arguments, true, pm);
+ const auto tentative_next_load_addr =
+ AppLoader_NSO::LoadModule(process, system, *module_file, load_addr,
+ should_pass_arguments, true, pm, GetPatcher(i));
if (!tentative_next_load_addr) {
return {ResultStatus::ErrorLoadingNSO, {}};
}
next_load_addr = *tentative_next_load_addr;
modules.insert_or_assign(load_addr, module);
- LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", module, load_addr);
+ LOG_DEBUG(Loader, "loaded module {} @ {:#X}", module, load_addr);
}
// Find the RomFS by searching for a ".romfs" file in this directory
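// A minimal sketch of the immediately-invoked lambda pattern used above for
// fastmem_base: compute a const value that depends on a runtime setting
// without leaving a mutable variable in scope. IsFeatureEnabled and
// QueryBaseAddress are illustrative stand-ins for the real settings and
// device-memory calls.
#include <cstdint>

inline bool IsFeatureEnabled() { return true; }            // stand-in toggle
inline std::uint64_t QueryBaseAddress() { return 0x1000; } // stand-in base

inline std::uint64_t SelectBase() {
    const std::uint64_t base = [&]() -> std::uint64_t {
        if (IsFeatureEnabled()) {
            return QueryBaseAddress();
        }
        return 0;
    }();
    return base;
}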
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index bf56a08b4..cd6982921 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -91,7 +91,8 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process,
// Setup the process code layout
if (process
- .LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size(), false)
+ .LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size(), 0,
+ false)
.IsError()) {
return {ResultStatus::ErrorNotInitialized, {}};
}
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 69f1a54ed..e74697cda 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -22,6 +22,10 @@
#include "core/loader/nso.h"
#include "core/memory.h"
+#ifdef HAS_NCE
+#include "core/arm/nce/patcher.h"
+#endif
+
namespace Loader {
struct NroSegmentHeader {
@@ -139,7 +143,8 @@ static constexpr u32 PageAlignSize(u32 size) {
return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}
-static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data) {
+static bool LoadNroImpl(Core::System& system, Kernel::KProcess& process,
+ const std::vector<u8>& data) {
if (data.size() < sizeof(NroHeader)) {
return {};
}
@@ -194,14 +199,61 @@ static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data)
codeset.DataSegment().size += bss_size;
program_image.resize(static_cast<u32>(program_image.size()) + bss_size);
+ size_t image_size = program_image.size();
+
+#ifdef HAS_NCE
+ const auto& code = codeset.CodeSegment();
+
+ // NROs always have a 39-bit address space.
+ Settings::SetNceEnabled(true);
+
+ // Create NCE patcher
+ Core::NCE::Patcher patch{};
+
+ if (Settings::IsNceEnabled()) {
+ // Patch SVCs and MRS calls in the guest code
+ patch.PatchText(program_image, code);
+
+ // We only support PostData patching for NROs.
+ ASSERT(patch.GetPatchMode() == Core::NCE::PatchMode::PostData);
+
+ // Update patch section.
+ auto& patch_segment = codeset.PatchSegment();
+ patch_segment.addr = image_size;
+ patch_segment.size = static_cast<u32>(patch.GetSectionSize());
+
+ // Add patch section size to the module size.
+ image_size += patch_segment.size;
+ }
+#endif
+
+ // Enable direct memory mapping when NCE is enabled.
+ const u64 fastmem_base = [&]() -> size_t {
+ if (Settings::IsNceEnabled()) {
+ auto& buffer = system.DeviceMemory().buffer;
+ buffer.EnableDirectMappedAddress();
+ return reinterpret_cast<u64>(buffer.VirtualBasePointer());
+ }
+ return 0;
+ }();
// Setup the process code layout
if (process
- .LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size(), false)
+ .LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), image_size, fastmem_base,
+ false)
.IsError()) {
return false;
}
+ // Relocate code patch and copy to the program_image if running under NCE.
+ // This needs to be after LoadFromMetadata so we can use the process entry point.
+#ifdef HAS_NCE
+ if (Settings::IsNceEnabled()) {
+ patch.RelocateAndCopy(process.GetEntryPoint(), code, program_image,
+ &process.GetPostHandlers());
+ }
+#endif
+
// Load codeset for current process
codeset.memory = std::move(program_image);
process.LoadModule(std::move(codeset), process.GetEntryPoint());
@@ -209,8 +261,9 @@ static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data)
return true;
}
-bool AppLoader_NRO::LoadNro(Kernel::KProcess& process, const FileSys::VfsFile& nro_file) {
- return LoadNroImpl(process, nro_file.ReadAllBytes());
+bool AppLoader_NRO::LoadNro(Core::System& system, Kernel::KProcess& process,
+ const FileSys::VfsFile& nro_file) {
+ return LoadNroImpl(system, process, nro_file.ReadAllBytes());
}
AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::KProcess& process, Core::System& system) {
@@ -218,7 +271,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::KProcess& process, Core::S
return {ResultStatus::ErrorAlreadyLoaded, {}};
}
- if (!LoadNro(process, *file)) {
+ if (!LoadNro(system, process, *file)) {
return {ResultStatus::ErrorLoadingNRO, {}};
}
diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h
index 8de6eebc6..d2928cba0 100644
--- a/src/core/loader/nro.h
+++ b/src/core/loader/nro.h
@@ -54,7 +54,7 @@ public:
bool IsRomFSUpdatable() const override;
private:
- bool LoadNro(Kernel::KProcess& process, const FileSys::VfsFile& nro_file);
+ bool LoadNro(Core::System& system, Kernel::KProcess& process, const FileSys::VfsFile& nro_file);
std::vector<u8> icon_data;
std::unique_ptr<FileSys::NACP> nacp;
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 1350da8dc..b053a0d14 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -20,6 +20,10 @@
#include "core/loader/nso.h"
#include "core/memory.h"
+#ifdef HAS_NCE
+#include "core/arm/nce/patcher.h"
+#endif
+
namespace Loader {
namespace {
struct MODHeader {
@@ -72,7 +76,8 @@ FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& in_file) {
std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::System& system,
const FileSys::VfsFile& nso_file, VAddr load_base,
bool should_pass_arguments, bool load_into_process,
- std::optional<FileSys::PatchManager> pm) {
+ std::optional<FileSys::PatchManager> pm,
+ Core::NCE::Patcher* patch) {
if (nso_file.GetSize() < sizeof(NSOHeader)) {
return std::nullopt;
}
@@ -86,6 +91,16 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
return std::nullopt;
}
+ // Allocate some space at the beginning if we are patching in PreText mode.
+ const size_t module_start = [&]() -> size_t {
+#ifdef HAS_NCE
+ if (patch && patch->GetPatchMode() == Core::NCE::PatchMode::PreText) {
+ return patch->GetSectionSize();
+ }
+#endif
+ return 0;
+ }();
+
// Build program image
Kernel::CodeSet codeset;
Kernel::PhysicalMemory program_image;
@@ -95,11 +110,12 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
if (nso_header.IsSegmentCompressed(i)) {
data = DecompressSegment(data, nso_header.segments[i]);
}
- program_image.resize(nso_header.segments[i].location + static_cast<u32>(data.size()));
- std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(),
- data.size());
- codeset.segments[i].addr = nso_header.segments[i].location;
- codeset.segments[i].offset = nso_header.segments[i].location;
+ program_image.resize(module_start + nso_header.segments[i].location +
+ static_cast<u32>(data.size()));
+ std::memcpy(program_image.data() + module_start + nso_header.segments[i].location,
+ data.data(), data.size());
+ codeset.segments[i].addr = module_start + nso_header.segments[i].location;
+ codeset.segments[i].offset = module_start + nso_header.segments[i].location;
codeset.segments[i].size = nso_header.segments[i].size;
}
@@ -118,7 +134,7 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
}
codeset.DataSegment().size += nso_header.segments[2].bss_size;
- const u32 image_size{
+ u32 image_size{
PageAlignSize(static_cast<u32>(program_image.size()) + nso_header.segments[2].bss_size)};
program_image.resize(image_size);
@@ -129,15 +145,44 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
// Apply patches if necessary
const auto name = nso_file.GetName();
if (pm && (pm->HasNSOPatch(nso_header.build_id, name) || Settings::values.dump_nso)) {
- std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size());
+ std::span<u8> patchable_section(program_image.data() + module_start,
+ program_image.size() - module_start);
+ std::vector<u8> pi_header(sizeof(NSOHeader) + patchable_section.size());
std::memcpy(pi_header.data(), &nso_header, sizeof(NSOHeader));
- std::memcpy(pi_header.data() + sizeof(NSOHeader), program_image.data(),
- program_image.size());
+ std::memcpy(pi_header.data() + sizeof(NSOHeader), patchable_section.data(),
+ patchable_section.size());
pi_header = pm->PatchNSO(pi_header, name);
- std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data());
+ std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), patchable_section.data());
+ }
+
+#ifdef HAS_NCE
+ // If we are computing the process code layout and using nce backend, patch.
+ const auto& code = codeset.CodeSegment();
+ if (patch && patch->GetPatchMode() == Core::NCE::PatchMode::None) {
+ // Patch SVCs and MRS calls in the guest code
+ patch->PatchText(program_image, code);
+
+ // Add patch section size to the module size.
+ image_size += static_cast<u32>(patch->GetSectionSize());
+ } else if (patch) {
+ // Relocate code patch and copy to the program_image.
+ patch->RelocateAndCopy(load_base, code, program_image, &process.GetPostHandlers());
+
+ // Update patch section.
+ auto& patch_segment = codeset.PatchSegment();
+ patch_segment.addr =
+ patch->GetPatchMode() == Core::NCE::PatchMode::PreText ? 0 : image_size;
+ patch_segment.size = static_cast<u32>(patch->GetSectionSize());
+
+ // Add patch section size to the module size. In PreText mode image_size
+ // already contains the patch segment as part of module_start.
+ if (patch->GetPatchMode() == Core::NCE::PatchMode::PostData) {
+ image_size += patch_segment.size;
+ }
}
+#endif
// If we aren't actually loading (i.e. just computing the process code layout), we are done
if (!load_into_process) {
diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h
index 0b53b4ecd..29b86ed4c 100644
--- a/src/core/loader/nso.h
+++ b/src/core/loader/nso.h
@@ -15,6 +15,10 @@ namespace Core {
class System;
}
+namespace Core::NCE {
+class Patcher;
+}
+
namespace Kernel {
class KProcess;
}
@@ -88,7 +92,8 @@ public:
static std::optional<VAddr> LoadModule(Kernel::KProcess& process, Core::System& system,
const FileSys::VfsFile& nso_file, VAddr load_base,
bool should_pass_arguments, bool load_into_process,
- std::optional<FileSys::PatchManager> pm = {});
+ std::optional<FileSys::PatchManager> pm = {},
+ Core::NCE::Patcher* patch = nullptr);
LoadResult Load(Kernel::KProcess& process, Core::System& system) override;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fa5273402..5b376b202 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -1,8 +1,10 @@
// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cstring>
+#include <mutex>
#include <span>
#include "common/assert.h"
@@ -10,6 +12,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
+#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/swap.h"
#include "core/core.h"
@@ -41,7 +44,7 @@ struct Memory::Impl {
explicit Impl(Core::System& system_) : system{system_} {}
void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
- current_page_table = &process.GetPageTable().PageTableImpl();
+ current_page_table = &process.GetPageTable().GetImpl();
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth();
@@ -50,7 +53,7 @@ struct Memory::Impl {
}
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target) {
+ Common::PhysicalAddress target, Common::MemoryPermission perms) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -60,7 +63,7 @@ struct Memory::Impl {
if (Settings::IsFastmemEnabled()) {
system.DeviceMemory().buffer.Map(GetInteger(base),
- GetInteger(target) - DramMemoryMap::Base, size);
+ GetInteger(target) - DramMemoryMap::Base, size, perms);
}
}
@@ -75,6 +78,51 @@ struct Memory::Impl {
}
}
+ void ProtectRegion(Common::PageTable& page_table, VAddr vaddr, u64 size,
+ Common::MemoryPermission perms) {
+ ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
+
+ if (!Settings::IsFastmemEnabled()) {
+ return;
+ }
+
+ const bool is_r = True(perms & Common::MemoryPermission::Read);
+ const bool is_w = True(perms & Common::MemoryPermission::Write);
+ const bool is_x =
+ True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
+
+ if (!current_page_table) {
+ system.DeviceMemory().buffer.Protect(vaddr, size, is_r, is_w, is_x);
+ return;
+ }
+
+ u64 protect_bytes{};
+ u64 protect_begin{};
+ for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
+ const Common::PageType page_type{
+ current_page_table->pointers[addr >> YUZU_PAGEBITS].Type()};
+ switch (page_type) {
+ case Common::PageType::RasterizerCachedMemory:
+ if (protect_bytes > 0) {
+ system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
+ is_x);
+ protect_bytes = 0;
+ }
+ break;
+ default:
+ if (protect_bytes == 0) {
+ protect_begin = addr;
+ }
+ protect_bytes += YUZU_PAGESIZE;
+ }
+ }
+
+ if (protect_bytes > 0) {
+ system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+ }
+ }
+
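// A minimal sketch of the range-coalescing loop ProtectRegion uses above:
// walk pages, batch contiguous runs that need the same treatment, and flush
// the batch whenever a page that must be skipped is hit. PAGE_SIZE,
// ShouldSkip, and ApplyProtection are illustrative stand-ins.
#include <cstdint>

constexpr std::uint64_t PAGE_SIZE = 0x1000;

inline bool ShouldSkip(std::uint64_t /*addr*/) { return false; } // e.g. rasterizer-cached page
inline void ApplyProtection(std::uint64_t /*begin*/, std::uint64_t /*bytes*/) {}

inline void ProtectCoalesced(std::uint64_t vaddr, std::uint64_t size) {
    std::uint64_t run_begin = 0;
    std::uint64_t run_bytes = 0;
    for (std::uint64_t addr = vaddr; addr < vaddr + size; addr += PAGE_SIZE) {
        if (ShouldSkip(addr)) {
            if (run_bytes > 0) {
                ApplyProtection(run_begin, run_bytes);
                run_bytes = 0;
            }
            continue;
        }
        if (run_bytes == 0) {
            run_begin = addr;
        }
        run_bytes += PAGE_SIZE;
    }
    if (run_bytes > 0) {
        ApplyProtection(run_begin, run_bytes);
    }
}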
[[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
const Common::PhysicalAddress paddr{
current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
@@ -195,7 +243,7 @@ struct Memory::Impl {
bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
auto on_memory, auto on_rasterizer, auto increment) {
- const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl();
+ const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
std::size_t remaining_size = size;
std::size_t page_index = addr >> YUZU_PAGEBITS;
std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -318,7 +366,7 @@ struct Memory::Impl {
[&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
u8* const host_ptr) {
if constexpr (!UNSAFE) {
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
+ HandleRasterizerWrite(GetInteger(current_vaddr), copy_amount);
}
std::memcpy(host_ptr, src_buffer, copy_amount);
},
@@ -351,7 +399,7 @@ struct Memory::Impl {
},
[&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
u8* const host_ptr) {
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
+ HandleRasterizerWrite(GetInteger(current_vaddr), copy_amount);
std::memset(host_ptr, 0, copy_amount);
},
[](const std::size_t copy_amount) {});
@@ -420,7 +468,7 @@ struct Memory::Impl {
const std::size_t block_size) {
// dc cvac: Store to point of coherency
// CPU flush -> GPU invalidate
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
+ HandleRasterizerWrite(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(dest_addr, size, on_rasterizer);
}
@@ -430,7 +478,7 @@ struct Memory::Impl {
const std::size_t block_size) {
// dc civac: Store to point of coherency, and invalidate from cache
// CPU flush -> GPU invalidate
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
+ HandleRasterizerWrite(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(dest_addr, size, on_rasterizer);
}
@@ -767,7 +815,18 @@ struct Memory::Impl {
}
void HandleRasterizerWrite(VAddr address, size_t size) {
- const size_t core = system.GetCurrentHostThreadID();
+ constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
+ const size_t core = std::min(system.GetCurrentHostThreadID(),
+ sys_core); // Any other calling host threads are routed to the sys core.
+ // Guard on sys_core.
+ if (core == sys_core) [[unlikely]] {
+ sys_core_guard.lock();
+ }
+ SCOPE_EXIT({
+ if (core == sys_core) [[unlikely]] {
+ sys_core_guard.unlock();
+ }
+ });
auto& current_area = rasterizer_write_areas[core];
VAddr subaddress = address >> YUZU_PAGEBITS;
bool do_collection = current_area.last_address == subaddress;
@@ -799,6 +858,7 @@ struct Memory::Impl {
rasterizer_read_areas{};
std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
+ std::mutex sys_core_guard;
};
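// A minimal sketch of the conditional-lock pattern HandleRasterizerWrite uses
// above: host threads beyond the core count share one slot, only that slot
// takes the mutex, and a scope-exit helper releases it on every return path.
// ScopeExit here is a simplified stand-in for common/scope_exit.h.
#include <algorithm>
#include <cstddef>
#include <mutex>
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : func{std::forward<F>(f)} {}
    ~ScopeExit() { func(); }

private:
    F func;
};

constexpr std::size_t NUM_CORES = 4;
inline std::mutex shared_slot_guard;

inline void WriteDirtyState(std::size_t host_thread_id) {
    constexpr std::size_t shared_slot = NUM_CORES - 1;
    const std::size_t slot = std::min(host_thread_id, shared_slot);
    if (slot == shared_slot) {
        shared_slot_guard.lock();
    }
    ScopeExit unlock{[&] {
        if (slot == shared_slot) {
            shared_slot_guard.unlock();
        }
    }};
    // ... update per-slot dirty tracking here ...
}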
Memory::Memory(Core::System& system_) : system{system_} {
@@ -816,17 +876,22 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
}
void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target) {
- impl->MapMemoryRegion(page_table, base, size, target);
+ Common::PhysicalAddress target, Common::MemoryPermission perms) {
+ impl->MapMemoryRegion(page_table, base, size, target, perms);
}
void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
impl->UnmapRegion(page_table, base, size);
}
+void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
+ Common::MemoryPermission perms) {
+ impl->ProtectRegion(page_table, GetInteger(vaddr), size, perms);
+}
+
bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
const Kernel::KProcess& process = *system.ApplicationProcess();
- const auto& page_table = process.GetPageTable().PageTableImpl();
+ const auto& page_table = process.GetPageTable().GetImpl();
const size_t page = vaddr >> YUZU_PAGEBITS;
if (page >= page_table.pointers.size()) {
return false;
@@ -986,4 +1051,17 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
impl->FlushRegion(dest_addr, size);
}
+bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
+ bool mapped = true;
+ u8* const ptr = impl->GetPointerImpl(
+ GetInteger(vaddr),
+ [&] {
+ LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size,
+ GetInteger(vaddr));
+ mapped = false;
+ },
+ [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+ return mapped && ptr != nullptr;
+}
+
} // namespace Core::Memory
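InvalidateNCE gives the native code execution path a single call that both checks whether a guest address is actually backed by memory and, when it is rasterizer-cached, invalidates the GPU's view of it. A hedged sketch of how a caller might consume the boolean result follows; it assumes yuzu's core/memory.h is available, and the HandleNceWriteFault name is hypothetical, not part of the codebase.

    // Illustrative caller: on a write fault from natively executed code, try to
    // invalidate the region; if the address is unmapped, surface the fault
    // instead of retrying the store.
    bool HandleNceWriteFault(Core::Memory::Memory& memory, Common::ProcessAddress vaddr,
                             std::size_t size) {
        if (!memory.InvalidateNCE(vaddr, size)) {
            return false; // unmapped: nothing to invalidate, report the fault
        }
        return true; // safe to re-execute the faulting store
    }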
diff --git a/src/core/memory.h b/src/core/memory.h
index 13047a545..ed8ebb5eb 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -15,8 +15,9 @@
#include "core/hle/result.h"
namespace Common {
+enum class MemoryPermission : u32;
struct PageTable;
-}
+} // namespace Common
namespace Core {
class System;
@@ -82,9 +83,10 @@ public:
* @param size The amount of bytes to map. Must be page-aligned.
* @param target Buffer with the memory backing the mapping. Must be of length at least
* `size`.
+ * @param perms The permissions to map the memory with.
*/
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target);
+ Common::PhysicalAddress target, Common::MemoryPermission perms);
/**
* Unmaps a region of the emulated process address space.
@@ -96,6 +98,17 @@ public:
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
/**
+ * Protects a region of the emulated process address space with the new permissions.
+ *
+ * @param page_table The page table of the emulated process.
+ * @param base The start address to re-protect. Must be page-aligned.
+ * @param size The amount of bytes to protect. Must be page-aligned.
+     * @param perms The new permissions to apply to the address range.
+ */
+ void ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ Common::MemoryPermission perms);
+
+ /**
* Checks whether or not the supplied address is a valid virtual
* address for the current process.
*
@@ -472,6 +485,7 @@ public:
void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
+ bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
private:
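Taken together, the new perms parameter on MapMemoryRegion and the ProtectRegion entry point let a caller map a range and later tighten or relax its host protection without unmapping. A hedged usage sketch against the declarations above: it assumes yuzu's common headers (u64, flag operators) and that Common::MemoryPermission exposes Read/ReadWrite values; the function name and arguments are placeholders.

    // Map 64 KiB read/write, then drop write permission once the region has
    // been populated (for example, after patching code into it).
    void ExampleRemap(Core::Memory::Memory& memory, Common::PageTable& page_table,
                      Common::ProcessAddress base, Common::PhysicalAddress target) {
        const u64 size = 0x10000;
        memory.MapMemoryRegion(page_table, base, size, target,
                               Common::MemoryPermission::ReadWrite);
        // ... write into the region ...
        memory.ProtectRegion(page_table, base, size, Common::MemoryPermission::Read);
    }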
diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp
index 53a89cc8f..db30ba598 100644
--- a/src/core/memory/cheat_engine.cpp
+++ b/src/core/memory/cheat_engine.cpp
@@ -10,7 +10,8 @@
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/hid/controllers/npad.h"
-#include "core/hle/service/hid/hid.h"
+#include "core/hle/service/hid/hid_server.h"
+#include "core/hle/service/hid/resource_manager.h"
#include "core/hle/service/sm/sm.h"
#include "core/memory.h"
#include "core/memory/cheat_engine.h"
@@ -54,23 +55,20 @@ void StandardVmCallbacks::MemoryWrite(VAddr address, const void* data, u64 size)
}
u64 StandardVmCallbacks::HidKeysDown() {
- const auto hid = system.ServiceManager().GetService<Service::HID::Hid>("hid");
+ const auto hid = system.ServiceManager().GetService<Service::HID::IHidServer>("hid");
if (hid == nullptr) {
LOG_WARNING(CheatEngine, "Attempted to read input state, but hid is not initialized!");
return 0;
}
- const auto applet_resource = hid->GetAppletResource();
+ const auto applet_resource = hid->GetResourceManager();
if (applet_resource == nullptr) {
LOG_WARNING(CheatEngine,
"Attempted to read input state, but applet resource is not initialized!");
return 0;
}
- const auto press_state =
- applet_resource
- ->GetController<Service::HID::Controller_NPad>(Service::HID::HidController::NPad)
- .GetAndResetPressState();
+ const auto press_state = applet_resource->GetNpad()->GetAndResetPressState();
return static_cast<u64>(press_state & HID::NpadButton::All);
}
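HidKeysDown now resolves the press state through the HID server's resource manager and NPad accessor, bailing out at the first missing link in the chain. A small sketch of that defensive lookup pattern, using stub types in place of Service::HID::IHidServer and the resource manager (all names here are stand-ins):

    #include <cstdint>
    #include <memory>

    struct Npad { std::uint64_t GetAndResetPressState() { return 0; } };
    struct ResourceManager { std::shared_ptr<Npad> GetNpad() { return std::make_shared<Npad>(); } };
    struct HidServer { std::shared_ptr<ResourceManager> GetResourceManager() { return nullptr; } };

    // Mirror of the lookup chain: return 0 (no buttons) at the first missing
    // service rather than dereferencing a null pointer.
    std::uint64_t ReadPressState(const std::shared_ptr<HidServer>& hid) {
        if (hid == nullptr) {
            return 0;
        }
        const auto resource = hid->GetResourceManager();
        if (resource == nullptr) {
            return 0;
        }
        return resource->GetNpad()->GetAndResetPressState();
    }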
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index ed875d444..5d168cbc1 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -116,7 +116,7 @@ json GetProcessorStateDataAuto(Core::System& system) {
Core::ARM_Interface::ThreadContext64 context{};
arm.SaveContext(context);
- return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32",
+ return GetProcessorStateData(process->Is64Bit() ? "AArch64" : "AArch32",
GetInteger(process->GetEntryPoint()), context.sp, context.pc,
context.pstate, context.cpu_registers);
}