path: root/src/core
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CMakeLists.txt | 4
-rw-r--r--  src/core/core.cpp | 2
-rw-r--r--  src/core/frontend/applets/controller.cpp | 10
-rw-r--r--  src/core/hid/emulated_console.cpp | 3
-rw-r--r--  src/core/hid/emulated_console.h | 2
-rw-r--r--  src/core/hid/emulated_controller.cpp | 65
-rw-r--r--  src/core/hid/emulated_controller.h | 16
-rw-r--r--  src/core/hid/hid_types.h | 7
-rw-r--r--  src/core/hid/motion_input.cpp | 12
-rw-r--r--  src/core/hid/motion_input.h | 5
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 34
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.h | 17
-rw-r--r--  src/core/hle/kernel/k_memory_block.h | 2
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 194
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 20
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 22
-rw-r--r--  src/core/hle/kernel/k_process.h | 4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 46
-rw-r--r--  src/core/hle/kernel/k_thread.h | 6
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 5
-rw-r--r--  src/core/hle/kernel/svc.cpp | 108
-rw-r--r--  src/core/hle/kernel/svc_common.h | 5
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 8
-rw-r--r--  src/core/hle/lock.cpp | 9
-rw-r--r--  src/core/hle/lock.h | 18
-rw-r--r--  src/core/hle/service/bcat/backend/backend.cpp | 12
-rw-r--r--  src/core/hle/service/bcat/backend/backend.h | 5
-rw-r--r--  src/core/hle/service/hid/controllers/console_sixaxis.cpp | 20
-rw-r--r--  src/core/hle/service/hid/controllers/console_sixaxis.h | 10
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 7
-rw-r--r--  src/core/hle/service/hid/controllers/npad.h | 7
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 2
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp | 2
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue.cpp | 4
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue.h | 5
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp | 3
-rw-r--r--  src/core/loader/kip.cpp | 8
39 files changed, 488 insertions(+), 235 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 49bed614a..b1a746727 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -187,6 +187,8 @@ add_library(core STATIC
hle/kernel/k_event.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
+ hle/kernel/k_interrupt_manager.cpp
+ hle/kernel/k_interrupt_manager.h
hle/kernel/k_light_condition_variable.cpp
hle/kernel/k_light_condition_variable.h
hle/kernel/k_light_lock.cpp
@@ -265,8 +267,6 @@ add_library(core STATIC
hle/kernel/svc_wrap.h
hle/kernel/time_manager.cpp
hle/kernel/time_manager.h
- hle/lock.cpp
- hle/lock.h
hle/result.h
hle/service/acc/acc.cpp
hle/service/acc/acc.h
diff --git a/src/core/core.cpp b/src/core/core.cpp
index aa96f709b..3f9a7f44b 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -317,6 +317,8 @@ struct System::Impl {
is_powered_on = false;
exit_lock = false;
+ gpu_core->NotifyShutdown();
+
services.reset();
service_manager.reset();
cheat_engine.reset();
diff --git a/src/core/frontend/applets/controller.cpp b/src/core/frontend/applets/controller.cpp
index 6dbd38ffa..e1033b634 100644
--- a/src/core/frontend/applets/controller.cpp
+++ b/src/core/frontend/applets/controller.cpp
@@ -45,26 +45,26 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
// Pro Controller -> Dual Joycons -> Left Joycon/Right Joycon -> Handheld
if (parameters.allow_pro_controller) {
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::ProController);
- controller->Connect();
+ controller->Connect(true);
} else if (parameters.allow_dual_joycons) {
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::JoyconDual);
- controller->Connect();
+ controller->Connect(true);
} else if (parameters.allow_left_joycon && parameters.allow_right_joycon) {
// Assign left joycons to even player indices and right joycons to odd player indices.
// We do this since Captain Toad Treasure Tracker expects a left joycon for Player 1 and
// a right Joycon for Player 2 in 2 Player Assist mode.
if (index % 2 == 0) {
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::JoyconLeft);
- controller->Connect();
+ controller->Connect(true);
} else {
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::JoyconRight);
- controller->Connect();
+ controller->Connect(true);
}
} else if (index == 0 && parameters.enable_single_mode && parameters.allow_handheld &&
!Settings::values.use_docked_mode.GetValue()) {
// We should *never* reach here under any normal circumstances.
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
- controller->Connect();
+ controller->Connect(true);
} else {
UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
}
diff --git a/src/core/hid/emulated_console.cpp b/src/core/hid/emulated_console.cpp
index 685ec080c..08f8af551 100644
--- a/src/core/hid/emulated_console.cpp
+++ b/src/core/hid/emulated_console.cpp
@@ -161,7 +161,10 @@ void EmulatedConsole::SetMotion(const Common::Input::CallbackStatus& callback) {
motion.rotation = emulated.GetGyroscope();
motion.orientation = emulated.GetOrientation();
motion.quaternion = emulated.GetQuaternion();
+ motion.gyro_bias = emulated.GetGyroBias();
motion.is_at_rest = !emulated.IsMoving(motion_sensitivity);
+ // TODO: Find out what this value represents
+ motion.verticalization_error = 0.0f;
TriggerOnChange(ConsoleTriggerType::Motion);
}
diff --git a/src/core/hid/emulated_console.h b/src/core/hid/emulated_console.h
index 3afd284d5..707419102 100644
--- a/src/core/hid/emulated_console.h
+++ b/src/core/hid/emulated_console.h
@@ -50,6 +50,8 @@ struct ConsoleMotion {
Common::Vec3f rotation{};
std::array<Common::Vec3f, 3> orientation{};
Common::Quaternion<f32> quaternion{};
+ Common::Vec3f gyro_bias{};
+ f32 verticalization_error{};
bool is_at_rest{};
};
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 93372445b..71fc05807 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -843,23 +843,18 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
}
bool EmulatedController::TestVibration(std::size_t device_index) {
- if (device_index >= output_devices.size()) {
- return false;
- }
- if (!output_devices[device_index]) {
- return false;
- }
-
- // Send a slight vibration to test for rumble support
- constexpr Common::Input::VibrationStatus status = {
+ static constexpr VibrationValue test_vibration = {
.low_amplitude = 0.001f,
.low_frequency = 160.0f,
.high_amplitude = 0.001f,
.high_frequency = 320.0f,
- .type = Common::Input::VibrationAmplificationType::Linear,
};
- return output_devices[device_index]->SetVibration(status) ==
- Common::Input::VibrationError::None;
+
+ // Send a slight vibration to test for rumble support
+ SetVibration(device_index, test_vibration);
+
+ // Stop any vibration and return the result
+ return SetVibration(device_index, DEFAULT_VIBRATION_VALUE);
}
void EmulatedController::SetLedPattern() {
@@ -884,15 +879,42 @@ void EmulatedController::SetSupportedNpadStyleTag(NpadStyleTag supported_styles)
if (!is_connected) {
return;
}
- if (!IsControllerSupported()) {
- LOG_ERROR(Service_HID, "Controller type {} is not supported. Disconnecting controller",
- npad_type);
- Disconnect();
+ if (IsControllerSupported()) {
+ return;
}
+
+ Disconnect();
+
+ // Fallback fullkey controllers to Pro controllers
+ if (IsControllerFullkey() && supported_style_tag.fullkey) {
+ LOG_WARNING(Service_HID, "Reconnecting controller type {} as Pro controller", npad_type);
+ SetNpadStyleIndex(NpadStyleIndex::ProController);
+ Connect();
+ return;
+ }
+
+ LOG_ERROR(Service_HID, "Controller type {} is not supported. Disconnecting controller",
+ npad_type);
}
-bool EmulatedController::IsControllerSupported() const {
- switch (npad_type) {
+bool EmulatedController::IsControllerFullkey(bool use_temporary_value) const {
+ const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
+ switch (type) {
+ case NpadStyleIndex::ProController:
+ case NpadStyleIndex::GameCube:
+ case NpadStyleIndex::NES:
+ case NpadStyleIndex::SNES:
+ case NpadStyleIndex::N64:
+ case NpadStyleIndex::SegaGenesis:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
+ const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
+ switch (type) {
case NpadStyleIndex::ProController:
return supported_style_tag.fullkey;
case NpadStyleIndex::Handheld:
@@ -920,9 +942,10 @@ bool EmulatedController::IsControllerSupported() const {
}
}
-void EmulatedController::Connect() {
- if (!IsControllerSupported()) {
- LOG_ERROR(Service_HID, "Controller type {} is not supported", npad_type);
+void EmulatedController::Connect(bool use_temporary_value) {
+ if (!IsControllerSupported(use_temporary_value)) {
+ const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
+ LOG_ERROR(Service_HID, "Controller type {} is not supported", type);
return;
}
{
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index e42aafebc..c0994ab4d 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -167,8 +167,11 @@ public:
*/
void SetSupportedNpadStyleTag(NpadStyleTag supported_styles);
- /// Sets the connected status to true
- void Connect();
+ /**
+ * Sets the connected status to true
+ * @param use_temporary_value If true, tmp_npad_type will be used
+ */
+ void Connect(bool use_temporary_value = false);
/// Sets the connected status to false
void Disconnect();
@@ -318,10 +321,17 @@ private:
void LoadTASParams();
/**
+ * @param use_temporary_value If true, tmp_npad_type will be used
+ * @return true if the controller style is fullkey
+ */
+ bool IsControllerFullkey(bool use_temporary_value = false) const;
+
+ /**
* Checks the current controller type against the supported_style_tag
+ * @param use_temporary_value If true, tmp_npad_type will be used
* @return true if the controller is supported
*/
- bool IsControllerSupported() const;
+ bool IsControllerSupported(bool use_temporary_value = false) const;
/**
* Updates the button status of the controller
diff --git a/src/core/hid/hid_types.h b/src/core/hid/hid_types.h
index 7c12f01fc..4eca68533 100644
--- a/src/core/hid/hid_types.h
+++ b/src/core/hid/hid_types.h
@@ -496,6 +496,13 @@ struct VibrationValue {
};
static_assert(sizeof(VibrationValue) == 0x10, "VibrationValue has incorrect size.");
+constexpr VibrationValue DEFAULT_VIBRATION_VALUE{
+ .low_amplitude = 0.0f,
+ .low_frequency = 160.0f,
+ .high_amplitude = 0.0f,
+ .high_frequency = 320.0f,
+};
+
// This is nn::hid::VibrationDeviceInfo
struct VibrationDeviceInfo {
VibrationDeviceType type{};
diff --git a/src/core/hid/motion_input.cpp b/src/core/hid/motion_input.cpp
index c25fea966..a23f192d7 100644
--- a/src/core/hid/motion_input.cpp
+++ b/src/core/hid/motion_input.cpp
@@ -23,11 +23,11 @@ void MotionInput::SetAcceleration(const Common::Vec3f& acceleration) {
}
void MotionInput::SetGyroscope(const Common::Vec3f& gyroscope) {
- gyro = gyroscope - gyro_drift;
+ gyro = gyroscope - gyro_bias;
// Automatically adjust the bias to minimize drift
if (!IsMoving(0.1f)) {
- gyro_drift = (gyro_drift * 0.9999f) + (gyroscope * 0.0001f);
+ gyro_bias = (gyro_bias * 0.9999f) + (gyroscope * 0.0001f);
}
if (gyro.Length2() < gyro_threshold) {
@@ -41,8 +41,8 @@ void MotionInput::SetQuaternion(const Common::Quaternion<f32>& quaternion) {
quat = quaternion;
}
-void MotionInput::SetGyroDrift(const Common::Vec3f& drift) {
- gyro_drift = drift;
+void MotionInput::SetGyroBias(const Common::Vec3f& bias) {
+ gyro_bias = bias;
}
void MotionInput::SetGyroThreshold(f32 threshold) {
@@ -192,6 +192,10 @@ Common::Vec3f MotionInput::GetGyroscope() const {
return gyro;
}
+Common::Vec3f MotionInput::GetGyroBias() const {
+ return gyro_bias;
+}
+
Common::Quaternion<f32> MotionInput::GetQuaternion() const {
return quat;
}
diff --git a/src/core/hid/motion_input.h b/src/core/hid/motion_input.h
index 5b5b420bb..bca4520fa 100644
--- a/src/core/hid/motion_input.h
+++ b/src/core/hid/motion_input.h
@@ -24,7 +24,7 @@ public:
void SetAcceleration(const Common::Vec3f& acceleration);
void SetGyroscope(const Common::Vec3f& gyroscope);
void SetQuaternion(const Common::Quaternion<f32>& quaternion);
- void SetGyroDrift(const Common::Vec3f& drift);
+ void SetGyroBias(const Common::Vec3f& bias);
void SetGyroThreshold(f32 threshold);
void EnableReset(bool reset);
@@ -36,6 +36,7 @@ public:
[[nodiscard]] std::array<Common::Vec3f, 3> GetOrientation() const;
[[nodiscard]] Common::Vec3f GetAcceleration() const;
[[nodiscard]] Common::Vec3f GetGyroscope() const;
+ [[nodiscard]] Common::Vec3f GetGyroBias() const;
[[nodiscard]] Common::Vec3f GetRotations() const;
[[nodiscard]] Common::Quaternion<f32> GetQuaternion() const;
@@ -69,7 +70,7 @@ private:
Common::Vec3f gyro;
// Vector to be subtracted from gyro measurements
- Common::Vec3f gyro_drift;
+ Common::Vec3f gyro_bias;
// Minimum gyro amplitude to detect if the device is moving
f32 gyro_threshold = 0.0f;
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 4f4e338e3..baad2c5d6 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -9,6 +9,7 @@
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
namespace Kernel {
@@ -42,6 +43,11 @@ void GlobalSchedulerContext::PreemptThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+
+ // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
+ // in the rotator thread being scheduled. For cores 0-2, this is to simulate system
+ // interrupts that may have occurred.
+ kernel.PhysicalCore(core_id).Interrupt();
}
}
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
new file mode 100644
index 000000000..e5dd39751
--- /dev/null
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -0,0 +1,34 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_interrupt_manager.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel::KInterruptManager {
+
+void HandleInterrupt(KernelCore& kernel, s32 core_id) {
+ auto* process = kernel.CurrentProcess();
+ if (!process) {
+ return;
+ }
+
+ auto& scheduler = kernel.Scheduler(core_id);
+ auto& current_thread = *scheduler.GetCurrentThread();
+
+ // If the user disable count is set, we may need to pin the current thread.
+ if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+ KScopedSchedulerLock sl{kernel};
+
+ // Pin the current thread.
+ process->PinCurrentThread(core_id);
+
+ // Set the interrupt flag for the thread.
+ scheduler.GetCurrentThread()->SetInterruptFlag();
+ }
+}
+
+} // namespace Kernel::KInterruptManager
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
new file mode 100644
index 000000000..05924801e
--- /dev/null
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -0,0 +1,17 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+namespace KInterruptManager {
+void HandleInterrupt(KernelCore& kernel, s32 core_id);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index fd491146f..9e51c33ce 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -120,7 +120,7 @@ static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
enum class KMemoryPermission : u8 {
None = 0,
- Mask = static_cast<u8>(~None),
+ All = static_cast<u8>(~None),
Read = 1 << 0,
Write = 1 << 1,
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 99982e5a3..4da509224 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -264,9 +264,9 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
ASSERT(heap_last < stack_start || stack_last < heap_start);
ASSERT(heap_last < kmap_start || kmap_last < heap_start);
- current_heap_addr = heap_region_start;
- heap_capacity = 0;
- physical_memory_usage = 0;
+ current_heap_end = heap_region_start;
+ max_heap_size = 0;
+ mapped_physical_memory_size = 0;
memory_pool = pool;
page_table_impl.Resize(address_space_width, PageBits);
@@ -306,7 +306,7 @@ ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std:
KMemoryState state{};
KMemoryPermission perm{};
CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, KMemoryState::All,
- KMemoryState::Normal, KMemoryPermission::Mask,
+ KMemoryState::Normal, KMemoryPermission::All,
KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask,
KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
@@ -465,7 +465,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
MapPhysicalMemory(page_linked_list, addr, end_addr);
- physical_memory_usage += remaining_size;
+ mapped_physical_memory_size += remaining_size;
const std::size_t num_pages{size / PageSize};
block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
@@ -507,7 +507,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
- physical_memory_usage -= mapped_size;
+ mapped_physical_memory_size -= mapped_size;
return ResultSuccess;
}
@@ -554,7 +554,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KMemoryState src_state{};
CASCADE_CODE(CheckMemoryState(
&src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
- KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::ReadAndWrite,
+ KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
if (IsRegionMapped(dst_addr, size)) {
@@ -593,7 +593,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KMemoryState src_state{};
CASCADE_CODE(CheckMemoryState(
&src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
- KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::None,
+ KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
KMemoryPermission dst_perm{};
@@ -784,7 +784,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
CASCADE_CODE(CheckMemoryState(
&state, nullptr, &attribute, addr, size,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
- KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::Mask,
+ KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::All,
KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
KMemoryAttribute::IpcAndDeviceMapped));
@@ -806,6 +806,33 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
block_manager->Update(addr, size / PageSize, state, KMemoryPermission::ReadAndWrite);
+ return ResultSuccess;
+}
+
+ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
+ const size_t num_pages = size / PageSize;
+
+ // Lock the table.
+ std::lock_guard lock{page_table_lock};
+
+ // Verify we can change the memory permission.
+ KMemoryState old_state;
+ KMemoryPermission old_perm;
+ R_TRY(this->CheckMemoryState(
+ std::addressof(old_state), std::addressof(old_perm), nullptr, addr, size,
+ KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Determine new perm.
+ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+ R_SUCCEED_IF(old_perm == new_perm);
+
+ // Perform mapping operation.
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+
+ // Update the blocks.
+ block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
return ResultSuccess;
}
@@ -832,61 +859,125 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryA
return ResultSuccess;
}
-ResultCode KPageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
+ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
+ // Lock the table.
std::lock_guard lock{page_table_lock};
- heap_capacity = new_heap_capacity;
- return ResultSuccess;
-}
-ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) {
+ // Only process page tables are allowed to set heap size.
+ ASSERT(!this->IsKernel());
- if (size > heap_region_end - heap_region_start) {
- return ResultOutOfMemory;
- }
+ max_heap_size = size;
- const u64 previous_heap_size{GetHeapSize()};
-
- UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented");
+ return ResultSuccess;
+}
- // Increase the heap size
+ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+ // Try to perform a reduction in heap, instead of an extension.
+ VAddr cur_address{};
+ std::size_t allocation_size{};
{
- std::lock_guard lock{page_table_lock};
-
- const u64 delta{size - previous_heap_size};
-
- // Reserve memory for the heap extension.
- KScopedResourceReservation memory_reservation(
- system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
- delta);
-
- if (!memory_reservation.Succeeded()) {
- LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
- return ResultLimitReached;
+ // Lock the table.
+ std::lock_guard lk(page_table_lock);
+
+ // Validate that setting heap size is possible at all.
+ R_UNLESS(!is_kernel, ResultOutOfMemory);
+ R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+ ResultOutOfMemory);
+ R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+
+ if (size < GetHeapSize()) {
+ // The size being requested is less than the current size, so we need to free the end of
+ // the heap.
+
+ // Validate memory state.
+ std::size_t num_allocator_blocks;
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
+ heap_region_start + size, GetHeapSize() - size,
+ KMemoryState::All, KMemoryState::Normal,
+ KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+
+ // Unmap the end of the heap.
+ const auto num_pages = (GetHeapSize() - size) / PageSize;
+ R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+ OperationType::Unmap));
+
+ // Release the memory from the resource limit.
+ system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+ LimitableResource::PhysicalMemory, num_pages * PageSize);
+
+ // Apply the memory block update.
+ block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None);
+
+ // Update the current heap end.
+ current_heap_end = heap_region_start + size;
+
+ // Set the output.
+ *out = heap_region_start;
+ return ResultSuccess;
+ } else if (size == GetHeapSize()) {
+ // The size requested is exactly the current size.
+ *out = heap_region_start;
+ return ResultSuccess;
+ } else {
+ // We have to allocate memory. Determine how much to allocate and where while the table
+ // is locked.
+ cur_address = current_heap_end;
+ allocation_size = size - GetHeapSize();
}
+ }
- KPageLinkedList page_linked_list;
- const std::size_t num_pages{delta / PageSize};
+ // Reserve memory for the heap extension.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+ allocation_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
- CASCADE_CODE(
- system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
+ // Allocate pages for the heap extension.
+ KPageLinkedList page_linked_list;
+ R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
+ memory_pool));
- if (IsRegionMapped(current_heap_addr, delta)) {
- return ResultInvalidCurrentMemory;
+ // Map the pages.
+ {
+ // Lock the table.
+ std::lock_guard lk(page_table_lock);
+
+ // Ensure that the heap hasn't changed since we began executing.
+ ASSERT(cur_address == current_heap_end);
+
+ // Check the memory state.
+ std::size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+ allocation_size, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None));
+
+ // Map the pages.
+ const auto num_pages = allocation_size / PageSize;
+ R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup));
+
+ // Clear all the newly allocated pages.
+ for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+ std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+ PageSize);
}
- CASCADE_CODE(
- Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
-
- // Succeeded in allocation, commit the resource reservation
+ // We succeeded, so commit our memory reservation.
memory_reservation.Commit();
- block_manager->Update(current_heap_addr, num_pages, KMemoryState::Normal,
- KMemoryPermission::ReadAndWrite);
+ // Apply the memory block update.
+ block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
+ KMemoryPermission::ReadAndWrite, KMemoryAttribute::None);
- current_heap_addr = heap_region_start + size;
- }
+ // Update the current heap end.
+ current_heap_end = heap_region_start + size;
- return heap_region_start;
+ // Set the output.
+ *out = heap_region_start;
+ return ResultSuccess;
+ }
}
ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
@@ -978,7 +1069,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
if (const ResultCode result{CheckMemoryState(
nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
result.IsError()) {
return result;
@@ -1031,9 +1122,8 @@ ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
- KMemoryPermission::Mask, KMemoryPermission::None,
- KMemoryAttribute::Mask, KMemoryAttribute::None,
- KMemoryAttribute::IpcAndDeviceMapped)
+ KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
+ KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
.IsError();
}
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index d784aa67e..564410dca 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -47,10 +47,11 @@ public:
KMemoryInfo QueryInfo(VAddr addr);
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
+ ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
KMemoryAttribute value);
- ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
- ResultVal<VAddr> SetHeapSize(std::size_t size);
+ ResultCode SetMaxHeapSize(std::size_t size);
+ ResultCode SetHeapSize(VAddr* out, std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
@@ -182,14 +183,15 @@ public:
constexpr VAddr GetAliasCodeRegionSize() const {
return alias_code_region_end - alias_code_region_start;
}
+ size_t GetNormalMemorySize() {
+ std::lock_guard lk(page_table_lock);
+ return GetHeapSize() + mapped_physical_memory_size;
+ }
constexpr std::size_t GetAddressSpaceWidth() const {
return address_space_width;
}
- constexpr std::size_t GetHeapSize() {
- return current_heap_addr - heap_region_start;
- }
- constexpr std::size_t GetTotalHeapSize() {
- return GetHeapSize() + physical_memory_usage;
+ constexpr std::size_t GetHeapSize() const {
+ return current_heap_end - heap_region_start;
}
constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
return address_space_start <= address && address + size - 1 <= address_space_end - 1;
@@ -269,10 +271,8 @@ private:
VAddr code_region_end{};
VAddr alias_code_region_start{};
VAddr alias_code_region_end{};
- VAddr current_heap_addr{};
- std::size_t heap_capacity{};
- std::size_t physical_memory_usage{};
+ std::size_t mapped_physical_memory_size{};
std::size_t max_heap_size{};
std::size_t max_physical_memory_size{};
std::size_t address_space_width{};
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 90dda40dc..bf98a51e2 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -28,7 +28,6 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
-#include "core/hle/lock.h"
#include "core/memory.h"
namespace Kernel {
@@ -173,7 +172,7 @@ void KProcess::DecrementThreadCount() {
u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
- page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+ page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};
if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
capacity != pool_size) {
@@ -190,7 +189,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
}
u64 KProcess::GetTotalPhysicalMemoryUsed() const {
- return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
+ return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
GetSystemResourceSize();
}
@@ -221,30 +220,28 @@ bool KProcess::ReleaseUserException(KThread* thread) {
}
}
-void KProcess::PinCurrentThread() {
+void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- const s32 core_id = GetCurrentCoreId(kernel);
- KThread* cur_thread = GetCurrentThreadPointer(kernel);
+ KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
// Pin it.
PinThread(core_id, cur_thread);
- cur_thread->Pin();
+ cur_thread->Pin(core_id);
// An update is needed.
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
}
-void KProcess::UnpinCurrentThread() {
+void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- const s32 core_id = GetCurrentCoreId(kernel);
- KThread* cur_thread = GetCurrentThreadPointer(kernel);
+ KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -411,8 +408,8 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
resource_limit->Reserve(LimitableResource::Threads, 1);
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
- const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
- ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
+ const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
+ ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
ChangeStatus(ProcessStatus::Running);
@@ -543,7 +540,6 @@ void KProcess::FreeTLSRegion(VAddr tls_address) {
}
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
- std::lock_guard lock{HLE::g_hle_lock};
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
KMemoryPermission permission) {
page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index cb93c7e24..e7c8b5838 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -345,8 +345,8 @@ public:
bool IsSignaled() const override;
- void PinCurrentThread();
- void UnpinCurrentThread();
+ void PinCurrentThread(s32 core_id);
+ void UnpinCurrentThread(s32 core_id);
void UnpinThread(KThread* thread);
KLightLock& GetStateLock() {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 277201de4..31cec990e 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -15,6 +15,7 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
+#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -53,6 +54,13 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
}
cores_pending_reschedule &= ~(1ULL << core);
}
+
+ for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
+ if (kernel.PhysicalCore(core_id).IsInterrupted()) {
+ KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id));
+ }
+ }
+
if (must_context_switch) {
auto core_scheduler = kernel.CurrentScheduler();
kernel.ExitSVCProfile();
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 752592e2e..71e029a3f 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <atomic>
#include <cinttypes>
#include <optional>
#include <vector>
@@ -26,12 +27,14 @@
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
+#include "core/memory.h"
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic_32.h"
@@ -50,6 +53,7 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
VAddr entry_point, u64 arg) {
context = {};
context.cpu_registers[0] = arg;
+ context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
context.pc = entry_point;
context.sp = stack_top;
// TODO(merry): Perform a hardware test to determine the below value.
@@ -61,6 +65,13 @@ namespace Kernel {
namespace {
+struct ThreadLocalRegion {
+ static constexpr std::size_t MessageBufferSize = 0x100;
+ std::array<u32, MessageBufferSize / sizeof(u32)> message_buffer;
+ std::atomic_uint16_t disable_count;
+ std::atomic_uint16_t interrupt_flag;
+};
+
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
public:
explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
@@ -344,7 +355,7 @@ void KThread::StartTermination() {
if (parent != nullptr) {
parent->ReleaseUserException(this);
if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
- parent->UnpinCurrentThread();
+ parent->UnpinCurrentThread(core_id);
}
}
@@ -370,7 +381,7 @@ void KThread::StartTermination() {
this->Close();
}
-void KThread::Pin() {
+void KThread::Pin(s32 current_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Set ourselves as pinned.
@@ -387,7 +398,6 @@ void KThread::Pin() {
// Bind ourselves to this core.
const s32 active_core = GetActiveCore();
- const s32 current_core = GetCurrentCoreId(kernel);
SetActiveCore(current_core);
physical_ideal_core_id = current_core;
@@ -480,6 +490,36 @@ void KThread::Unpin() {
}
}
+u16 KThread::GetUserDisableCount() const {
+ if (!IsUserThread()) {
+ // We only emulate TLS for user threads
+ return {};
+ }
+
+ auto& memory = kernel.System().Memory();
+ return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
+}
+
+void KThread::SetInterruptFlag() {
+ if (!IsUserThread()) {
+ // We only emulate TLS for user threads
+ return;
+ }
+
+ auto& memory = kernel.System().Memory();
+ memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
+}
+
+void KThread::ClearInterruptFlag() {
+ if (!IsUserThread()) {
+ // We only emulate TLS for user threads
+ return;
+ }
+
+ auto& memory = kernel.System().Memory();
+ memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
+}
+
ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c8a08bd71..83dfde69b 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -307,6 +307,10 @@ public:
return parent != nullptr;
}
+ u16 GetUserDisableCount() const;
+ void SetInterruptFlag();
+ void ClearInterruptFlag();
+
[[nodiscard]] KThread* GetLockOwner() const {
return lock_owner;
}
@@ -490,7 +494,7 @@ public:
this->GetStackParameters().disable_count--;
}
- void Pin();
+ void Pin(s32 current_core);
void Unpin();
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 2e4e4cb1c..1225e1fba 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -182,7 +182,10 @@ struct KernelCore::Impl {
// Shutdown all processes.
if (current_process) {
current_process->Finalize();
- current_process->Close();
+ // current_process->Close();
+ // TODO: The current process should be destroyed based on accurate ref counting after
+ // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
+ current_process->Destroy();
current_process = nullptr;
}
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index a9f7438ea..250ef9042 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -41,7 +41,6 @@
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/memory.h"
#include "core/reporter.h"
@@ -136,25 +135,15 @@ enum class ResourceLimitValueType {
} // Anonymous namespace
/// Set the process heap to a given size. It can both extend and shrink the heap.
-static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
- std::lock_guard lock{HLE::g_hle_lock};
- LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
-
- // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
- if ((heap_size % 0x200000) != 0) {
- LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
- heap_size);
- return ResultInvalidSize;
- }
-
- if (heap_size >= 0x200000000) {
- LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
- return ResultInvalidSize;
- }
+static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+ LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
- auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+ // Validate size.
+ R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
+ R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
- CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size));
+ // Set the heap size.
+ R_TRY(system.Kernel().CurrentProcess()->PageTable().SetHeapSize(out_address, size));
return ResultSuccess;
}
@@ -166,9 +155,38 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
return result;
}
+constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
+ switch (perm) {
+ case MemoryPermission::None:
+ case MemoryPermission::Read:
+ case MemoryPermission::ReadWrite:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
+ MemoryPermission perm) {
+ // Validate address / size.
+ R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+ R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+ R_UNLESS(size > 0, ResultInvalidSize);
+ R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+ // Validate the permission.
+ R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+ // Validate that the region is in range for the current process.
+ auto& page_table = system.Kernel().CurrentProcess()->PageTable();
+ R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Set the memory attribute.
+ return page_table.SetMemoryPermission(address, size, perm);
+}
+
static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
u32 attribute) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attribute);
@@ -212,7 +230,6 @@ static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 si
/// Maps a memory range into a different range.
static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
@@ -232,7 +249,6 @@ static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr,
/// Unmaps a region that was previously mapped with svcMapMemory
static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
@@ -642,7 +658,6 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
/// Gets system/memory information for the current process
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
u64 info_sub_id) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
@@ -886,22 +901,17 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
return ResultSuccess;
}
case GetInfoType::IdleTickCount: {
- if (handle == 0) {
- LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
- static_cast<Handle>(handle));
- return ResultInvalidHandle;
- }
-
- if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
- info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
- LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
- return ResultInvalidCombination;
- }
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
- const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const idle_thread = scheduler.GetIdleThread();
+ // Verify the requested core is valid.
+ const bool core_valid =
+ (info_sub_id == static_cast<u64>(-1ULL)) ||
+ (info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
+ R_UNLESS(core_valid, ResultInvalidCombination);
- *result = idle_thread->GetCpuTime();
+ // Get the idle tick count.
+ *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
return ResultSuccess;
}
default:
@@ -924,7 +934,6 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
/// Maps memory at a desired address
static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -978,7 +987,6 @@ static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size)
/// Unmaps memory previously mapped via MapPhysicalMemory
static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -1520,7 +1528,6 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
VAddr page_info_address, Handle process_handle,
VAddr address) {
- std::lock_guard lock{HLE::g_hle_lock};
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
@@ -2020,6 +2027,25 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::Sign
count);
}
+static void SynchronizePreemptionState(Core::System& system) {
+ auto& kernel = system.Kernel();
+
+ // Lock the scheduler.
+ KScopedSchedulerLock sl{kernel};
+
+ // If the current thread is pinned, unpin it.
+ KProcess* cur_process = system.Kernel().CurrentProcess();
+ const auto core_id = GetCurrentCoreId(kernel);
+
+ if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
+ // Clear the current thread's interrupt flag.
+ GetCurrentThread(kernel).ClearInterruptFlag();
+
+ // Unpin the current thread.
+ cur_process->UnpinCurrentThread(core_id);
+ }
+}
+
static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
s32 value, s32 count) {
return SignalToAddress(system, address, signal_type, value, count);
@@ -2738,7 +2764,7 @@ static const FunctionDef SVC_Table_32[] = {
static const FunctionDef SVC_Table_64[] = {
{0x00, nullptr, "Unknown"},
{0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
- {0x02, nullptr, "SetMemoryPermission"},
+ {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"},
{0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
{0x04, SvcWrap64<MapMemory>, "MapMemory"},
{0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"},
@@ -2790,7 +2816,7 @@ static const FunctionDef SVC_Table_64[] = {
{0x33, SvcWrap64<GetThreadContext>, "GetThreadContext"},
{0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
{0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
- {0x36, nullptr, "SynchronizePreemptionState"},
+ {0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"},
{0x37, nullptr, "Unknown"},
{0x38, nullptr, "Unknown"},
{0x39, nullptr, "Unknown"},
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 60ea2c405..25de6e437 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -5,6 +5,7 @@
#pragma once
#include "common/common_types.h"
+#include "common/literals.h"
namespace Kernel {
using Handle = u32;
@@ -12,9 +13,13 @@ using Handle = u32;
namespace Kernel::Svc {
+using namespace Common::Literals;
+
constexpr s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30};
+constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
+
constexpr inline Handle InvalidHandle = Handle(0);
enum PseudoHandle : Handle {
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 86255fe6d..a60adfcab 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -249,6 +249,14 @@ void SvcWrap64(Core::System& system) {
func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
}
+// Used by SetMemoryPermission
+template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
+void SvcWrap64(Core::System& system) {
+ FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
+ static_cast<Svc::MemoryPermission>(Param(system, 2)))
+ .raw);
+}
+
// Used by MapSharedMemory
template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
diff --git a/src/core/hle/lock.cpp b/src/core/hle/lock.cpp
deleted file mode 100644
index be4bfce3b..000000000
--- a/src/core/hle/lock.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <core/hle/lock.h>
-
-namespace HLE {
-std::recursive_mutex g_hle_lock;
-}
diff --git a/src/core/hle/lock.h b/src/core/hle/lock.h
deleted file mode 100644
index 5c99fe996..000000000
--- a/src/core/hle/lock.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <mutex>
-
-namespace HLE {
-/*
- * Synchronizes access to the internal HLE kernel structures, it is acquired when a guest
- * application thread performs a syscall. It should be acquired by any host threads that read or
- * modify the HLE kernel state. Note: Any operation that directly or indirectly reads from or writes
- * to the emulated memory is not protected by this mutex, and should be avoided in any threads other
- * than the CPU thread.
- */
-extern std::recursive_mutex g_hle_lock;
-} // namespace HLE
diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp
index 4c7d3bb6e..ee49edbb9 100644
--- a/src/core/hle/service/bcat/backend/backend.cpp
+++ b/src/core/hle/service/bcat/backend/backend.cpp
@@ -6,7 +6,6 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
-#include "core/hle/lock.h"
#include "core/hle/service/bcat/backend/backend.h"
namespace Service::BCAT {
@@ -29,10 +28,6 @@ DeliveryCacheProgressImpl& ProgressServiceBackend::GetImpl() {
return impl;
}
-void ProgressServiceBackend::SetNeedHLELock(bool need) {
- need_hle_lock = need;
-}
-
void ProgressServiceBackend::SetTotalSize(u64 size) {
impl.total_bytes = size;
SignalUpdate();
@@ -88,12 +83,7 @@ void ProgressServiceBackend::FinishDownload(ResultCode result) {
}
void ProgressServiceBackend::SignalUpdate() {
- if (need_hle_lock) {
- std::lock_guard lock(HLE::g_hle_lock);
- update_event->GetWritableEvent().Signal();
- } else {
- update_event->GetWritableEvent().Signal();
- }
+ update_event->GetWritableEvent().Signal();
}
Backend::Backend(DirectoryGetter getter) : dir_getter(std::move(getter)) {}
diff --git a/src/core/hle/service/bcat/backend/backend.h b/src/core/hle/service/bcat/backend/backend.h
index 59c6d4740..63833c927 100644
--- a/src/core/hle/service/bcat/backend/backend.h
+++ b/src/core/hle/service/bcat/backend/backend.h
@@ -71,10 +71,6 @@ class ProgressServiceBackend {
public:
~ProgressServiceBackend();
- // Clients should call this with true if any of the functions are going to be called from a
- // non-HLE thread and this class need to lock the hle mutex. (default is false)
- void SetNeedHLELock(bool need);
-
// Sets the number of bytes total in the entire download.
void SetTotalSize(u64 size);
@@ -109,7 +105,6 @@ private:
DeliveryCacheProgressImpl impl{};
Kernel::KEvent* update_event;
- bool need_hle_lock = false;
};
// A class representing an abstract backend for BCAT functionality.
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.cpp b/src/core/hle/service/hid/controllers/console_sixaxis.cpp
index f0f3105dc..a727b3582 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.cpp
+++ b/src/core/hle/service/hid/controllers/console_sixaxis.cpp
@@ -33,15 +33,14 @@ void Controller_ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_ti
const auto& last_entry = seven_sixaxis_lifo.ReadCurrentEntry().state;
next_seven_sixaxis_state.sampling_number = last_entry.sampling_number + 1;
- // Try to read sixaxis sensor states
const auto motion_status = console->GetMotion();
+ last_global_timestamp = core_timing.GetGlobalTimeNs().count();
- console_six_axis.is_seven_six_axis_sensor_at_rest = motion_status.is_at_rest;
-
+ // This value increments every time the Switch goes to sleep
+ next_seven_sixaxis_state.unknown = 1;
+ next_seven_sixaxis_state.timestamp = last_global_timestamp - last_saved_timestamp;
next_seven_sixaxis_state.accel = motion_status.accel;
- // Zero gyro values as they just mess up with the camera
- // Note: Probably a correct sensivity setting must be set
- next_seven_sixaxis_state.gyro = {};
+ next_seven_sixaxis_state.gyro = motion_status.gyro;
next_seven_sixaxis_state.quaternion = {
{
motion_status.quaternion.xyz.y,
@@ -52,9 +51,9 @@ void Controller_ConsoleSixAxis::OnUpdate(const Core::Timing::CoreTiming& core_ti
};
console_six_axis.sampling_number++;
- // TODO(German77): Find the purpose of those values
- console_six_axis.verticalization_error = 0.0f;
- console_six_axis.gyro_bias = {0.0f, 0.0f, 0.0f};
+ console_six_axis.is_seven_six_axis_sensor_at_rest = motion_status.is_at_rest;
+ console_six_axis.verticalization_error = motion_status.verticalization_error;
+ console_six_axis.gyro_bias = motion_status.gyro_bias;
// Update console six axis shared memory
std::memcpy(data + SHARED_MEMORY_OFFSET, &console_six_axis, sizeof(console_six_axis));
@@ -69,7 +68,6 @@ void Controller_ConsoleSixAxis::SetTransferMemoryPointer(u8* t_mem) {
}
void Controller_ConsoleSixAxis::ResetTimestamp() {
- seven_sixaxis_lifo.buffer_count = 0;
- seven_sixaxis_lifo.buffer_tail = 0;
+ last_saved_timestamp = last_global_timestamp;
}
} // namespace Service::HID
diff --git a/src/core/hle/service/hid/controllers/console_sixaxis.h b/src/core/hle/service/hid/controllers/console_sixaxis.h
index 279241858..26d153f0c 100644
--- a/src/core/hle/service/hid/controllers/console_sixaxis.h
+++ b/src/core/hle/service/hid/controllers/console_sixaxis.h
@@ -39,8 +39,9 @@ public:
private:
struct SevenSixAxisState {
- INSERT_PADDING_WORDS(4); // unused
- s64 sampling_number{};
+ INSERT_PADDING_WORDS(2); // unused
+ u64 timestamp{};
+ u64 sampling_number{};
u64 unknown{};
Common::Vec3f accel{};
Common::Vec3f gyro{};
@@ -52,9 +53,10 @@ private:
struct ConsoleSharedMemory {
u64 sampling_number{};
bool is_seven_six_axis_sensor_at_rest{};
- INSERT_PADDING_BYTES(4); // padding
+ INSERT_PADDING_BYTES(3); // padding
f32 verticalization_error{};
Common::Vec3f gyro_bias{};
+ INSERT_PADDING_BYTES(4); // padding
};
static_assert(sizeof(ConsoleSharedMemory) == 0x20, "ConsoleSharedMemory is an invalid size");
@@ -64,6 +66,8 @@ private:
Core::HID::EmulatedConsole* console;
u8* transfer_memory = nullptr;
bool is_transfer_memory_set = false;
+ u64 last_saved_timestamp{};
+ u64 last_global_timestamp{};
ConsoleSharedMemory console_six_axis{};
SevenSixAxisState next_seven_sixaxis_state{};
};
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 2705e9dcb..e5c951e06 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -66,9 +66,9 @@ Controller_NPad::Controller_NPad(Core::HID::HIDCore& hid_core_,
auto& controller = controller_data[i];
controller.device = hid_core.GetEmulatedControllerByIndex(i);
controller.vibration[Core::HID::EmulatedDeviceIndex::LeftIndex].latest_vibration_value =
- DEFAULT_VIBRATION_VALUE;
+ Core::HID::DEFAULT_VIBRATION_VALUE;
controller.vibration[Core::HID::EmulatedDeviceIndex::RightIndex].latest_vibration_value =
- DEFAULT_VIBRATION_VALUE;
+ Core::HID::DEFAULT_VIBRATION_VALUE;
Core::HID::ControllerUpdateCallback engine_callback{
.on_change = [this,
i](Core::HID::ControllerTriggerType type) { ControllerUpdate(type, i); },
@@ -781,7 +781,8 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
Core::HID::VibrationValue vibration{0.0f, 160.0f, 0.0f, 320.0f};
controller.device->SetVibration(device_index, vibration);
// Then reset the vibration value to its default value.
- controller.vibration[device_index].latest_vibration_value = DEFAULT_VIBRATION_VALUE;
+ controller.vibration[device_index].latest_vibration_value =
+ Core::HID::DEFAULT_VIBRATION_VALUE;
}
return false;
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index 63281cb35..6b2872bad 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -90,13 +90,6 @@ public:
Default = 3,
};
- static constexpr Core::HID::VibrationValue DEFAULT_VIBRATION_VALUE{
- .low_amplitude = 0.0f,
- .low_frequency = 160.0f,
- .high_amplitude = 0.0f,
- .high_frequency = 320.0f,
- };
-
void SetSupportedStyleSet(Core::HID::NpadStyleTag style_set);
Core::HID::NpadStyleTag GetSupportedStyleSet() const;
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 7163e1a4e..6e12381fb 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -1404,7 +1404,7 @@ void Hid::SendVibrationGcErmCommand(Kernel::HLERequestContext& ctx) {
.high_frequency = 0.0f,
};
default:
- return Controller_NPad::DEFAULT_VIBRATION_VALUE;
+ return Core::HID::DEFAULT_VIBRATION_VALUE;
}
}();
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 693ffc71a..761d0d3c6 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -9,7 +9,6 @@
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
-#include "core/hle/lock.h"
#include "core/hle/service/nfp/nfp.h"
#include "core/hle/service/nfp/nfp_user.h"
@@ -337,7 +336,6 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) {
}
bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) {
- std::lock_guard lock{HLE::g_hle_lock};
if (buffer.size() < sizeof(AmiiboFile)) {
return false;
}
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
index b4c3a6099..5fead6d1b 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue.cpp
@@ -186,6 +186,10 @@ u32 BufferQueue::Query(QueryType type) {
case QueryType::NativeWindowWidth:
case QueryType::NativeWindowHeight:
break;
+ case QueryType::NativeWindowMinUndequeuedBuffers:
+ return 0;
+ case QueryType::NativeWindowConsumerUsageBits:
+ return 0;
}
UNIMPLEMENTED_MSG("Unimplemented query type={}", type);
return 0;
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index 7b7baeaea..f2a579133 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -57,6 +57,11 @@ public:
NativeWindowWidth = 0,
NativeWindowHeight = 1,
NativeWindowFormat = 2,
+ /// The minimum number of buffers that must remain un-dequeued after a buffer has been
+ /// queued
+ NativeWindowMinUndequeuedBuffers = 3,
+ /// The consumer gralloc usage bits currently set by the consumer
+ NativeWindowConsumerUsageBits = 10,
};
explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index a22811ec1..01e69de30 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -100,9 +100,6 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
LOG_DEBUG(Service, "Opening \"{}\" display", name);
- // TODO(Subv): Currently we only support the Default display.
- ASSERT(name == "Default");
-
const auto itr =
std::find_if(displays.begin(), displays.end(),
[&](const VI::Display& display) { return display.GetName() == name; });
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index 3ae9e6e0e..99ed34b00 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -71,7 +71,6 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process,
kip->GetTitleID(), 0xFFFFFFFFFFFFFFFF, 0x1FE00000,
kip->GetKernelCapabilities());
- const VAddr base_address = process.PageTable().GetCodeRegionStart();
Kernel::CodeSet codeset;
Kernel::PhysicalMemory program_image;
@@ -91,7 +90,14 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process,
program_image.resize(PageAlignSize(kip->GetBSSOffset()) + kip->GetBSSSize());
codeset.DataSegment().size += kip->GetBSSSize();
+ // Set up the process code layout
+ if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size())
+ .IsError()) {
+ return {ResultStatus::ErrorNotInitialized, {}};
+ }
+
codeset.memory = std::move(program_image);
+ const VAddr base_address = process.PageTable().GetCodeRegionStart();
process.LoadModule(std::move(codeset), base_address);
LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", kip->GetName(), base_address);